ia64/xen-unstable

changeset 1452:92b8e1efa784

bitkeeper revision 1.952 (40c8935a3XSRdQfnx5RoO7XgaggvOQ)

Towards x86_64 support. Merged a bunch of the existing x86_64 stuff
back into a generic 'x86' architecture. The aim is to share as much
code as possible between the 32- and 64-bit worlds.
author kaf24@scramble.cl.cam.ac.uk
date Thu Jun 10 16:59:06 2004 +0000 (2004-06-10)
parents 3567bb7e1227
children 7031f4a4155f
files .rootkeys xen/Rules.mk xen/arch/i386/Makefile xen/arch/i386/Rules.mk xen/arch/i386/acpi.c xen/arch/i386/apic.c xen/arch/i386/boot/boot.S xen/arch/i386/delay.c xen/arch/i386/domain_page.c xen/arch/i386/entry.S xen/arch/i386/extable.c xen/arch/i386/flushtlb.c xen/arch/i386/i387.c xen/arch/i386/i8259.c xen/arch/i386/idle0_task.c xen/arch/i386/io_apic.c xen/arch/i386/ioremap.c xen/arch/i386/irq.c xen/arch/i386/mm.c xen/arch/i386/mpparse.c xen/arch/i386/nmi.c xen/arch/i386/pci-dma.c xen/arch/i386/pci-i386.c xen/arch/i386/pci-i386.h xen/arch/i386/pci-irq.c xen/arch/i386/pci-pc.c xen/arch/i386/pdb-linux.c xen/arch/i386/pdb-stub.c xen/arch/i386/process.c xen/arch/i386/rwlock.c xen/arch/i386/setup.c xen/arch/i386/smp.c xen/arch/i386/smpboot.c xen/arch/i386/time.c xen/arch/i386/trampoline.S xen/arch/i386/traps.c xen/arch/i386/usercopy.c xen/arch/i386/xen.lds xen/arch/x86/Makefile xen/arch/x86/Rules.mk xen/arch/x86/acpi.c xen/arch/x86/apic.c xen/arch/x86/boot/boot.S xen/arch/x86/delay.c xen/arch/x86/domain_page.c xen/arch/x86/entry.S xen/arch/x86/extable.c xen/arch/x86/flushtlb.c xen/arch/x86/i387.c xen/arch/x86/i8259.c xen/arch/x86/idle0_task.c xen/arch/x86/io_apic.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/mpparse.c xen/arch/x86/nmi.c xen/arch/x86/pci-irq.c xen/arch/x86/pci-pc.c xen/arch/x86/pci-x86.c xen/arch/x86/pci-x86.h xen/arch/x86/pdb-linux.c xen/arch/x86/pdb-stub.c xen/arch/x86/process.c xen/arch/x86/rwlock.c xen/arch/x86/setup.c xen/arch/x86/smp.c xen/arch/x86/smpboot.c xen/arch/x86/time.c xen/arch/x86/trampoline.S xen/arch/x86/traps.c xen/arch/x86/usercopy.c xen/arch/x86/xen.lds xen/arch/x86_64/Rules.mk xen/drivers/char/keyboard.c xen/drivers/char/serial.c xen/drivers/pci/pci.c xen/include/asm-i386/acpi.h xen/include/asm-i386/apic.h xen/include/asm-i386/apicdef.h xen/include/asm-i386/atomic.h xen/include/asm-i386/bitops.h xen/include/asm-i386/cache.h xen/include/asm-i386/config.h xen/include/asm-i386/cpufeature.h xen/include/asm-i386/current.h xen/include/asm-i386/debugreg.h xen/include/asm-i386/delay.h xen/include/asm-i386/desc.h xen/include/asm-i386/div64.h xen/include/asm-i386/dma.h xen/include/asm-i386/domain_page.h xen/include/asm-i386/fixmap.h xen/include/asm-i386/flushtlb.h xen/include/asm-i386/hardirq.h xen/include/asm-i386/hdreg.h xen/include/asm-i386/i387.h xen/include/asm-i386/ide.h xen/include/asm-i386/io.h xen/include/asm-i386/io_apic.h xen/include/asm-i386/irq.h xen/include/asm-i386/ldt.h xen/include/asm-i386/mc146818rtc.h xen/include/asm-i386/mpspec.h xen/include/asm-i386/msr.h xen/include/asm-i386/page.h xen/include/asm-i386/param.h xen/include/asm-i386/pci.h xen/include/asm-i386/pdb.h xen/include/asm-i386/pgalloc.h xen/include/asm-i386/processor.h xen/include/asm-i386/ptrace.h xen/include/asm-i386/rwlock.h xen/include/asm-i386/scatterlist.h xen/include/asm-i386/smp.h xen/include/asm-i386/smpboot.h xen/include/asm-i386/softirq.h xen/include/asm-i386/spinlock.h xen/include/asm-i386/string.h xen/include/asm-i386/system.h xen/include/asm-i386/time.h xen/include/asm-i386/timex.h xen/include/asm-i386/types.h xen/include/asm-i386/uaccess.h xen/include/asm-i386/unaligned.h xen/include/asm-x86/acpi.h xen/include/asm-x86/apic.h xen/include/asm-x86/apicdef.h xen/include/asm-x86/atomic.h xen/include/asm-x86/bitops.h xen/include/asm-x86/cache.h xen/include/asm-x86/config.h xen/include/asm-x86/cpufeature.h xen/include/asm-x86/current.h xen/include/asm-x86/debugreg.h xen/include/asm-x86/delay.h xen/include/asm-x86/desc.h xen/include/asm-x86/div64.h 
xen/include/asm-x86/domain_page.h xen/include/asm-x86/fixmap.h xen/include/asm-x86/flushtlb.h xen/include/asm-x86/hardirq.h xen/include/asm-x86/i387.h xen/include/asm-x86/io.h xen/include/asm-x86/io_apic.h xen/include/asm-x86/irq.h xen/include/asm-x86/ldt.h xen/include/asm-x86/mc146818rtc.h xen/include/asm-x86/mpspec.h xen/include/asm-x86/msr.h xen/include/asm-x86/page.h xen/include/asm-x86/param.h xen/include/asm-x86/pci.h xen/include/asm-x86/pdb.h xen/include/asm-x86/processor.h xen/include/asm-x86/ptrace.h xen/include/asm-x86/rwlock.h xen/include/asm-x86/smp.h xen/include/asm-x86/smpboot.h xen/include/asm-x86/softirq.h xen/include/asm-x86/spinlock.h xen/include/asm-x86/string.h xen/include/asm-x86/system.h xen/include/asm-x86/time.h xen/include/asm-x86/timex.h xen/include/asm-x86/types.h xen/include/asm-x86/uaccess.h xen/include/asm-x86/unaligned.h xen/include/asm-x86/x86_64/config.h xen/include/asm-x86/x86_64/current.h xen/include/asm-x86/x86_64/desc.h xen/include/asm-x86/x86_64/ldt.h xen/include/asm-x86/x86_64/page.h xen/include/asm-x86/x86_64/pda.h xen/include/asm-x86/x86_64/processor.h xen/include/asm-x86/x86_64/ptrace.h xen/include/asm-x86/x86_64/uaccess.h xen/include/asm-x86_64/apic.h xen/include/asm-x86_64/apicdef.h xen/include/asm-x86_64/atomic.h xen/include/asm-x86_64/bitops.h xen/include/asm-x86_64/cache.h xen/include/asm-x86_64/config.h xen/include/asm-x86_64/cpufeature.h xen/include/asm-x86_64/current.h xen/include/asm-x86_64/debugreg.h xen/include/asm-x86_64/delay.h xen/include/asm-x86_64/desc.h xen/include/asm-x86_64/dma.h xen/include/asm-x86_64/domain_page.h xen/include/asm-x86_64/fixmap.h xen/include/asm-x86_64/flushtlb.h xen/include/asm-x86_64/hardirq.h xen/include/asm-x86_64/hdreg.h xen/include/asm-x86_64/i387.h xen/include/asm-x86_64/ide.h xen/include/asm-x86_64/io.h xen/include/asm-x86_64/io_apic.h xen/include/asm-x86_64/irq.h xen/include/asm-x86_64/ldt.h xen/include/asm-x86_64/mc146818rtc.h xen/include/asm-x86_64/mpspec.h xen/include/asm-x86_64/msr.h xen/include/asm-x86_64/page.h xen/include/asm-x86_64/param.h xen/include/asm-x86_64/pci.h xen/include/asm-x86_64/pda.h xen/include/asm-x86_64/pdb.h xen/include/asm-x86_64/pgalloc.h xen/include/asm-x86_64/processor.h xen/include/asm-x86_64/ptrace.h xen/include/asm-x86_64/rwlock.h xen/include/asm-x86_64/scatterlist.h xen/include/asm-x86_64/smp.h xen/include/asm-x86_64/smpboot.h xen/include/asm-x86_64/softirq.h xen/include/asm-x86_64/spinlock.h xen/include/asm-x86_64/string.h xen/include/asm-x86_64/system.h xen/include/asm-x86_64/time.h xen/include/asm-x86_64/timex.h xen/include/asm-x86_64/types.h xen/include/asm-x86_64/uaccess.h xen/include/asm-x86_64/unaligned.h xen/include/hypervisor-ifs/arch-i386/hypervisor-if.h xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h xen/include/xen/mm.h
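
In outline, the renames recorded in this changeset amount to the following
(a summary drawn from the file list above, not itself part of the changeset):

    xen/arch/i386/*                        -> xen/arch/x86/*
    xen/include/asm-i386/*                 -> xen/include/asm-x86/*         (headers shared by both subarchitectures)
    xen/include/asm-x86_64/{config,current,desc,ldt,page,pda,processor,ptrace,uaccess}.h
                                           -> xen/include/asm-x86/x86_64/*  (64-bit-only overrides)
    xen/include/hypervisor-ifs/arch-i386/  -> xen/include/hypervisor-ifs/arch-x86/

The remaining asm-x86_64 header keys and xen/arch/x86_64/Rules.mk are removed
from .rootkeys rather than renamed.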
line diff
     1.1 --- a/.rootkeys	Thu Jun 10 14:24:30 2004 +0000
     1.2 +++ b/.rootkeys	Thu Jun 10 16:59:06 2004 +0000
     1.3 @@ -228,43 +228,40 @@ 4050c413NtuyIq5lsYJV4P7KIjujXw tools/xen
     1.4  3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
     1.5  3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
     1.6  3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
     1.7 -3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/i386/Makefile
     1.8 -3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/i386/Rules.mk
     1.9 -3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/i386/acpi.c
    1.10 -3ddb79bcsjinG9k1KcvbVBuas1R2dA xen/arch/i386/apic.c
    1.11 -3ddb79bcSC_LvnmFlX-T5iTgaR0SKg xen/arch/i386/boot/boot.S
    1.12 -3ddb79bcUrk2EIaM5VsT6wUudH1kkg xen/arch/i386/delay.c
    1.13 -3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/arch/i386/domain_page.c
    1.14 -3ddb79bcecupHj56ZbTa3B0FxDowMg xen/arch/i386/entry.S
    1.15 -3ddb79bcY5zW7KhvI9gvfuPi3ZumEg xen/arch/i386/extable.c
    1.16 -3fe443fdDDb0Sw6NQBCk4GQapayfTA xen/arch/i386/flushtlb.c
    1.17 -3ddb79bcesE5E-lS4QhRhlqXxqj9cA xen/arch/i386/i387.c
    1.18 -3ddb79bcCAq6IpdkHueChoVTfXqEQQ xen/arch/i386/i8259.c
    1.19 -3ddb79bcBit4xJXbwtX0kb1hh2uO1Q xen/arch/i386/idle0_task.c
    1.20 -3ddb79bcKIkRR0kqWaJhe5VUDkMdxg xen/arch/i386/io_apic.c
    1.21 -3ddb79bc1uNlAtc-84Ioq4qfcnI_CQ xen/arch/i386/ioremap.c
    1.22 -3ddb79bdqfIcjkz_h9Hvtp8Tk_19Zw xen/arch/i386/irq.c
    1.23 -3ddb79bcHwuCQDjBICDTSis52hWguw xen/arch/i386/mm.c
    1.24 -3ddb79bdS4UeWWXDH-FaBKqcpMFcnw xen/arch/i386/mpparse.c
    1.25 -3f12cff65EV3qOG2j37Qm0ShgvXGRw xen/arch/i386/nmi.c
    1.26 -3ddb79bcnL-_Dtsbtjgxl7vJU3vBiQ xen/arch/i386/pci-dma.c
    1.27 -3ddb79bdeJ7_86z03yTAPIeeywOg3Q xen/arch/i386/pci-i386.c
    1.28 -3ddb79bdIKgipvGoqExEQ7jawfVowA xen/arch/i386/pci-i386.h
    1.29 -3ddb79bdHe6_Uij4-glW91vInNtBYQ xen/arch/i386/pci-irq.c
    1.30 -3ddb79bcZ_2FxINljqNSkqa17ISyJw xen/arch/i386/pci-pc.c
    1.31 -40a4dfced2dnSzbKgJFlD3chKHexjQ xen/arch/i386/pdb-linux.c
    1.32 -4022a73czgX7d-2zfF_cb33oVemApQ xen/arch/i386/pdb-stub.c
    1.33 -3ddb79bc1_2bAt67x9MFCP4AZrQnvQ xen/arch/i386/process.c
    1.34 -3ddb79bc7KxGCEJsgBnkDX7XjD_ZEQ xen/arch/i386/rwlock.c
    1.35 -3ddb79bcrD6Z_rUvSDgrvjyb4846Eg xen/arch/i386/setup.c
    1.36 -3ddb79bcSx2e8JSR3pdSGa8x1ScYzA xen/arch/i386/smp.c
    1.37 -3ddb79bcfUN3-UBCPzX26IU8bq-3aw xen/arch/i386/smpboot.c
    1.38 -3ddb79bc-Udq7ol-NX4q9XsYnN7A2Q xen/arch/i386/time.c
    1.39 -3ddb79bccYVzXZJyVaxuv5T42Z1Fsw xen/arch/i386/trampoline.S
    1.40 -3ddb79bcOftONV9h4QCxXOfiT0h91w xen/arch/i386/traps.c
    1.41 -3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen/arch/i386/usercopy.c
    1.42 -3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen/arch/i386/xen.lds
    1.43 -404f1b91uzXgPOtIhs8UZPGbZvlHfg xen/arch/x86_64/Rules.mk
    1.44 +3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
    1.45 +3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
    1.46 +3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
    1.47 +3ddb79bcsjinG9k1KcvbVBuas1R2dA xen/arch/x86/apic.c
    1.48 +3ddb79bcSC_LvnmFlX-T5iTgaR0SKg xen/arch/x86/boot/boot.S
    1.49 +3ddb79bcUrk2EIaM5VsT6wUudH1kkg xen/arch/x86/delay.c
    1.50 +3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/arch/x86/domain_page.c
    1.51 +3ddb79bcecupHj56ZbTa3B0FxDowMg xen/arch/x86/entry.S
    1.52 +3ddb79bcY5zW7KhvI9gvfuPi3ZumEg xen/arch/x86/extable.c
    1.53 +3fe443fdDDb0Sw6NQBCk4GQapayfTA xen/arch/x86/flushtlb.c
    1.54 +3ddb79bcesE5E-lS4QhRhlqXxqj9cA xen/arch/x86/i387.c
    1.55 +3ddb79bcCAq6IpdkHueChoVTfXqEQQ xen/arch/x86/i8259.c
    1.56 +3ddb79bcBit4xJXbwtX0kb1hh2uO1Q xen/arch/x86/idle0_task.c
    1.57 +3ddb79bcKIkRR0kqWaJhe5VUDkMdxg xen/arch/x86/io_apic.c
    1.58 +3ddb79bdqfIcjkz_h9Hvtp8Tk_19Zw xen/arch/x86/irq.c
    1.59 +3ddb79bcHwuCQDjBICDTSis52hWguw xen/arch/x86/mm.c
    1.60 +3ddb79bdS4UeWWXDH-FaBKqcpMFcnw xen/arch/x86/mpparse.c
    1.61 +3f12cff65EV3qOG2j37Qm0ShgvXGRw xen/arch/x86/nmi.c
    1.62 +3ddb79bdHe6_Uij4-glW91vInNtBYQ xen/arch/x86/pci-irq.c
    1.63 +3ddb79bcZ_2FxINljqNSkqa17ISyJw xen/arch/x86/pci-pc.c
    1.64 +3ddb79bdeJ7_86z03yTAPIeeywOg3Q xen/arch/x86/pci-x86.c
    1.65 +3ddb79bdIKgipvGoqExEQ7jawfVowA xen/arch/x86/pci-x86.h
    1.66 +40a4dfced2dnSzbKgJFlD3chKHexjQ xen/arch/x86/pdb-linux.c
    1.67 +4022a73czgX7d-2zfF_cb33oVemApQ xen/arch/x86/pdb-stub.c
    1.68 +3ddb79bc1_2bAt67x9MFCP4AZrQnvQ xen/arch/x86/process.c
    1.69 +3ddb79bc7KxGCEJsgBnkDX7XjD_ZEQ xen/arch/x86/rwlock.c
    1.70 +3ddb79bcrD6Z_rUvSDgrvjyb4846Eg xen/arch/x86/setup.c
    1.71 +3ddb79bcSx2e8JSR3pdSGa8x1ScYzA xen/arch/x86/smp.c
    1.72 +3ddb79bcfUN3-UBCPzX26IU8bq-3aw xen/arch/x86/smpboot.c
    1.73 +3ddb79bc-Udq7ol-NX4q9XsYnN7A2Q xen/arch/x86/time.c
    1.74 +3ddb79bccYVzXZJyVaxuv5T42Z1Fsw xen/arch/x86/trampoline.S
    1.75 +3ddb79bcOftONV9h4QCxXOfiT0h91w xen/arch/x86/traps.c
    1.76 +3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen/arch/x86/usercopy.c
    1.77 +3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen/arch/x86/xen.lds
    1.78  3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen/common/Makefile
    1.79  3e397e66AyyD5fYraAySWuwi9uqSXg xen/common/ac_timer.c
    1.80  4022a73c_BbDFd2YJ_NQYVvKX5Oz7w xen/common/debug-linux.c
    1.81 @@ -347,103 +344,60 @@ 40715b2d3CdS6dIpZDTiCJRlDG3LCA xen/inclu
    1.82  40715b2dKRW7A71SNaeV6zfrEzYxPw xen/include/acpi/platform/acenv.h
    1.83  40715b2d8fYydJMcODFrV1ocLklGDg xen/include/acpi/platform/acgcc.h
    1.84  40715b2d1yZkqyAt0kgx2xEwsatuuA xen/include/acpi/platform/aclinux.h
    1.85 -40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-i386/acpi.h
    1.86 -3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-i386/apic.h
    1.87 -3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-i386/apicdef.h
    1.88 -3ddb79c3OiG9eTsi9Dy3F_OkuRAzKA xen/include/asm-i386/atomic.h
    1.89 -3ddb79c3rM-Ote0Xn6Ytg8Y6YqAG-A xen/include/asm-i386/bitops.h
    1.90 -3ddb79c3KhTI0F_Iw_hRL9QEyOVK-g xen/include/asm-i386/cache.h
    1.91 -404f1b920OQVnrbnXnySS-WxrH9Wzw xen/include/asm-i386/config.h
    1.92 -3ddb79c2LLt11EQHjrd6sB7FUqvFfA xen/include/asm-i386/cpufeature.h
    1.93 -3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-i386/current.h
    1.94 -3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen/include/asm-i386/debugreg.h
    1.95 -3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-i386/delay.h
    1.96 -3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-i386/desc.h
    1.97 -40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-i386/div64.h
    1.98 -3e564149UkU91RX7onzpCAmbj_IFjw xen/include/asm-i386/dma.h
    1.99 -3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-i386/domain_page.h
   1.100 -3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-i386/fixmap.h
   1.101 -3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-i386/flushtlb.h
   1.102 -3ddb79c39o75zPP0T1aQQ4mNrCAN2w xen/include/asm-i386/hardirq.h
   1.103 -3ddb79c3BFEIwXR4IsWbwp4BoL4DkA xen/include/asm-i386/hdreg.h
   1.104 -3ddb79c3TMDjkxVndKFKnGiwY0HzDg xen/include/asm-i386/i387.h
   1.105 -3ddb79c3otbjpnqFDSzSeD0J-0xcwg xen/include/asm-i386/ide.h
   1.106 -3ddb79c3fQ_O3o5NHK2N8AJdk0Ea4Q xen/include/asm-i386/io.h
   1.107 -3ddb79c2TKeScYHQZreTdHqYNLbehQ xen/include/asm-i386/io_apic.h
   1.108 -3ddb79c2L7rTlFzazOLW1XuSZefpFw xen/include/asm-i386/irq.h
   1.109 -404f1b93OjLO4bFfBXYNaJdIqlNz-Q xen/include/asm-i386/ldt.h
   1.110 -3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen/include/asm-i386/mc146818rtc.h
   1.111 -3ddb79c3n_UbPuxlkNxvvLycClIkxA xen/include/asm-i386/mpspec.h
   1.112 -3ddb79c2wa0dA_LGigxOelSGbJ284Q xen/include/asm-i386/msr.h
   1.113 -3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen/include/asm-i386/page.h
   1.114 -3e450943kzme29HPCtq5HNOVQkddfw xen/include/asm-i386/param.h
   1.115 -3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/include/asm-i386/pci.h
   1.116 -4022a73diKn2Ax4-R4gzk59lm1YdDg xen/include/asm-i386/pdb.h
   1.117 -3ddb79c3nm2zdzeO6Mj8g7ex3txgGw xen/include/asm-i386/pgalloc.h
   1.118 -3ddb79c2QF5-pZGzuX4QukPCDAl59A xen/include/asm-i386/processor.h
   1.119 -3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-i386/ptrace.h
   1.120 -3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-i386/rwlock.h
   1.121 -3ddb79c2mJI9YuGMScjofPlD8EdtgA xen/include/asm-i386/scatterlist.h
   1.122 -3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-i386/smp.h
   1.123 -3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-i386/smpboot.h
   1.124 -3ddb79c3e9DCEoR-WzNxcOQDzLu7BQ xen/include/asm-i386/softirq.h
   1.125 -3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen/include/asm-i386/spinlock.h
   1.126 -3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-i386/string.h
   1.127 -3ddb79c3ezddh34MdelJpa5tNR00Dw xen/include/asm-i386/system.h
   1.128 -3e397e66xPNc8eaSqC9pPbyAtRGzHA xen/include/asm-i386/time.h
   1.129 -3e450943TfE-iovQIY_tMO_VdGsPhA xen/include/asm-i386/timex.h
   1.130 -3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-i386/types.h
   1.131 -3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-i386/uaccess.h
   1.132 -3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-i386/unaligned.h
   1.133 -404f1b95z0B0jb2IfvZJ7uvmYqsqpg xen/include/asm-x86_64/apic.h
   1.134 -404f1b95_OZH-rw_durHSa_Kgdo95A xen/include/asm-x86_64/apicdef.h
   1.135 -404f1b967UWSPkB0cwT9v-rilNzkHw xen/include/asm-x86_64/atomic.h
   1.136 -404f1b97UDomt73PizniyrCaxVRkXQ xen/include/asm-x86_64/bitops.h
   1.137 -404f1b99W-dMUlFpsvt--tVpQvNgEQ xen/include/asm-x86_64/cache.h
   1.138 -404f1b9b_phpQlRnyiWqP6RodfZDpg xen/include/asm-x86_64/config.h
   1.139 -404f1b9cz7UV611DK6CTY1ZAiwGtTw xen/include/asm-x86_64/cpufeature.h
   1.140 -404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86_64/current.h
   1.141 -404f1b9d854xae6HKv-9W8lLSgROdQ xen/include/asm-x86_64/debugreg.h
   1.142 -404f1b9eRm9rtcM29P5O2nrPFOGSow xen/include/asm-x86_64/delay.h
   1.143 -404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86_64/desc.h
   1.144 -404f1ba05mjpUREtosjzz3PPL5cTJA xen/include/asm-x86_64/dma.h
   1.145 -404f1ba13mnjeZT2ytPm0DB63703nA xen/include/asm-x86_64/domain_page.h
   1.146 -404f1ba31i0gS-cdqvd0RZX1HVnxsA xen/include/asm-x86_64/fixmap.h
   1.147 -404f1ba4KXQ_V7HOkenF04KRU7Tl7w xen/include/asm-x86_64/flushtlb.h
   1.148 -404f1ba5Sqzc22eXORShvCF9-rpMbA xen/include/asm-x86_64/hardirq.h
   1.149 -404f1ba6_nDjomU9HJVvUugj63LvEg xen/include/asm-x86_64/hdreg.h
   1.150 -404f1ba7Q-lF892SDZLWjJ62wmauSA xen/include/asm-x86_64/i387.h
   1.151 -404f1ba8yxfnHH0NWC1B-wmd6bK2wg xen/include/asm-x86_64/ide.h
   1.152 -404f1ba9_7NIylhSRmokesN8TNIiNg xen/include/asm-x86_64/io.h
   1.153 -404f1baaiXXy7vChbzKmluSyJ5LWIw xen/include/asm-x86_64/io_apic.h
   1.154 -404f1baceMqjaYFs7oZoNsPkaZJ0WQ xen/include/asm-x86_64/irq.h
   1.155 -404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86_64/ldt.h
   1.156 -404f1bae_yI5vMg-_k4EySMERbbz2Q xen/include/asm-x86_64/mc146818rtc.h
   1.157 -404f1bafYfNwntXQGIggyj7D6YruJQ xen/include/asm-x86_64/mpspec.h
   1.158 -404f1bb0asrts1dyLQhyARCgzhL0NA xen/include/asm-x86_64/msr.h
   1.159 -404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86_64/page.h
   1.160 -404f1bb2IUaGWD82SrQFaacyBixVFw xen/include/asm-x86_64/param.h
   1.161 -404f1bb3zSQfhMuQ24xNtq9Ed09jGw xen/include/asm-x86_64/pci.h
   1.162 -404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86_64/pda.h
   1.163 -404f1bb5toGAnZVAlJ2fWWMv28DFJQ xen/include/asm-x86_64/pdb.h
   1.164 -404f1bb6pz982jtehZacFKhFUac0ug xen/include/asm-x86_64/pgalloc.h
   1.165 -404f1bb756fZfxk5HDx7J7BW3R-1jQ xen/include/asm-x86_64/processor.h
   1.166 -404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86_64/ptrace.h
   1.167 -404f1bb9K0pcyDrV4Ctva1HUczoueQ xen/include/asm-x86_64/rwlock.h
   1.168 -404f1bbaIdS7vc3sE032fQG6EnY8AQ xen/include/asm-x86_64/scatterlist.h
   1.169 -404f1bbbR5n83SiPof3joEPv9xWPPA xen/include/asm-x86_64/smp.h
   1.170 -404f1bbc67CEECfR8ATd7dPD1ajLng xen/include/asm-x86_64/smpboot.h
   1.171 -404f1bbdXaaPrIp5AUIjC8Hsp2H0Aw xen/include/asm-x86_64/softirq.h
   1.172 -404f1bbeomkO5YarnkIRWxVhlB5EJA xen/include/asm-x86_64/spinlock.h
   1.173 -404f1bbf82VK-kyDVBmR7CTvtTBKaw xen/include/asm-x86_64/string.h
   1.174 -404f1bc0laOnGpDxFpgdiuZpEyOOKw xen/include/asm-x86_64/system.h
   1.175 -404f1bc1FnfxOhmgWYHP97TPqA40Pw xen/include/asm-x86_64/time.h
   1.176 -404f1bc2mx9ZbazcdFh-AN70ZvNMJQ xen/include/asm-x86_64/timex.h
   1.177 -404f1bc3R2o0PIpQme8bDWeHcqHNGw xen/include/asm-x86_64/types.h
   1.178 -404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86_64/uaccess.h
   1.179 -404f1bc5idyWKKROGo_hvHVx58Gmkw xen/include/asm-x86_64/unaligned.h
   1.180 +40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
   1.181 +3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
   1.182 +3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
   1.183 +3ddb79c3OiG9eTsi9Dy3F_OkuRAzKA xen/include/asm-x86/atomic.h
   1.184 +3ddb79c3rM-Ote0Xn6Ytg8Y6YqAG-A xen/include/asm-x86/bitops.h
   1.185 +3ddb79c3KhTI0F_Iw_hRL9QEyOVK-g xen/include/asm-x86/cache.h
   1.186 +404f1b920OQVnrbnXnySS-WxrH9Wzw xen/include/asm-x86/config.h
   1.187 +3ddb79c2LLt11EQHjrd6sB7FUqvFfA xen/include/asm-x86/cpufeature.h
   1.188 +3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/current.h
   1.189 +3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen/include/asm-x86/debugreg.h
   1.190 +3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-x86/delay.h
   1.191 +3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
   1.192 +40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
   1.193 +3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/domain_page.h
   1.194 +3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
   1.195 +3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
   1.196 +3ddb79c39o75zPP0T1aQQ4mNrCAN2w xen/include/asm-x86/hardirq.h
   1.197 +3ddb79c3TMDjkxVndKFKnGiwY0HzDg xen/include/asm-x86/i387.h
   1.198 +3ddb79c3fQ_O3o5NHK2N8AJdk0Ea4Q xen/include/asm-x86/io.h
   1.199 +3ddb79c2TKeScYHQZreTdHqYNLbehQ xen/include/asm-x86/io_apic.h
   1.200 +3ddb79c2L7rTlFzazOLW1XuSZefpFw xen/include/asm-x86/irq.h
   1.201 +404f1b93OjLO4bFfBXYNaJdIqlNz-Q xen/include/asm-x86/ldt.h
   1.202 +3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen/include/asm-x86/mc146818rtc.h
   1.203 +3ddb79c3n_UbPuxlkNxvvLycClIkxA xen/include/asm-x86/mpspec.h
   1.204 +3ddb79c2wa0dA_LGigxOelSGbJ284Q xen/include/asm-x86/msr.h
   1.205 +3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen/include/asm-x86/page.h
   1.206 +3e450943kzme29HPCtq5HNOVQkddfw xen/include/asm-x86/param.h
   1.207 +3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/include/asm-x86/pci.h
   1.208 +4022a73diKn2Ax4-R4gzk59lm1YdDg xen/include/asm-x86/pdb.h
   1.209 +3ddb79c2QF5-pZGzuX4QukPCDAl59A xen/include/asm-x86/processor.h
   1.210 +3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/ptrace.h
   1.211 +3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-x86/rwlock.h
   1.212 +3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
   1.213 +3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
   1.214 +3ddb79c3e9DCEoR-WzNxcOQDzLu7BQ xen/include/asm-x86/softirq.h
   1.215 +3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen/include/asm-x86/spinlock.h
   1.216 +3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/string.h
   1.217 +3ddb79c3ezddh34MdelJpa5tNR00Dw xen/include/asm-x86/system.h
   1.218 +3e397e66xPNc8eaSqC9pPbyAtRGzHA xen/include/asm-x86/time.h
   1.219 +3e450943TfE-iovQIY_tMO_VdGsPhA xen/include/asm-x86/timex.h
   1.220 +3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-x86/types.h
   1.221 +3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/uaccess.h
   1.222 +3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-x86/unaligned.h
   1.223 +404f1b9b_phpQlRnyiWqP6RodfZDpg xen/include/asm-x86/x86_64/config.h
   1.224 +404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
   1.225 +404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
   1.226 +404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
   1.227 +404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h
   1.228 +404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86/x86_64/pda.h
   1.229 +404f1bb756fZfxk5HDx7J7BW3R-1jQ xen/include/asm-x86/x86_64/processor.h
   1.230 +404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h
   1.231 +404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
   1.232  400304fcmRQmDdFYEzDh0wcBba9alg xen/include/hypervisor-ifs/COPYING
   1.233 -404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/hypervisor-ifs/arch-i386/hypervisor-if.h
   1.234 +404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h
   1.235  404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/hypervisor-ifs/arch-x86_64/hypervisor-if.h
   1.236  3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
   1.237  403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
     2.1 --- a/xen/Rules.mk	Thu Jun 10 14:24:30 2004 +0000
     2.2 +++ b/xen/Rules.mk	Thu Jun 10 16:59:06 2004 +0000
     2.3 @@ -4,8 +4,14 @@ debugger    ?= n
     2.4  perfc       ?= n
     2.5  trace       ?= n
     2.6  
     2.7 -COMPILE_ARCH := $(shell uname -m | sed -e s/i.86/i386/)
     2.8 -TARGET_ARCH  ?= $(COMPILE_ARCH)
     2.9 +# Currently supported architectures:
    2.10 +#  {COMPILE,TARGET}_ARCH    := x86
    2.11 +#  {COMPILE,TARGET}_SUBARCH := x86_32 | x86_64
    2.12 +COMPILE_ARCH    := x86
    2.13 +COMPILE_SUBARCH := $(shell uname -m | sed -e s/i.86/x86_32/)
    2.14 +
    2.15 +TARGET_ARCH     ?= $(COMPILE_ARCH)
    2.16 +TARGET_SUBARCH  ?= $(COMPILE_SUBARCH)
    2.17  
    2.18  TARGET  := $(BASEDIR)/xen
    2.19  HDRS    := $(wildcard $(BASEDIR)/include/xen/*.h)
     3.1 --- a/xen/arch/i386/Makefile	Thu Jun 10 14:24:30 2004 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,23 +0,0 @@
     3.4 -
     3.5 -include $(BASEDIR)/Rules.mk
     3.6 -
     3.7 -ifneq ($(debugger),y)
     3.8 -OBJS := $(subst pdb-linux.o,,$(OBJS))
     3.9 -OBJS := $(subst pdb-stub.o,,$(OBJS))
    3.10 -endif
    3.11 -
    3.12 -# What happens here? We link monitor object files together, starting
    3.13 -# at MONITOR_BASE (a very high address). But bootloader cannot put
    3.14 -# things there, so we initially load at LOAD_BASE. A hacky little
    3.15 -# tool called `elf-reloc' is used to modify segment offsets from
    3.16 -# MONITOR_BASE-relative to LOAD_BASE-relative.
    3.17 -# (NB. Linux gets round this by turning its image into raw binary, then 
    3.18 -# wrapping that with a low-memory bootstrapper.)
    3.19 -default: boot/boot.o $(OBJS)
    3.20 -	$(LD) -r -o arch.o $(OBJS)
    3.21 -	$(LD) $(LDFLAGS) boot/boot.o $(ALL_OBJS) -o $(TARGET).dbg
    3.22 -	objcopy -R .note -R .comment -S $(TARGET).dbg $(TARGET)
    3.23 -	$(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
    3.24 -
    3.25 -clean:
    3.26 -	rm -f *.o *~ core boot/*.o boot/*~ boot/core
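
The deleted comment above describes the link-high/load-low trick: the image is
linked at MONITOR_BASE but loaded at LOAD_BASE, and tools/elf-reloc patches the
ELF program headers afterwards. Purely as an illustration of that description
(this is not the actual tools/elf-reloc source, and the choice of p_paddr as
the patched field is an assumption), the rewrite amounts to something like:

    /* Sketch: shift each loadable segment's physical address from a
     * MONITOR_BASE-relative value to a LOAD_BASE-relative one. */
    #include <elf.h>

    static void reloc_segments(Elf32_Ehdr *ehdr,
                               unsigned long monitor_base,
                               unsigned long load_base)
    {
        Elf32_Phdr *phdr = (Elf32_Phdr *)((char *)ehdr + ehdr->e_phoff);
        int i;

        for (i = 0; i < ehdr->e_phnum; i++)
            if (phdr[i].p_type == PT_LOAD)
                phdr[i].p_paddr = phdr[i].p_vaddr - monitor_base + load_base;
    }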
     4.1 --- a/xen/arch/i386/Rules.mk	Thu Jun 10 14:24:30 2004 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,19 +0,0 @@
     4.4 -########################################
     4.5 -# x86-specific definitions
     4.6 -
     4.7 -CC := gcc
     4.8 -LD := ld
     4.9 -# Linker should relocate monitor to this address
    4.10 -MONITOR_BASE := 0xFC500000
    4.11 -# Bootloader should load monitor to this real address
    4.12 -LOAD_BASE    := 0x00100000
    4.13 -CFLAGS  := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing -O3
    4.14 -CFLAGS  += -iwithprefix include -Wall -Werror -DMONITOR_BASE=$(MONITOR_BASE)
    4.15 -CFLAGS  += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
    4.16 -CFLAGS  += -Wno-pointer-arith -Wredundant-decls -m32
    4.17 -TARGET_CPU := i686
    4.18 -CFLAGS += -march=$(TARGET_CPU)
    4.19 -LDARCHFLAGS := --oformat elf32-i386 
    4.20 -LDFLAGS := -T xen.lds -N 
    4.21 -
    4.22 -
     5.1 --- a/xen/arch/i386/acpi.c	Thu Jun 10 14:24:30 2004 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,676 +0,0 @@
     5.4 -/*
     5.5 - *  acpi.c - Architecture-Specific Low-Level ACPI Support
     5.6 - *
     5.7 - *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
     5.8 - *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
     5.9 - *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
    5.10 - *
    5.11 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    5.12 - *
    5.13 - *  This program is free software; you can redistribute it and/or modify
    5.14 - *  it under the terms of the GNU General Public License as published by
    5.15 - *  the Free Software Foundation; either version 2 of the License, or
    5.16 - *  (at your option) any later version.
    5.17 - *
    5.18 - *  This program is distributed in the hope that it will be useful,
    5.19 - *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    5.20 - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    5.21 - *  GNU General Public License for more details.
    5.22 - *
    5.23 - *  You should have received a copy of the GNU General Public License
    5.24 - *  along with this program; if not, write to the Free Software
    5.25 - *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    5.26 - *
    5.27 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    5.28 - */
    5.29 -
    5.30 -#include <xen/config.h>
    5.31 -#include <xen/kernel.h>
    5.32 -#include <xen/init.h>
    5.33 -#include <xen/types.h>
    5.34 -/*#include <xen/stddef.h>*/
    5.35 -#include <xen/slab.h>
    5.36 -#include <xen/pci.h>
    5.37 -/*#include <xen/bootmem.h>*/
    5.38 -#include <xen/irq.h>
    5.39 -#include <xen/acpi.h>
    5.40 -#include <asm/mpspec.h>
    5.41 -#include <asm/io.h>
    5.42 -#include <asm/apic.h>
    5.43 -#include <asm/apicdef.h>
    5.44 -#include <asm/page.h>
    5.45 -/*#include <asm/pgtable.h>*/
    5.46 -#include <asm/pgalloc.h>
    5.47 -#include <asm/io_apic.h>
    5.48 -#include <asm/acpi.h>
    5.49 -/*#include <asm/save_state.h>*/
    5.50 -#include <asm/smpboot.h>
    5.51 -
    5.52 -
    5.53 -#define PREFIX			"ACPI: "
    5.54 -
    5.55 -int acpi_lapic = 0;
    5.56 -int acpi_ioapic = 0;
    5.57 -
    5.58 -/* --------------------------------------------------------------------------
    5.59 -                              Boot-time Configuration
    5.60 -   -------------------------------------------------------------------------- */
    5.61 -
    5.62 -#ifdef CONFIG_ACPI_BOOT
    5.63 -int acpi_noirq __initdata = 0;  /* skip ACPI IRQ initialization */
    5.64 -int acpi_ht __initdata = 1;     /* enable HT */
    5.65 -
    5.66 -enum acpi_irq_model_id		acpi_irq_model;
    5.67 -
    5.68 -
    5.69 -/*
    5.70 - * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
    5.71 - * to map the target physical address. The problem is that set_fixmap()
    5.72 - * provides a single page, and it is possible that the page is not
    5.73 - * sufficient.
    5.74 - * By using this area, we can map up to MAX_IO_APICS pages temporarily,
    5.75 - * i.e. until the next __va_range() call.
    5.76 - *
    5.77 - * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
    5.78 - * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
    5.79 - * count idx down while incrementing the phys address.
    5.80 - */
    5.81 -char *__acpi_map_table(unsigned long phys, unsigned long size)
    5.82 -{
    5.83 -	unsigned long base, offset, mapped_size;
    5.84 -	int idx;
    5.85 -
    5.86 -	if (phys + size < 8*1024*1024) 
    5.87 -		return __va(phys); 
    5.88 -
    5.89 -	offset = phys & (PAGE_SIZE - 1);
    5.90 -	mapped_size = PAGE_SIZE - offset;
    5.91 -	set_fixmap(FIX_ACPI_END, phys);
    5.92 -	base = fix_to_virt(FIX_ACPI_END);
    5.93 -
    5.94 -	/*
    5.95 -	 * Most cases can be covered by the below.
    5.96 -	 */
    5.97 -	idx = FIX_ACPI_END;
    5.98 -	while (mapped_size < size) {
    5.99 -		if (--idx < FIX_ACPI_BEGIN)
   5.100 -			return 0;	/* cannot handle this */
   5.101 -		phys += PAGE_SIZE;
   5.102 -		set_fixmap(idx, phys);
   5.103 -		mapped_size += PAGE_SIZE;
   5.104 -	}
   5.105 -
   5.106 -	return ((unsigned char *) base + offset);
   5.107 -}
   5.108 -
   5.109 -
   5.110 -#ifdef CONFIG_X86_LOCAL_APIC
   5.111 -
   5.112 -static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
   5.113 -
   5.114 -
   5.115 -static int __init
   5.116 -acpi_parse_madt (
   5.117 -	unsigned long		phys_addr,
   5.118 -	unsigned long		size)
   5.119 -{
   5.120 -	struct acpi_table_madt	*madt = NULL;
   5.121 -
   5.122 -	if (!phys_addr || !size)
   5.123 -		return -EINVAL;
   5.124 -
   5.125 -	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
   5.126 -	if (!madt) {
   5.127 -		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
   5.128 -		return -ENODEV;
   5.129 -	}
   5.130 -
   5.131 -	if (madt->lapic_address)
   5.132 -		acpi_lapic_addr = (u64) madt->lapic_address;
   5.133 -
   5.134 -	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
   5.135 -		madt->lapic_address);
   5.136 -
   5.137 -	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
   5.138 -
   5.139 -	return 0;
   5.140 -}
   5.141 -
   5.142 -
   5.143 -static int __init
   5.144 -acpi_parse_lapic (
   5.145 -	acpi_table_entry_header *header)
   5.146 -{
   5.147 -	struct acpi_table_lapic	*processor = NULL;
   5.148 -
   5.149 -	processor = (struct acpi_table_lapic*) header;
   5.150 -	if (!processor)
   5.151 -		return -EINVAL;
   5.152 -
   5.153 -	acpi_table_print_madt_entry(header);
   5.154 -
   5.155 -	mp_register_lapic (
   5.156 -		processor->id,					   /* APIC ID */
   5.157 -		processor->flags.enabled);			  /* Enabled? */
   5.158 -
   5.159 -	return 0;
   5.160 -}
   5.161 -
   5.162 -
   5.163 -static int __init
   5.164 -acpi_parse_lapic_addr_ovr (
   5.165 -	acpi_table_entry_header *header)
   5.166 -{
   5.167 -	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
   5.168 -
   5.169 -	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
   5.170 -	if (!lapic_addr_ovr)
   5.171 -		return -EINVAL;
   5.172 -
   5.173 -	acpi_lapic_addr = lapic_addr_ovr->address;
   5.174 -
   5.175 -	return 0;
   5.176 -}
   5.177 -
   5.178 -static int __init
   5.179 -acpi_parse_lapic_nmi (
   5.180 -	acpi_table_entry_header *header)
   5.181 -{
   5.182 -	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
   5.183 -
   5.184 -	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
   5.185 -	if (!lapic_nmi)
   5.186 -		return -EINVAL;
   5.187 -
   5.188 -	acpi_table_print_madt_entry(header);
   5.189 -
   5.190 -	if (lapic_nmi->lint != 1)
   5.191 -		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
   5.192 -
   5.193 -	return 0;
   5.194 -}
   5.195 -
   5.196 -#endif /*CONFIG_X86_LOCAL_APIC*/
   5.197 -
   5.198 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   5.199 -
   5.200 -static int __init
   5.201 -acpi_parse_ioapic (
   5.202 -	acpi_table_entry_header *header)
   5.203 -{
   5.204 -	struct acpi_table_ioapic *ioapic = NULL;
   5.205 -
   5.206 -	ioapic = (struct acpi_table_ioapic*) header;
   5.207 -	if (!ioapic)
   5.208 -		return -EINVAL;
   5.209 - 
   5.210 -	acpi_table_print_madt_entry(header);
   5.211 -
   5.212 -	mp_register_ioapic (
   5.213 -		ioapic->id,
   5.214 -		ioapic->address,
   5.215 -		ioapic->global_irq_base);
   5.216 - 
   5.217 -	return 0;
   5.218 -}
   5.219 -
   5.220 -
   5.221 -static int __init
   5.222 -acpi_parse_int_src_ovr (
   5.223 -	acpi_table_entry_header *header)
   5.224 -{
   5.225 -	struct acpi_table_int_src_ovr *intsrc = NULL;
   5.226 -
   5.227 -	intsrc = (struct acpi_table_int_src_ovr*) header;
   5.228 -	if (!intsrc)
   5.229 -		return -EINVAL;
   5.230 -
   5.231 -	acpi_table_print_madt_entry(header);
   5.232 -
   5.233 -	mp_override_legacy_irq (
   5.234 -		intsrc->bus_irq,
   5.235 -		intsrc->flags.polarity,
   5.236 -		intsrc->flags.trigger,
   5.237 -		intsrc->global_irq);
   5.238 -
   5.239 -	return 0;
   5.240 -}
   5.241 -
   5.242 -
   5.243 -static int __init
   5.244 -acpi_parse_nmi_src (
   5.245 -	acpi_table_entry_header *header)
   5.246 -{
   5.247 -	struct acpi_table_nmi_src *nmi_src = NULL;
   5.248 -
   5.249 -	nmi_src = (struct acpi_table_nmi_src*) header;
   5.250 -	if (!nmi_src)
   5.251 -		return -EINVAL;
   5.252 -
   5.253 -	acpi_table_print_madt_entry(header);
   5.254 -
   5.255 -	/* TBD: Support nimsrc entries? */
   5.256 -
   5.257 -	return 0;
   5.258 -}
   5.259 -
   5.260 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   5.261 -
   5.262 -
   5.263 -static unsigned long __init
   5.264 -acpi_scan_rsdp (
   5.265 -	unsigned long		start,
   5.266 -	unsigned long		length)
   5.267 -{
   5.268 -	unsigned long		offset = 0;
   5.269 -	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
   5.270 -
   5.271 -	/*
   5.272 -	 * Scan all 16-byte boundaries of the physical memory region for the
   5.273 -	 * RSDP signature.
   5.274 -	 */
   5.275 -	for (offset = 0; offset < length; offset += 16) {
   5.276 -		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
   5.277 -			continue;
   5.278 -		return (start + offset);
   5.279 -	}
   5.280 -
   5.281 -	return 0;
   5.282 -}
   5.283 -
   5.284 -
   5.285 -unsigned long __init
   5.286 -acpi_find_rsdp (void)
   5.287 -{
   5.288 -	unsigned long		rsdp_phys = 0;
   5.289 -
   5.290 -	/*
   5.291 -	 * Scan memory looking for the RSDP signature. First search EBDA (low
   5.292 -	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
   5.293 -	 */
   5.294 -	rsdp_phys = acpi_scan_rsdp (0, 0x400);
   5.295 -	if (!rsdp_phys)
   5.296 -		rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);
   5.297 -
   5.298 -	return rsdp_phys;
   5.299 -}
   5.300 -
   5.301 -
   5.302 -/*
   5.303 - * acpi_boot_init()
   5.304 - *  called from setup_arch(), always.
   5.305 - *	1. maps ACPI tables for later use
   5.306 - *	2. enumerates lapics
   5.307 - *	3. enumerates io-apics
   5.308 - *
   5.309 - * side effects:
   5.310 - * 	acpi_lapic = 1 if LAPIC found
   5.311 - *	acpi_ioapic = 1 if IOAPIC found
   5.312 - *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
   5.313 - *	if acpi_blacklisted() acpi_disabled = 1;
   5.314 - *	acpi_irq_model=...
   5.315 - *	...
   5.316 - *
   5.317 - * return value: (currently ignored)
   5.318 - *	0: success
   5.319 - *	!0: failure
   5.320 - */
   5.321 -int __init
   5.322 -acpi_boot_init (void)
   5.323 -{
   5.324 -	int			result = 0;
   5.325 -
   5.326 -	if (acpi_disabled && !acpi_ht)
   5.327 -		return(1);
   5.328 -
   5.329 -	/*
   5.330 -	 * The default interrupt routing model is PIC (8259).  This gets
   5.331 -	 * overriden if IOAPICs are enumerated (below).
   5.332 -	 */
   5.333 -	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
   5.334 -
   5.335 -	/* 
   5.336 -	 * Initialize the ACPI boot-time table parser.
   5.337 -	 */
   5.338 -	result = acpi_table_init();
   5.339 -	if (result) {
   5.340 -		acpi_disabled = 1;
   5.341 -		return result;
   5.342 -	}
   5.343 -
   5.344 -	result = acpi_blacklisted();
   5.345 -	if (result) {
   5.346 -		printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
   5.347 -		acpi_disabled = 1;
   5.348 -		return result;
   5.349 -	}
   5.350 -
   5.351 -#ifdef CONFIG_X86_LOCAL_APIC
   5.352 -
   5.353 -	/* 
   5.354 -	 * MADT
   5.355 -	 * ----
   5.356 -	 * Parse the Multiple APIC Description Table (MADT), if exists.
   5.357 -	 * Note that this table provides platform SMP configuration 
   5.358 -	 * information -- the successor to MPS tables.
   5.359 -	 */
   5.360 -
   5.361 -	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
   5.362 -	if (!result) {
   5.363 -		return 0;
   5.364 -	}
   5.365 -	else if (result < 0) {
   5.366 -		printk(KERN_ERR PREFIX "Error parsing MADT\n");
   5.367 -		return result;
   5.368 -	}
   5.369 -	else if (result > 1) 
   5.370 -		printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
   5.371 -
   5.372 -	/* 
   5.373 -	 * Local APIC
   5.374 -	 * ----------
   5.375 -	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
   5.376 -	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
   5.377 -	 */
   5.378 -
   5.379 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
   5.380 -	if (result < 0) {
   5.381 -		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
   5.382 -		return result;
   5.383 -	}
   5.384 -
   5.385 -	mp_register_lapic_address(acpi_lapic_addr);
   5.386 -
   5.387 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
   5.388 -	if (!result) { 
   5.389 -		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
   5.390 -		/* TBD: Cleanup to allow fallback to MPS */
   5.391 -		return -ENODEV;
   5.392 -	}
   5.393 -	else if (result < 0) {
   5.394 -		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
   5.395 -		/* TBD: Cleanup to allow fallback to MPS */
   5.396 -		return result;
   5.397 -	}
   5.398 -
   5.399 -	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
   5.400 -	if (result < 0) {
   5.401 -		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   5.402 -		/* TBD: Cleanup to allow fallback to MPS */
   5.403 -		return result;
   5.404 -	}
   5.405 -
   5.406 -	acpi_lapic = 1;
   5.407 -
   5.408 -#endif /*CONFIG_X86_LOCAL_APIC*/
   5.409 -
   5.410 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
   5.411 -
   5.412 -	/* 
   5.413 -	 * I/O APIC 
   5.414 -	 * --------
   5.415 -	 */
   5.416 -
   5.417 -	/*
   5.418 -	 * ACPI interpreter is required to complete interrupt setup,
   5.419 -	 * so if it is off, don't enumerate the io-apics with ACPI.
   5.420 -	 * If MPS is present, it will handle them,
   5.421 -	 * otherwise the system will stay in PIC mode
   5.422 -	 */
   5.423 -	if (acpi_disabled || acpi_noirq) {
   5.424 -		return 1;
   5.425 -	}
   5.426 -
   5.427 -	/*
   5.428 -	 * if "noapic" boot option, don't look for IO-APICs
   5.429 -	 */
   5.430 -	if (ioapic_setup_disabled()) {
   5.431 -		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
   5.432 -			"due to 'noapic' option.\n");
   5.433 -		return 1;
   5.434 -        }
   5.435 -
   5.436 -
   5.437 -	result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
   5.438 -	if (!result) { 
   5.439 -		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
   5.440 -		return -ENODEV;
   5.441 -	}
   5.442 -	else if (result < 0) {
   5.443 -		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
   5.444 -		return result;
   5.445 -	}
   5.446 -
   5.447 -	/* Build a default routing table for legacy (ISA) interrupts. */
   5.448 -	mp_config_acpi_legacy_irqs();
   5.449 -
   5.450 -	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
   5.451 -	if (result < 0) {
   5.452 -		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
   5.453 -		/* TBD: Cleanup to allow fallback to MPS */
   5.454 -		return result;
   5.455 -	}
   5.456 -
   5.457 -	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
   5.458 -	if (result < 0) {
   5.459 -		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   5.460 -		/* TBD: Cleanup to allow fallback to MPS */
   5.461 -		return result;
   5.462 -	}
   5.463 -
   5.464 -	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
   5.465 -
   5.466 -	acpi_irq_balance_set(NULL);
   5.467 -
   5.468 -	acpi_ioapic = 1;
   5.469 -
   5.470 -	if (acpi_lapic && acpi_ioapic)
   5.471 -		smp_found_config = 1;
   5.472 -
   5.473 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
   5.474 -
   5.475 -	return 0;
   5.476 -}
   5.477 -
   5.478 -#endif /*CONFIG_ACPI_BOOT*/
   5.479 -
   5.480 -#ifdef	CONFIG_ACPI_BUS
   5.481 -/*
   5.482 - * "acpi_pic_sci=level" (current default)
   5.483 - * programs the PIC-mode SCI to Level Trigger.
   5.484 - * (NO-OP if the BIOS set Level Trigger already)
   5.485 - *
   5.486 - * If a PIC-mode SCI is not recogznied or gives spurious IRQ7's
   5.487 - * it may require Edge Trigger -- use "acpi_pic_sci=edge"
   5.488 - * (NO-OP if the BIOS set Edge Trigger already)
   5.489 - *
   5.490 - * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
   5.491 - * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
   5.492 - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
   5.493 - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
   5.494 - */
   5.495 -
   5.496 -static __initdata int	acpi_pic_sci_trigger;	/* 0: level, 1: edge */
   5.497 -
   5.498 -void __init
   5.499 -acpi_pic_sci_set_trigger(unsigned int irq)
   5.500 -{
   5.501 -	unsigned char mask = 1 << (irq & 7);
   5.502 -	unsigned int port = 0x4d0 + (irq >> 3);
   5.503 -	unsigned char val = inb(port);
   5.504 -
   5.505 -	
   5.506 -	printk(PREFIX "IRQ%d SCI:", irq);
   5.507 -	if (!(val & mask)) {
   5.508 -		printk(" Edge");
   5.509 -
   5.510 -		if (!acpi_pic_sci_trigger) {
   5.511 -			printk(" set to Level");
   5.512 -			outb(val | mask, port);
   5.513 -		}
   5.514 -	} else {
   5.515 -		printk(" Level");
   5.516 -
   5.517 -		if (acpi_pic_sci_trigger) {
   5.518 -			printk(" set to Edge");
   5.519 -			outb(val | mask, port);
   5.520 -		}
   5.521 -	}
   5.522 -	printk(" Trigger.\n");
   5.523 -}
   5.524 -
   5.525 -int __init
   5.526 -acpi_pic_sci_setup(char *str)
   5.527 -{
   5.528 -	while (str && *str) {
   5.529 -		if (strncmp(str, "level", 5) == 0)
   5.530 -			acpi_pic_sci_trigger = 0;	/* force level trigger */
   5.531 -		if (strncmp(str, "edge", 4) == 0)
   5.532 -			acpi_pic_sci_trigger = 1;	/* force edge trigger */
   5.533 -		str = strchr(str, ',');
   5.534 -		if (str)
   5.535 -			str += strspn(str, ", \t");
   5.536 -	}
   5.537 -	return 1;
   5.538 -}
   5.539 -
   5.540 -__setup("acpi_pic_sci=", acpi_pic_sci_setup);
   5.541 -
   5.542 -#endif /* CONFIG_ACPI_BUS */
   5.543 -
   5.544 -
   5.545 -
   5.546 -/* --------------------------------------------------------------------------
   5.547 -                              Low-Level Sleep Support
   5.548 -   -------------------------------------------------------------------------- */
   5.549 -
   5.550 -#ifdef CONFIG_ACPI_SLEEP
   5.551 -
   5.552 -#define DEBUG
   5.553 -
   5.554 -#ifdef DEBUG
   5.555 -#include <xen/serial.h>
   5.556 -#endif
   5.557 -
   5.558 -/* address in low memory of the wakeup routine. */
   5.559 -unsigned long acpi_wakeup_address = 0;
   5.560 -
   5.561 -/* new page directory that we will be using */
   5.562 -static pmd_t *pmd;
   5.563 -
   5.564 -/* saved page directory */
   5.565 -static pmd_t saved_pmd;
   5.566 -
   5.567 -/* page which we'll use for the new page directory */
   5.568 -static pte_t *ptep;
   5.569 -
   5.570 -extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
   5.571 -
   5.572 -/*
   5.573 - * acpi_create_identity_pmd
   5.574 - *
   5.575 - * Create a new, identity mapped pmd.
   5.576 - *
   5.577 - * Do this by creating new page directory, and marking all the pages as R/W
   5.578 - * Then set it as the new Page Middle Directory.
   5.579 - * And, of course, flush the TLB so it takes effect.
   5.580 - *
   5.581 - * We save the address of the old one, for later restoration.
   5.582 - */
   5.583 -static void acpi_create_identity_pmd (void)
   5.584 -{
   5.585 -	pgd_t *pgd;
   5.586 -	int i;
   5.587 -
   5.588 -	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
   5.589 -
   5.590 -	/* fill page with low mapping */
   5.591 -	for (i = 0; i < PTRS_PER_PTE; i++)
   5.592 -		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
   5.593 -
   5.594 -	pgd = pgd_offset(current->active_mm, 0);
   5.595 -	pmd = pmd_alloc(current->mm,pgd, 0);
   5.596 -
   5.597 -	/* save the old pmd */
   5.598 -	saved_pmd = *pmd;
   5.599 -
   5.600 -	/* set the new one */
   5.601 -	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
   5.602 -
   5.603 -	/* flush the TLB */
   5.604 -	local_flush_tlb();
   5.605 -}
   5.606 -
   5.607 -/*
   5.608 - * acpi_restore_pmd
   5.609 - *
   5.610 - * Restore the old pmd saved by acpi_create_identity_pmd and
   5.611 - * free the page that said function alloc'd
   5.612 - */
   5.613 -static void acpi_restore_pmd (void)
   5.614 -{
   5.615 -	set_pmd(pmd, saved_pmd);
   5.616 -	local_flush_tlb();
   5.617 -	free_page((unsigned long)ptep);
   5.618 -}
   5.619 -
   5.620 -/**
   5.621 - * acpi_save_state_mem - save kernel state
   5.622 - *
   5.623 - * Create an identity mapped page table and copy the wakeup routine to
   5.624 - * low memory.
   5.625 - */
   5.626 -int acpi_save_state_mem (void)
   5.627 -{
   5.628 -	acpi_create_identity_pmd();
   5.629 -	acpi_copy_wakeup_routine(acpi_wakeup_address);
   5.630 -
   5.631 -	return 0;
   5.632 -}
   5.633 -
   5.634 -/**
   5.635 - * acpi_save_state_disk - save kernel state to disk
   5.636 - *
   5.637 - */
   5.638 -int acpi_save_state_disk (void)
   5.639 -{
   5.640 -	return 1;
   5.641 -}
   5.642 -
   5.643 -/*
   5.644 - * acpi_restore_state
   5.645 - */
   5.646 -void acpi_restore_state_mem (void)
   5.647 -{
   5.648 -	acpi_restore_pmd();
   5.649 -}
   5.650 -
   5.651 -/**
   5.652 - * acpi_reserve_bootmem - do _very_ early ACPI initialisation
   5.653 - *
   5.654 - * We allocate a page in low memory for the wakeup
   5.655 - * routine for when we come back from a sleep state. The
   5.656 - * runtime allocator allows specification of <16M pages, but not
   5.657 - * <1M pages.
   5.658 - */
   5.659 -void __init acpi_reserve_bootmem(void)
   5.660 -{
   5.661 -	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
   5.662 -	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
   5.663 -}
   5.664 -
   5.665 -void do_suspend_lowlevel_s4bios(int resume)
   5.666 -{
   5.667 -	if (!resume) {
   5.668 -		save_processor_context();
   5.669 -		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
   5.670 -		acpi_enter_sleep_state_s4bios();
   5.671 -		return;
   5.672 -	}
   5.673 -acpi_sleep_done:
   5.674 -	restore_processor_context();
   5.675 -}
   5.676 -
   5.677 -
   5.678 -#endif /*CONFIG_ACPI_SLEEP*/
   5.679 -
     6.1 --- a/xen/arch/i386/apic.c	Thu Jun 10 14:24:30 2004 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,830 +0,0 @@
     6.4 -/*
     6.5 - *  Local APIC handling, local APIC timers
     6.6 - *
     6.7 - *  (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
     6.8 - *
     6.9 - *  Fixes
    6.10 - *  Maciej W. Rozycki   :   Bits for genuine 82489DX APICs;
    6.11 - *                  thanks to Eric Gilmore
    6.12 - *                  and Rolf G. Tews
    6.13 - *                  for testing these extensively.
    6.14 - *	Maciej W. Rozycki	:	Various updates and fixes.
    6.15 - *	Mikael Pettersson	:	Power Management for UP-APIC.
    6.16 - */
    6.17 -
    6.18 -
    6.19 -#include <xen/config.h>
    6.20 -#include <xen/init.h>
    6.21 -#include <xen/sched.h>
    6.22 -#include <xen/irq.h>
    6.23 -#include <xen/delay.h>
    6.24 -#include <asm/mc146818rtc.h>
    6.25 -#include <asm/msr.h>
    6.26 -#include <xen/errno.h>
    6.27 -#include <asm/atomic.h>
    6.28 -#include <xen/smp.h>
    6.29 -#include <xen/interrupt.h>
    6.30 -#include <asm/mpspec.h>
    6.31 -#include <asm/pgalloc.h>
    6.32 -#include <asm/hardirq.h>
    6.33 -#include <asm/apic.h>
    6.34 -#include <xen/mm.h>
    6.35 -#include <asm/io_apic.h>
    6.36 -#include <asm/timex.h>
    6.37 -#include <xen/ac_timer.h>
    6.38 -#include <xen/perfc.h>
    6.39 -
    6.40 -
    6.41 -/* Using APIC to generate smp_local_timer_interrupt? */
    6.42 -int using_apic_timer = 0;
    6.43 -
    6.44 -static int enabled_via_apicbase;
    6.45 -
    6.46 -int get_maxlvt(void)
    6.47 -{
    6.48 -    unsigned int v, ver, maxlvt;
    6.49 -
    6.50 -    v = apic_read(APIC_LVR);
    6.51 -    ver = GET_APIC_VERSION(v);
    6.52 -    /* 82489DXs do not report # of LVT entries. */
    6.53 -    maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
    6.54 -    return maxlvt;
    6.55 -}
    6.56 -
    6.57 -void clear_local_APIC(void)
    6.58 -{
    6.59 -    int maxlvt;
    6.60 -    unsigned long v;
    6.61 -
    6.62 -    maxlvt = get_maxlvt();
    6.63 -
    6.64 -    /*
    6.65 -     * Masking an LVT entry on a P6 can trigger a local APIC error
    6.66 -     * if the vector is zero. Mask LVTERR first to prevent this.
    6.67 -     */
    6.68 -    if (maxlvt >= 3) {
    6.69 -        v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
    6.70 -        apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
    6.71 -    }
    6.72 -    /*
    6.73 -     * Careful: we have to set masks only first to deassert
    6.74 -     * any level-triggered sources.
    6.75 -     */
    6.76 -    v = apic_read(APIC_LVTT);
    6.77 -    apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
    6.78 -    v = apic_read(APIC_LVT0);
    6.79 -    apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
    6.80 -    v = apic_read(APIC_LVT1);
    6.81 -    apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
    6.82 -    if (maxlvt >= 4) {
    6.83 -        v = apic_read(APIC_LVTPC);
    6.84 -        apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
    6.85 -    }
    6.86 -
    6.87 -    /*
    6.88 -     * Clean APIC state for other OSs:
    6.89 -     */
    6.90 -    apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
    6.91 -    apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
    6.92 -    apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
    6.93 -    if (maxlvt >= 3)
    6.94 -        apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
    6.95 -    if (maxlvt >= 4)
    6.96 -        apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
    6.97 -    v = GET_APIC_VERSION(apic_read(APIC_LVR));
    6.98 -    if (APIC_INTEGRATED(v)) {	/* !82489DX */
    6.99 -        if (maxlvt > 3)
   6.100 -            apic_write(APIC_ESR, 0);
   6.101 -        apic_read(APIC_ESR);
   6.102 -    }
   6.103 -}
   6.104 -
   6.105 -void __init connect_bsp_APIC(void)
   6.106 -{
   6.107 -    if (pic_mode) {
   6.108 -        /*
   6.109 -         * Do not trust the local APIC being empty at bootup.
   6.110 -         */
   6.111 -        clear_local_APIC();
   6.112 -        /*
   6.113 -         * PIC mode, enable APIC mode in the IMCR, i.e.
   6.114 -         * connect BSP's local APIC to INT and NMI lines.
   6.115 -         */
   6.116 -        printk("leaving PIC mode, enabling APIC mode.\n");
   6.117 -        outb(0x70, 0x22);
   6.118 -        outb(0x01, 0x23);
   6.119 -    }
   6.120 -}
   6.121 -
   6.122 -void disconnect_bsp_APIC(void)
   6.123 -{
   6.124 -    if (pic_mode) {
   6.125 -        /*
   6.126 -         * Put the board back into PIC mode (has an effect
   6.127 -         * only on certain older boards).  Note that APIC
   6.128 -         * interrupts, including IPIs, won't work beyond
   6.129 -         * this point!  The only exception are INIT IPIs.
   6.130 -         */
   6.131 -        printk("disabling APIC mode, entering PIC mode.\n");
   6.132 -        outb(0x70, 0x22);
   6.133 -        outb(0x00, 0x23);
   6.134 -    }
   6.135 -}
   6.136 -
   6.137 -void disable_local_APIC(void)
   6.138 -{
   6.139 -    unsigned long value;
   6.140 -
   6.141 -    clear_local_APIC();
   6.142 -
   6.143 -    /*
   6.144 -     * Disable APIC (implies clearing of registers
   6.145 -     * for 82489DX!).
   6.146 -     */
   6.147 -    value = apic_read(APIC_SPIV);
   6.148 -    value &= ~APIC_SPIV_APIC_ENABLED;
   6.149 -    apic_write_around(APIC_SPIV, value);
   6.150 -
   6.151 -    if (enabled_via_apicbase) {
   6.152 -        unsigned int l, h;
   6.153 -        rdmsr(MSR_IA32_APICBASE, l, h);
   6.154 -        l &= ~MSR_IA32_APICBASE_ENABLE;
   6.155 -        wrmsr(MSR_IA32_APICBASE, l, h);
   6.156 -    }
   6.157 -}
   6.158 -
   6.159 -/*
   6.160 - * This is to verify that we're looking at a real local APIC.
   6.161 - * Check these against your board if the CPUs aren't getting
   6.162 - * started for no apparent reason.
   6.163 - */
   6.164 -int __init verify_local_APIC(void)
   6.165 -{
   6.166 -    unsigned int reg0, reg1;
   6.167 -
   6.168 -    /*
   6.169 -     * The version register is read-only in a real APIC.
   6.170 -     */
   6.171 -    reg0 = apic_read(APIC_LVR);
   6.172 -    Dprintk("Getting VERSION: %x\n", reg0);
   6.173 -    apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
   6.174 -    reg1 = apic_read(APIC_LVR);
   6.175 -    Dprintk("Getting VERSION: %x\n", reg1);
   6.176 -
   6.177 -    /*
   6.178 -     * The two version reads above should print the same
   6.179 -     * numbers.  If the second one is different, then we
   6.180 -     * poke at a non-APIC.
   6.181 -     */
   6.182 -    if (reg1 != reg0)
   6.183 -        return 0;
   6.184 -
   6.185 -    /*
   6.186 -     * Check if the version looks reasonably.
   6.187 -     */
   6.188 -    reg1 = GET_APIC_VERSION(reg0);
   6.189 -    if (reg1 == 0x00 || reg1 == 0xff)
   6.190 -        return 0;
   6.191 -    reg1 = get_maxlvt();
   6.192 -    if (reg1 < 0x02 || reg1 == 0xff)
   6.193 -        return 0;
   6.194 -
   6.195 -    /*
   6.196 -     * The ID register is read/write in a real APIC.
   6.197 -     */
   6.198 -    reg0 = apic_read(APIC_ID);
   6.199 -    Dprintk("Getting ID: %x\n", reg0);
   6.200 -    apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
   6.201 -    reg1 = apic_read(APIC_ID);
   6.202 -    Dprintk("Getting ID: %x\n", reg1);
   6.203 -    apic_write(APIC_ID, reg0);
   6.204 -    if (reg1 != (reg0 ^ APIC_ID_MASK))
   6.205 -        return 0;
   6.206 -
   6.207 -    /*
   6.208 -     * The next two are just to see if we have sane values.
   6.209 -     * They're only really relevant if we're in Virtual Wire
   6.210 -     * compatibility mode, but most boxes are anymore.
   6.211 -     */
   6.212 -    reg0 = apic_read(APIC_LVT0);
   6.213 -    Dprintk("Getting LVT0: %x\n", reg0);
   6.214 -    reg1 = apic_read(APIC_LVT1);
   6.215 -    Dprintk("Getting LVT1: %x\n", reg1);
   6.216 -
   6.217 -    return 1;
   6.218 -}
   6.219 -
   6.220 -void __init sync_Arb_IDs(void)
   6.221 -{
   6.222 -    /*
   6.223 -     * Wait for idle.
   6.224 -     */
   6.225 -    apic_wait_icr_idle();
   6.226 -
   6.227 -    Dprintk("Synchronizing Arb IDs.\n");
   6.228 -    apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
   6.229 -                      | APIC_DM_INIT);
   6.230 -}
   6.231 -
   6.232 -extern void __error_in_apic_c (void);
   6.233 -
   6.234 -/*
   6.235 - * WAS: An initial setup of the virtual wire mode.
   6.236 - * NOW: We don't bother doing anything. All we need at this point
   6.237 - * is to receive timer ticks, so that 'jiffies' is incremented.
   6.238 - * If we're SMP, then we can assume BIOS did setup for us.
   6.239 - * If we're UP, then the APIC should be disabled (it is at reset).
   6.240 - * If we're UP and APIC is enabled, then BIOS is clever and has 
   6.241 - * probably done initial interrupt routing for us.
   6.242 - */
   6.243 -void __init init_bsp_APIC(void)
   6.244 -{
   6.245 -}
   6.246 -
   6.247 -static unsigned long calculate_ldr(unsigned long old)
   6.248 -{
   6.249 -    unsigned long id = 1UL << smp_processor_id();
   6.250 -    return (old & ~APIC_LDR_MASK)|SET_APIC_LOGICAL_ID(id);
   6.251 -}
   6.252 -
   6.253 -void __init setup_local_APIC (void)
   6.254 -{
   6.255 -    unsigned long value, ver, maxlvt;
   6.256 -
   6.257 -    value = apic_read(APIC_LVR);
   6.258 -    ver = GET_APIC_VERSION(value);
   6.259 -
   6.260 -    if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
   6.261 -        __error_in_apic_c();
   6.262 -
    6.263 -    /* Double-check whether this APIC is really registered. */
   6.264 -    if (!test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
   6.265 -        BUG();
   6.266 -
   6.267 -    /*
   6.268 -     * Intel recommends to set DFR, LDR and TPR before enabling
   6.269 -     * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
   6.270 -     * document number 292116).  So here it goes...
   6.271 -     */
   6.272 -
   6.273 -    /*
    6.274 -     * In clustered apic mode, the firmware does this for us.
   6.275 -     * Put the APIC into flat delivery mode.
   6.276 -     * Must be "all ones" explicitly for 82489DX.
   6.277 -     */
   6.278 -    apic_write_around(APIC_DFR, APIC_DFR_FLAT);
   6.279 -
   6.280 -    /*
   6.281 -     * Set up the logical destination ID.
   6.282 -     */
   6.283 -    value = apic_read(APIC_LDR);
   6.284 -    apic_write_around(APIC_LDR, calculate_ldr(value));
   6.285 -
   6.286 -    /*
   6.287 -     * Set Task Priority to 'accept all'. We never change this
   6.288 -     * later on.
   6.289 -     */
   6.290 -    value = apic_read(APIC_TASKPRI);
   6.291 -    value &= ~APIC_TPRI_MASK;
   6.292 -    apic_write_around(APIC_TASKPRI, value);
   6.293 -
   6.294 -    /*
   6.295 -     * Now that we are all set up, enable the APIC
   6.296 -     */
   6.297 -    value = apic_read(APIC_SPIV);
   6.298 -    value &= ~APIC_VECTOR_MASK;
   6.299 -    /*
   6.300 -     * Enable APIC
   6.301 -     */
   6.302 -    value |= APIC_SPIV_APIC_ENABLED;
   6.303 -
   6.304 -    /* Enable focus processor (bit==0) */
   6.305 -    value &= ~APIC_SPIV_FOCUS_DISABLED;
   6.306 -
   6.307 -    /* Set spurious IRQ vector */
   6.308 -    value |= SPURIOUS_APIC_VECTOR;
   6.309 -    apic_write_around(APIC_SPIV, value);
   6.310 -
   6.311 -    /*
   6.312 -     * Set up LVT0, LVT1:
   6.313 -     *
   6.314 -     * set up through-local-APIC on the BP's LINT0. This is not
    6.315 -     * strictly necessary in pure symmetric-IO mode, but sometimes
   6.316 -     * we delegate interrupts to the 8259A.
   6.317 -     */
   6.318 -    /*
   6.319 -     * TODO: set up through-local-APIC from through-I/O-APIC? --macro
   6.320 -     */
   6.321 -    value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
   6.322 -    if (!smp_processor_id()) { 
   6.323 -        value = APIC_DM_EXTINT;
   6.324 -        printk("enabled ExtINT on CPU#%d\n", smp_processor_id());
   6.325 -    } else {
   6.326 -        value = APIC_DM_EXTINT | APIC_LVT_MASKED;
   6.327 -        printk("masked ExtINT on CPU#%d\n", smp_processor_id());
   6.328 -    }
   6.329 -    apic_write_around(APIC_LVT0, value);
   6.330 -
   6.331 -    /*
   6.332 -     * only the BP should see the LINT1 NMI signal, obviously.
   6.333 -     */
   6.334 -    if (!smp_processor_id())
   6.335 -        value = APIC_DM_NMI;
   6.336 -    else
   6.337 -        value = APIC_DM_NMI | APIC_LVT_MASKED;
   6.338 -    if (!APIC_INTEGRATED(ver))      /* 82489DX */
   6.339 -        value |= APIC_LVT_LEVEL_TRIGGER;
   6.340 -    apic_write_around(APIC_LVT1, value);
   6.341 -
   6.342 -    if (APIC_INTEGRATED(ver)) {     /* !82489DX */
   6.343 -        maxlvt = get_maxlvt();
   6.344 -        if (maxlvt > 3)     /* Due to the Pentium erratum 3AP. */
   6.345 -            apic_write(APIC_ESR, 0);
   6.346 -        value = apic_read(APIC_ESR);
   6.347 -        printk("ESR value before enabling vector: %08lx\n", value);
   6.348 -
   6.349 -        value = ERROR_APIC_VECTOR;      /* enables sending errors */
   6.350 -        apic_write_around(APIC_LVTERR, value);
   6.351 -        /* spec says clear errors after enabling vector. */
   6.352 -        if (maxlvt > 3)
   6.353 -            apic_write(APIC_ESR, 0);
   6.354 -        value = apic_read(APIC_ESR);
   6.355 -        printk("ESR value after enabling vector: %08lx\n", value);
   6.356 -    } else {
   6.357 -        printk("No ESR for 82489DX.\n");
   6.358 -    }
   6.359 -
   6.360 -    if ( (smp_processor_id() == 0) && (nmi_watchdog == NMI_LOCAL_APIC) )
   6.361 -        setup_apic_nmi_watchdog();
   6.362 -}
   6.363 -
   6.364 -
   6.365 -static inline void apic_pm_init1(void) { }
   6.366 -static inline void apic_pm_init2(void) { }
   6.367 -
   6.368 -
   6.369 -/*
   6.370 - * Detect and enable local APICs on non-SMP boards.
   6.371 - * Original code written by Keir Fraser.
   6.372 - */
   6.373 -
   6.374 -static int __init detect_init_APIC (void)
   6.375 -{
   6.376 -    u32 h, l, features;
   6.377 -    extern void get_cpu_vendor(struct cpuinfo_x86*);
   6.378 -
   6.379 -    /* Workaround for us being called before identify_cpu(). */
   6.380 -    get_cpu_vendor(&boot_cpu_data);
   6.381 -
   6.382 -    switch (boot_cpu_data.x86_vendor) {
   6.383 -    case X86_VENDOR_AMD:
   6.384 -        if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
   6.385 -            break;
   6.386 -        if (boot_cpu_data.x86 == 15 && cpu_has_apic)
   6.387 -            break;
   6.388 -        goto no_apic;
   6.389 -    case X86_VENDOR_INTEL:
   6.390 -        if (boot_cpu_data.x86 == 6 ||
   6.391 -            (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
   6.392 -            (boot_cpu_data.x86 == 5 && cpu_has_apic))
   6.393 -            break;
   6.394 -        goto no_apic;
   6.395 -    default:
   6.396 -        goto no_apic;
   6.397 -    }
   6.398 -
   6.399 -    if (!cpu_has_apic) {
   6.400 -        /*
   6.401 -         * Some BIOSes disable the local APIC in the
   6.402 -         * APIC_BASE MSR. This can only be done in
   6.403 -         * software for Intel P6 and AMD K7 (Model > 1).
   6.404 -         */
   6.405 -        rdmsr(MSR_IA32_APICBASE, l, h);
   6.406 -        if (!(l & MSR_IA32_APICBASE_ENABLE)) {
   6.407 -            printk("Local APIC disabled by BIOS -- reenabling.\n");
   6.408 -            l &= ~MSR_IA32_APICBASE_BASE;
   6.409 -            l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
   6.410 -            wrmsr(MSR_IA32_APICBASE, l, h);
   6.411 -            enabled_via_apicbase = 1;
   6.412 -        }
   6.413 -    }
   6.414 -
   6.415 -    /* The APIC feature bit should now be enabled in `cpuid' */
   6.416 -    features = cpuid_edx(1);
   6.417 -    if (!(features & (1 << X86_FEATURE_APIC))) {
   6.418 -        printk("Could not enable APIC!\n");
   6.419 -        return -1;
   6.420 -    }
   6.421 -
   6.422 -    set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
   6.423 -    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
   6.424 -    boot_cpu_physical_apicid = 0;
   6.425 -
   6.426 -    /* The BIOS may have set up the APIC at some other address */
   6.427 -    rdmsr(MSR_IA32_APICBASE, l, h);
   6.428 -    if (l & MSR_IA32_APICBASE_ENABLE)
   6.429 -        mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
   6.430 -
    6.431 -    if (nmi_watchdog != NMI_NONE)
    6.432 -        nmi_watchdog = NMI_LOCAL_APIC;
   6.433 -
   6.434 -    printk("Found and enabled local APIC!\n");
   6.435 -    apic_pm_init1();
   6.436 -    return 0;
   6.437 -
   6.438 - no_apic:
   6.439 -    printk("No local APIC present or hardware disabled\n");
   6.440 -    return -1;
   6.441 -}
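
The MSR update above is plain bit arithmetic on IA32_APICBASE: clear the base-address field, then OR in the enable bit and the default physical base. A stand-alone sketch of that step, assuming the conventional constant values (enable = bit 11, base field = bits 12-31, default base = 0xFEE00000) rather than quoting the headers in this tree:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed constants; the real definitions live in the asm-* headers. */
    #define MSR_IA32_APICBASE_ENABLE 0x00000800u
    #define MSR_IA32_APICBASE_BASE   0xFFFFF000u
    #define APIC_DEFAULT_PHYS_BASE   0xFEE00000u

    /* Recompute the low MSR word the way detect_init_APIC() does when the
     * BIOS has left the local APIC disabled. */
    static uint32_t reenable_lapic(uint32_t l)
    {
        if (!(l & MSR_IA32_APICBASE_ENABLE)) {
            l &= ~MSR_IA32_APICBASE_BASE;               /* drop whatever base was there */
            l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
        }
        return l;
    }

    int main(void)
    {
        uint32_t before = 0x00000100u;   /* BSP flag set, APIC disabled */
        printf("%08x -> %08x\n", (unsigned)before, (unsigned)reenable_lapic(before));
        return 0;                        /* prints 00000100 -> fee00900 */
    }
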
   6.442 -
   6.443 -void __init init_apic_mappings(void)
   6.444 -{
   6.445 -    unsigned long apic_phys = 0;
   6.446 -
   6.447 -    /*
   6.448 -     * If no local APIC can be found then set up a fake all zeroes page to 
   6.449 -     * simulate the local APIC and another one for the IO-APIC.
   6.450 -     */
   6.451 -    if (!smp_found_config && detect_init_APIC()) {
   6.452 -        apic_phys = get_free_page(GFP_KERNEL);
   6.453 -        apic_phys = __pa(apic_phys);
   6.454 -    } else
   6.455 -        apic_phys = mp_lapic_addr;
   6.456 -
   6.457 -    set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
   6.458 -    Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);
   6.459 -
   6.460 -    /*
   6.461 -     * Fetch the APIC ID of the BSP in case we have a
   6.462 -     * default configuration (or the MP table is broken).
   6.463 -     */
   6.464 -    if (boot_cpu_physical_apicid == -1U)
   6.465 -        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
   6.466 -
   6.467 -#ifdef CONFIG_X86_IO_APIC
   6.468 -    {
   6.469 -        unsigned long ioapic_phys = 0, idx = FIX_IO_APIC_BASE_0;
   6.470 -        int i;
   6.471 -
   6.472 -        for (i = 0; i < nr_ioapics; i++) {
   6.473 -            if (smp_found_config)
   6.474 -                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
   6.475 -            set_fixmap_nocache(idx, ioapic_phys);
   6.476 -            Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
   6.477 -                    __fix_to_virt(idx), ioapic_phys);
   6.478 -            idx++;
   6.479 -        }
   6.480 -    }
   6.481 -#endif
   6.482 -}
   6.483 -
   6.484 -/*****************************************************************************
   6.485 - * APIC calibration
   6.486 - * 
   6.487 - * The APIC is programmed in bus cycles.
    6.488 - * Timeout values should be specified in real time units.
    6.489 - * The "cheapest" time source is the cycle counter.
   6.490 - * 
    6.491 - * Thus, we need a mapping from: bus cycles <- cycle counter <- system time
   6.492 - * 
   6.493 - * The calibration is currently a bit shoddy since it requires the external
    6.494 - * timer chip to generate periodic timer interrupts. 
   6.495 - *****************************************************************************/
   6.496 -
   6.497 -/* used for system time scaling */
   6.498 -static unsigned int bus_freq;
   6.499 -static u32          bus_cycle;   /* length of one bus cycle in pico-seconds */
    6.500 -static u32          bus_scale;   /* scaling factor to convert ns to bus cycles */
   6.501 -
   6.502 -/*
   6.503 - * The timer chip is already set up at HZ interrupts per second here,
   6.504 - * but we do not accept timer interrupts yet. We only allow the BP
   6.505 - * to calibrate.
   6.506 - */
   6.507 -static unsigned int __init get_8254_timer_count(void)
   6.508 -{
   6.509 -    /*extern spinlock_t i8253_lock;*/
   6.510 -    /*unsigned long flags;*/
   6.511 -    unsigned int count;
   6.512 -    /*spin_lock_irqsave(&i8253_lock, flags);*/
   6.513 -    outb_p(0x00, 0x43);
   6.514 -    count = inb_p(0x40);
   6.515 -    count |= inb_p(0x40) << 8;
   6.516 -    /*spin_unlock_irqrestore(&i8253_lock, flags);*/
   6.517 -    return count;
   6.518 -}
   6.519 -
   6.520 -void __init wait_8254_wraparound(void)
   6.521 -{
   6.522 -    unsigned int curr_count, prev_count=~0;
   6.523 -    int delta;
   6.524 -    curr_count = get_8254_timer_count();
   6.525 -    do {
   6.526 -        prev_count = curr_count;
   6.527 -        curr_count = get_8254_timer_count();
   6.528 -        delta = curr_count-prev_count;
   6.529 -        /*
   6.530 -         * This limit for delta seems arbitrary, but it isn't, it's slightly 
   6.531 -         * above the level of error a buggy Mercury/Neptune chipset timer can 
   6.532 -         * cause.
   6.533 -         */
   6.534 -    } while (delta < 300);
   6.535 -}
   6.536 -
   6.537 -/*
   6.538 - * This function sets up the local APIC timer, with a timeout of
    6.539 - * 'clocks' APIC bus clocks. During calibration we actually call
    6.540 - * this function with a very large value and read the current time after
    6.541 - * a well-defined period of time has expired.
   6.542 - *
   6.543 - * Calibration is only performed once, for CPU0!
   6.544 - *
   6.545 - * We do reads before writes even if unnecessary, to get around the
   6.546 - * P5 APIC double write bug.
   6.547 - */
   6.548 -#define APIC_DIVISOR 1
   6.549 -static void __setup_APIC_LVTT(unsigned int clocks)
   6.550 -{
   6.551 -    unsigned int lvtt1_value, tmp_value;
   6.552 -    lvtt1_value = SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV)|LOCAL_TIMER_VECTOR;
   6.553 -    apic_write_around(APIC_LVTT, lvtt1_value);
   6.554 -    tmp_value = apic_read(APIC_TDCR);
   6.555 -    apic_write_around(APIC_TDCR, (tmp_value | APIC_TDR_DIV_1));
   6.556 -    apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
   6.557 -}
   6.558 -
   6.559 -/*
    6.560 - * This is done for every CPU from setup_APIC_clocks() below.
    6.561 - * We set up each local APIC with a zero timeout value for now.
   6.562 - * Unlike Linux, we don't have to wait for slices etc.
   6.563 - */
   6.564 -void setup_APIC_timer(void * data)
   6.565 -{
   6.566 -    unsigned long flags;
   6.567 -    __save_flags(flags);
   6.568 -    __sti();
   6.569 -    __setup_APIC_LVTT(0);
   6.570 -    __restore_flags(flags);
   6.571 -}
   6.572 -
   6.573 -/*
   6.574 - * In this function we calibrate APIC bus clocks to the external timer.
   6.575 - *
    6.576 - * As a result we have the bus speed and CPU speed in Hz.
   6.577 - * 
   6.578 - * We want to do the calibration only once (for CPU0).  CPUs connected by the
   6.579 - * same APIC bus have the very same bus frequency.
   6.580 - *
    6.581 - * This is a bit shoddy since we use the very same periodic timer interrupt
   6.582 - * we try to eliminate to calibrate the APIC. 
   6.583 - */
   6.584 -
   6.585 -int __init calibrate_APIC_clock(void)
   6.586 -{
   6.587 -    unsigned long long t1 = 0, t2 = 0;
   6.588 -    long tt1, tt2;
   6.589 -    long result;
   6.590 -    int i;
   6.591 -    const int LOOPS = HZ/10;
   6.592 -
   6.593 -    printk("Calibrating APIC timer for CPU%d...\n",  smp_processor_id());
   6.594 -
   6.595 -    /* Put whatever arbitrary (but long enough) timeout
   6.596 -     * value into the APIC clock, we just want to get the
   6.597 -     * counter running for calibration. */
   6.598 -    __setup_APIC_LVTT(1000000000);
   6.599 -
   6.600 -    /* The timer chip counts down to zero. Let's wait
   6.601 -     * for a wraparound to start exact measurement:
   6.602 -     * (the current tick might have been already half done) */
   6.603 -    wait_8254_wraparound();
   6.604 -
   6.605 -    /* We wrapped around just now. Let's start: */
   6.606 -    rdtscll(t1);
   6.607 -    tt1 = apic_read(APIC_TMCCT);
   6.608 -
    6.609 -    /* Let's wait LOOPS wraparounds: */
   6.610 -    for (i = 0; i < LOOPS; i++)
   6.611 -        wait_8254_wraparound();
   6.612 -
   6.613 -    tt2 = apic_read(APIC_TMCCT);
   6.614 -    rdtscll(t2);
   6.615 -
   6.616 -    /* The APIC bus clock counter is 32 bits only, it
    6.617 -     * might have overflowed, but note that we use signed
    6.618 -     * longs, thus no extra care needed.
    6.619 -     * (Underflowed, to be exact, as the timer counts down ;) */
   6.620 -    result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
   6.621 -
   6.622 -    printk("..... CPU speed is %ld.%04ld MHz.\n",
   6.623 -           ((long)(t2-t1)/LOOPS) / (1000000/HZ), 
   6.624 -           ((long)(t2-t1)/LOOPS) % (1000000/HZ));
   6.625 -
   6.626 -    printk("..... Bus speed is %ld.%04ld MHz.\n",
   6.627 -           result / (1000000/HZ), 
   6.628 -           result % (1000000/HZ));
   6.629 -
   6.630 -    /*
   6.631 -     * KAF: Moved this to time.c where it's calculated relative to the TSC. 
   6.632 -     * Therefore works on machines with no local APIC.
   6.633 -     */
   6.634 -    /*cpu_freq = (u64)(((t2-t1)/LOOPS)*HZ);*/
   6.635 -
   6.636 -    /* set up multipliers for accurate timer code */
   6.637 -    bus_freq   = result*HZ;
   6.638 -    bus_cycle  = (u32) (1000000000000LL/bus_freq); /* in pico seconds */
   6.639 -    bus_scale  = (1000*262144)/bus_cycle;
   6.640 -
   6.641 -    printk("..... bus_scale = 0x%08X\n", bus_scale);
   6.642 -    /* reset APIC to zero timeout value */
   6.643 -    __setup_APIC_LVTT(0);
   6.644 -    return result;
   6.645 -}
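
The three multipliers computed at the end form a 2^18 fixed-point scheme: bus_cycle is the bus period in picoseconds and bus_scale is the ns-to-bus-cycles factor premultiplied by 2^18, so the reprogramming path below needs only one multiply and one shift. A small stand-alone example of the arithmetic, assuming a nominal 66 MHz APIC bus purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend the calibration loop reported a 66 MHz bus (an assumption
         * for illustration; the real figure comes from the 8254 loop above). */
        uint32_t bus_freq  = 66000000;                                 /* Hz                  */
        uint32_t bus_cycle = (uint32_t)(1000000000000ULL / bus_freq);  /* ps per bus cycle    */
        uint32_t bus_scale = (1000 * 262144) / bus_cycle;              /* (ns->cycles) * 2^18 */

        printf("bus_cycle = %u ps, bus_scale = 0x%08X\n",
               (unsigned)bus_cycle, (unsigned)bus_scale);
        return 0;
    }

With the assumed 66 MHz bus this prints bus_cycle = 15151 ps and bus_scale = 0x00004396 (17302 decimal).
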
   6.646 -
   6.647 -/*
    6.648 - * Initialise the APIC timers for all CPUs.
    6.649 - * We start with the first and find out the processor frequency and bus speed.
   6.650 - */
   6.651 -void __init setup_APIC_clocks (void)
   6.652 -{
   6.653 -    printk("Using local APIC timer interrupts.\n");
   6.654 -    using_apic_timer = 1;
   6.655 -    __cli();
   6.656 -    /* calibrate CPU0 for CPU speed and BUS speed */
   6.657 -    bus_freq = calibrate_APIC_clock();
   6.658 -    /* Now set up the timer for real. */
   6.659 -    setup_APIC_timer((void *)bus_freq);
   6.660 -    __sti();
   6.661 -    /* and update all other cpus */
   6.662 -    smp_call_function(setup_APIC_timer, (void *)bus_freq, 1, 1);
   6.663 -}
   6.664 -
   6.665 -#undef APIC_DIVISOR
   6.666 -
   6.667 -/*
    6.668 - * Reprogram the APIC timer. The timeout value is in ns from start of boot.
    6.669 - * Returns 1 on success.
    6.670 - * Returns 0 if the timeout value is too small or in the past.
   6.671 - */
   6.672 -int reprogram_ac_timer(s_time_t timeout)
   6.673 -{
   6.674 -    s_time_t    now;
   6.675 -    s_time_t    expire;
   6.676 -    u64         apic_tmict;
   6.677 -
   6.678 -    /*
   6.679 -     * We use this value because we don't trust zero (we think it may just
   6.680 -     * cause an immediate interrupt). At least this is guaranteed to hold it
   6.681 -     * off for ages (esp. since the clock ticks on bus clock, not cpu clock!).
   6.682 -     */
   6.683 -    if ( timeout == 0 )
   6.684 -    {
   6.685 -        apic_tmict = 0xffffffff;
   6.686 -        goto reprogram;
   6.687 -    }
   6.688 -
   6.689 -    now = NOW();
   6.690 -    expire = timeout - now; /* value from now */
   6.691 -
   6.692 -    if ( expire <= 0 )
   6.693 -    {
   6.694 -        Dprintk("APICT[%02d] Timeout in the past 0x%08X%08X > 0x%08X%08X\n", 
   6.695 -                smp_processor_id(), (u32)(now>>32), 
   6.696 -                (u32)now, (u32)(timeout>>32),(u32)timeout);
   6.697 -        return 0;
   6.698 -    }
   6.699 -
   6.700 -    /*
   6.701 -     * If we don't have local APIC then we just poll the timer list off the
    6.702 -     * PIT interrupt. Cheesy but good enough to work on e.g. VMware :-)
   6.703 -     */
   6.704 -    if ( !cpu_has_apic )
   6.705 -        return 1;
   6.706 -
   6.707 -    /* conversion to bus units */
   6.708 -    apic_tmict = (((u64)bus_scale) * expire)>>18;
   6.709 -
   6.710 -    if ( apic_tmict >= 0xffffffff )
   6.711 -    {
   6.712 -        Dprintk("APICT[%02d] Timeout value too large\n", smp_processor_id());
   6.713 -        apic_tmict = 0xffffffff;
   6.714 -    }
   6.715 -
   6.716 -    if ( apic_tmict == 0 )
   6.717 -    {
   6.718 -        Dprintk("APICT[%02d] timeout value too small\n", smp_processor_id());
   6.719 -        return 0;
   6.720 -    }
   6.721 -
   6.722 - reprogram:
   6.723 -    /* Program the timer. */
   6.724 -    apic_write(APIC_TMICT, (unsigned long)apic_tmict);
   6.725 -
   6.726 -    return 1;
   6.727 -}
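
Stripped of the hardware access, the core of reprogram_ac_timer() is the fixed-point conversion plus its clamping: a zero timeout (meaning "no deadline") becomes the longest possible countdown, an oversize result is clamped to 32 bits, and a result that rounds to zero is rejected so the caller falls back to polling. A stand-alone sketch of just that arithmetic (the bus_scale value of 17302 carries over from the illustrative 66 MHz example above):

    #include <stdint.h>
    #include <stdio.h>

    /* ns-from-now -> APIC_TMICT count, mirroring the clamping rules above;
     * returns 0 when the timeout cannot be programmed. */
    static uint32_t ns_to_tmict(uint64_t expire_ns, uint32_t bus_scale)
    {
        uint64_t tmict;

        if (expire_ns == 0)
            return 0xffffffffu;          /* "no deadline": hold it off for ages */

        tmict = ((uint64_t)bus_scale * expire_ns) >> 18;
        if (tmict >= 0xffffffffu)
            tmict = 0xffffffffu;         /* too far in the future: clamp        */
        if (tmict == 0)
            return 0;                    /* too soon: caller must poll instead  */
        return (uint32_t)tmict;
    }

    int main(void)
    {
        uint32_t bus_scale = 17302;      /* 66 MHz example from above */
        printf("1 ms  -> %u bus cycles\n", (unsigned)ns_to_tmict(1000000, bus_scale));
        printf("10 ns -> %u (unusable)\n", (unsigned)ns_to_tmict(10, bus_scale));
        return 0;
    }
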
   6.728 -
   6.729 -unsigned int apic_timer_irqs [NR_CPUS];
   6.730 -
   6.731 -void smp_apic_timer_interrupt(struct pt_regs * regs)
   6.732 -{
   6.733 -    int cpu = smp_processor_id();
   6.734 -
   6.735 -    ack_APIC_irq();
   6.736 -
   6.737 -    apic_timer_irqs[cpu]++;
   6.738 -    perfc_incrc(apic_timer);
   6.739 -
   6.740 -    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   6.741 -}
   6.742 -
   6.743 -/*
   6.744 - * This interrupt should _never_ happen with our APIC/SMP architecture
   6.745 - */
   6.746 -asmlinkage void smp_spurious_interrupt(void)
   6.747 -{
   6.748 -    unsigned long v;
   6.749 -
   6.750 -    /*
   6.751 -     * Check if this really is a spurious interrupt and ACK it
   6.752 -     * if it is a vectored one.  Just in case...
   6.753 -     * Spurious interrupts should not be ACKed.
   6.754 -     */
   6.755 -    v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
   6.756 -    if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
   6.757 -        ack_APIC_irq();
   6.758 -
   6.759 -    /* see sw-dev-man vol 3, chapter 7.4.13.5 */
   6.760 -    printk("spurious APIC interrupt on CPU#%d, should never happen.\n",
   6.761 -           smp_processor_id());
   6.762 -}
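
The ISR lookup above is denser than it looks: the in-service bits are spread over eight 32-bit registers spaced 16 bytes apart, so (vector & ~0x1f) >> 1 is exactly (vector / 32) * 16, the byte offset of the right register, and vector & 0x1f is the bit inside it. A tiny stand-alone check of that identity:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* The shift/mask trick must agree with the obvious (v / 32) * 16
         * register offset for every possible vector number. */
        for (unsigned v = 0; v < 256; v++)
            assert(((v & ~0x1fu) >> 1) == (v / 32) * 16);

        unsigned vector = 0xff;          /* e.g. a typical spurious vector */
        printf("vector 0x%02x -> ISR register offset 0x%02x, bit %u\n",
               vector, (vector & ~0x1fu) >> 1, vector & 0x1fu);
        return 0;
    }
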
   6.763 -
   6.764 -/*
   6.765 - * This interrupt should never happen with our APIC/SMP architecture
   6.766 - */
   6.767 -
   6.768 -asmlinkage void smp_error_interrupt(void)
   6.769 -{
   6.770 -    unsigned long v, v1;
   6.771 -
   6.772 -    /* First tickle the hardware, only then report what went on. -- REW */
   6.773 -    v = apic_read(APIC_ESR);
   6.774 -    apic_write(APIC_ESR, 0);
   6.775 -    v1 = apic_read(APIC_ESR);
   6.776 -    ack_APIC_irq();
   6.777 -    atomic_inc(&irq_err_count);
   6.778 -
   6.779 -    /* Here is what the APIC error bits mean:
   6.780 -       0: Send CS error
   6.781 -       1: Receive CS error
   6.782 -       2: Send accept error
   6.783 -       3: Receive accept error
   6.784 -       4: Reserved
   6.785 -       5: Send illegal vector
   6.786 -       6: Received illegal vector
   6.787 -       7: Illegal register address
   6.788 -    */
   6.789 -    printk ("APIC error on CPU%d: %02lx(%02lx)\n",
   6.790 -            smp_processor_id(), v , v1);
   6.791 -}
   6.792 -
   6.793 -/*
   6.794 - * This initializes the IO-APIC and APIC hardware if this is
   6.795 - * a UP kernel.
   6.796 - */
   6.797 -int __init APIC_init_uniprocessor (void)
   6.798 -{
   6.799 -    if (!smp_found_config && !cpu_has_apic)
   6.800 -        return -1;
   6.801 -
   6.802 -    /*
   6.803 -     * Complain if the BIOS pretends there is one.
   6.804 -     */
   6.805 -    if (!cpu_has_apic&&APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]))
   6.806 -    {
   6.807 -        printk("BIOS bug, local APIC #%d not detected!...\n",
   6.808 -               boot_cpu_physical_apicid);
   6.809 -        return -1;
   6.810 -    }
   6.811 -
   6.812 -    verify_local_APIC();
   6.813 -
   6.814 -    connect_bsp_APIC();
   6.815 -
   6.816 -#ifdef CONFIG_SMP
   6.817 -    cpu_online_map = 1;
   6.818 -#endif
   6.819 -    phys_cpu_present_map = 1;
   6.820 -    apic_write_around(APIC_ID, boot_cpu_physical_apicid);
   6.821 -
   6.822 -    apic_pm_init2();
   6.823 -
   6.824 -    setup_local_APIC();
   6.825 -
   6.826 -#ifdef CONFIG_X86_IO_APIC
   6.827 -    if (smp_found_config && nr_ioapics)
   6.828 -        setup_IO_APIC();
   6.829 -#endif
   6.830 -    setup_APIC_clocks();
   6.831 -
   6.832 -    return 0;
   6.833 -}
     7.1 --- a/xen/arch/i386/boot/boot.S	Thu Jun 10 14:24:30 2004 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,249 +0,0 @@
     7.4 -#include <xen/config.h>
     7.5 -#include <hypervisor-ifs/hypervisor-if.h>
     7.6 -#include <asm/page.h>
     7.7 -
     7.8 -#define  SECONDARY_CPU_FLAG 0xA5A5A5A5
     7.9 -                
    7.10 -       	.text
    7.11 -
    7.12 -ENTRY(start)
    7.13 -        jmp hal_entry
    7.14 -
    7.15 -        .align	4
    7.16 -
    7.17 -/*** MULTIBOOT HEADER ****/
    7.18 -        /* Magic number indicating a Multiboot header. */
    7.19 -	.long	0x1BADB002
    7.20 -	/* Flags to bootloader (see Multiboot spec). */
    7.21 -	.long	0x00000002
    7.22 -	/* Checksum: must be the negated sum of the first two fields. */
    7.23 -	.long	-0x1BADB004
    7.24 -        
    7.25 -hal_entry:
    7.26 -        /* Set up a few descriptors: on entry only CS is guaranteed good. */
    7.27 -        lgdt    %cs:nopaging_gdt_descr-__PAGE_OFFSET
    7.28 -        mov     $(__HYPERVISOR_DS),%ecx
    7.29 -        mov     %ecx,%ds
    7.30 -        mov     %ecx,%es
    7.31 -        mov     %ecx,%fs
    7.32 -        mov     %ecx,%gs
    7.33 -        ljmp    $(__HYPERVISOR_CS),$(1f)-__PAGE_OFFSET
    7.34 -1:      lss     stack_start-__PAGE_OFFSET,%esp
    7.35 -
    7.36 -        /* Reset EFLAGS (subsumes CLI and CLD). */
    7.37 -	pushl	$0
    7.38 -	popf
    7.39 -
    7.40 -        /* CPU type checks. We need P6+. */
    7.41 -        mov     $0x200000,%edx
    7.42 -        pushfl
    7.43 -        pop     %ecx
    7.44 -        and     %edx,%ecx
    7.45 -        jne     bad_cpu            # ID bit should be clear
    7.46 -        pushl   %edx
    7.47 -        popfl
    7.48 -        pushfl
    7.49 -        pop     %ecx
    7.50 -        and     %edx,%ecx
    7.51 -        je      bad_cpu            # ID bit should be set
    7.52 -
    7.53 -        /* Set up CR0. */
    7.54 -        mov     %cr0,%ecx
    7.55 -        and     $0x00000011,%ecx   # save ET and PE
    7.56 -        or      $0x00050022,%ecx   # set AM, WP, NE and MP
    7.57 -        mov     %ecx,%cr0
    7.58 -
    7.59 -        /* Set up FPU. */
    7.60 -        fninit
    7.61 -        
    7.62 -        /* Set up CR4, except global flag which Intel requires should be     */
    7.63 -        /* left until after paging is enabled (IA32 Manual Vol. 3, Sec. 2.5) */
    7.64 -        mov     %cr4,%ecx
    7.65 -        or      mmu_cr4_features-__PAGE_OFFSET,%ecx
    7.66 -        mov     %ecx,mmu_cr4_features-__PAGE_OFFSET
    7.67 -        and     $0x7f,%ecx /* disable GLOBAL bit */
    7.68 -        mov     %ecx,%cr4
    7.69 -                
    7.70 -#ifdef CONFIG_SMP
    7.71 -        /* Is this a non-boot processor? */
    7.72 -        cmp     $(SECONDARY_CPU_FLAG),%ebx
    7.73 -        jne     continue_boot_cpu
    7.74 -        
    7.75 -        call    start_paging
    7.76 -        lidt    idt_descr                        
    7.77 -        jmp     start_secondary
    7.78 -#endif
    7.79 -        
    7.80 -continue_boot_cpu:
    7.81 -	add     $__PAGE_OFFSET,%ebx
    7.82 -        push    %ebx /* Multiboot info struct */
    7.83 -        push    %eax /* Multiboot magic value */
    7.84 -
    7.85 -        /* Initialize BSS (no nasty surprises!) */
    7.86 -        mov     $__bss_start-__PAGE_OFFSET,%edi
    7.87 -        mov     $_end-__PAGE_OFFSET,%ecx
    7.88 -        sub     %edi,%ecx
    7.89 -        xor     %eax,%eax
    7.90 -        rep     stosb
    7.91 -
    7.92 -        /* Copy all modules (dom0 + initrd if present) out of the Xen heap */
    7.93 -        mov     (%esp),%eax
    7.94 -        cmp     $0x2BADB002,%eax
    7.95 -        jne     skip_dom0_copy
    7.96 -        sub     $__PAGE_OFFSET,%ebx          /* turn back into a phys addr */
    7.97 -        mov     0x14(%ebx),%edi              /* mbi->mods_count */
    7.98 -        dec     %edi                         /* mbi->mods_count-- */
    7.99 -        jb      skip_dom0_copy               /* skip if no modules */
   7.100 -        mov     0x18(%ebx),%eax              /* mbi->mods_addr */
   7.101 -        mov     (%eax),%ebx                  /* %ebx = mod[0]->mod_start */
   7.102 -        shl     $4,%edi                    
   7.103 -        add     %edi,%eax
   7.104 -        mov     0x4(%eax),%eax               /* %eax = mod[mod_count-1]->end */
   7.105 -        mov     %eax,%ecx
   7.106 -        sub     %ebx,%ecx                    /* %ecx = byte len of all mods */
   7.107 -        mov     $(MAX_DIRECTMAP_ADDRESS), %edi
   7.108 -        add     %ecx, %edi                   /* %edi = src + length */        
   7.109 -        shr     $2,%ecx                      /* %ecx = length/4 */
   7.110 -1:      sub     $4,%eax                      /* %eax = src, %edi = dst */
   7.111 -        sub     $4,%edi
   7.112 -        mov     (%eax),%ebx
   7.113 -        mov     %ebx,(%edi)
   7.114 -        loop 1b
   7.115 -skip_dom0_copy:              
   7.116 -
   7.117 -        /* Initialize low and high mappings of all memory with 4MB pages */
   7.118 -        mov     $idle_pg_table-__PAGE_OFFSET,%edi
   7.119 -        mov     $0x1e3,%eax                  /* PRESENT+RW+A+D+4MB+GLOBAL */
   7.120 -1:      mov     %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
   7.121 -        stosl                                /* low mapping */
   7.122 -        add     $(1<<L2_PAGETABLE_SHIFT),%eax
   7.123 -        cmp     $MAX_DIRECTMAP_ADDRESS+0x1e3,%eax
   7.124 -        jne     1b
   7.125 -
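
Rendered as C, the loop above does the following. This is a hedged sketch reusing the symbols the assembly uses (L2_PAGETABLE_SHIFT is 22 for the 4MB pages used here), not code from this tree:

    /* Fill the initial page directory: every 4MB of physical memory up to
     * MAX_DIRECTMAP_ADDRESS gets one entry in the identity (low) mapping and
     * one in the high mapping at __PAGE_OFFSET.
     * 0x1e3 = PRESENT | RW | ACCESSED | DIRTY | 4MB | GLOBAL. */
    static void init_idle_pg_table(unsigned long *pgd)   /* pgd == idle_pg_table */
    {
        unsigned long pa;
        for (pa = 0; pa < MAX_DIRECTMAP_ADDRESS; pa += 1UL << L2_PAGETABLE_SHIFT)
        {
            unsigned long pde = pa | 0x1e3;
            pgd[pa >> L2_PAGETABLE_SHIFT]                   = pde;  /* low mapping  */
            pgd[(pa + __PAGE_OFFSET) >> L2_PAGETABLE_SHIFT] = pde;  /* high mapping */
        }
    }
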
   7.126 -        call    start_paging        
   7.127 -        call    setup_idt
   7.128 -        lidt    idt_descr
   7.129 -                
   7.130 -        /* Call into main C routine. This should never return.*/
   7.131 -       	call	cmain
   7.132 -        ud2     /* Force a panic (invalid opcode). */
   7.133 -
   7.134 -start_paging:
   7.135 -        mov     $idle_pg_table-__PAGE_OFFSET,%eax
   7.136 -        mov     %eax,%cr3
   7.137 -        mov     %cr0,%eax
   7.138 -        or      $0x80010000,%eax /* set PG and WP bits */
   7.139 -        mov     %eax,%cr0
   7.140 -        jmp     1f
   7.141 -1:      /* Install relocated selectors (FS/GS unused). */
   7.142 -        lgdt    gdt_descr
   7.143 -        mov     $(__HYPERVISOR_DS),%ecx
   7.144 -        mov     %ecx,%ds
   7.145 -        mov     %ecx,%es
   7.146 -        mov     %ecx,%ss
   7.147 -        ljmp    $(__HYPERVISOR_CS),$1f
   7.148 -1:      /* Paging enabled, so we can now enable GLOBAL mappings in CR4. */
   7.149 -        movl    mmu_cr4_features,%ecx
   7.150 -        movl    %ecx,%cr4
   7.151 -        /* Relocate ESP */
   7.152 -        add     $__PAGE_OFFSET,%esp
   7.153 -        /* Relocate EIP via return jump */
   7.154 -        pop     %ecx
   7.155 -        add     $__PAGE_OFFSET,%ecx
   7.156 -        jmp     *%ecx
   7.157 -    
   7.158 -            
   7.159 -/*** INTERRUPT INITIALISATION ***/
   7.160 -        
   7.161 -setup_idt:
   7.162 -        lea     ignore_int,%edx
   7.163 -        mov     $(__HYPERVISOR_CS << 16),%eax
   7.164 -        mov     %dx,%ax            /* selector = 0x0010 = cs */
   7.165 -        mov     $0x8E00,%dx        /* interrupt gate - dpl=0, present */
   7.166 -
   7.167 -        lea     SYMBOL_NAME(idt_table),%edi
   7.168 -        mov     $256,%ecx
   7.169 -1:      mov     %eax,(%edi)
   7.170 -        mov     %edx,4(%edi)
   7.171 -        add     $8,%edi
   7.172 -        loop    1b
   7.173 -        ret
   7.174 -
   7.175 -/* This is the default interrupt handler. */
   7.176 -int_msg:
   7.177 -        .asciz "Unknown interrupt\n"
   7.178 -        ALIGN
   7.179 -ignore_int:
   7.180 -        cld
   7.181 -        push    %eax
   7.182 -        push    %ecx
   7.183 -        push    %edx
   7.184 -        pushl   %es
   7.185 -        pushl   %ds
   7.186 -        mov     $(__HYPERVISOR_DS),%eax
   7.187 -        mov     %eax,%ds
   7.188 -        mov     %eax,%es
   7.189 -        pushl   $int_msg
   7.190 -        call    SYMBOL_NAME(printf)
   7.191 -1:      jmp     1b
   7.192 -
   7.193 -bad_cpu_msg:
   7.194 -        .asciz  "Bad CPU type. Need P6+."
   7.195 -        ALIGN
   7.196 -bad_cpu:
   7.197 -        pushl   $bad_cpu_msg
   7.198 -        call    SYMBOL_NAME(printf)
   7.199 -1:      jmp     1b
   7.200 -        
   7.201 -/*** STACK LOCATION ***/
   7.202 -        
   7.203 -ENTRY(stack_start)
   7.204 -        .long SYMBOL_NAME(cpu0_stack) + 8100 - __PAGE_OFFSET
   7.205 -        .long __HYPERVISOR_DS
   7.206 -        
   7.207 -/*** DESCRIPTOR TABLES ***/
   7.208 -
   7.209 -.globl SYMBOL_NAME(idt)
   7.210 -.globl SYMBOL_NAME(gdt)        
   7.211 -
   7.212 -        ALIGN
   7.213 -        
   7.214 -        .word   0    
   7.215 -idt_descr:
   7.216 -	.word	256*8-1
   7.217 -SYMBOL_NAME(idt):
   7.218 -        .long	SYMBOL_NAME(idt_table)
   7.219 -
   7.220 -        .word   0
   7.221 -gdt_descr:
   7.222 -	.word	(LAST_RESERVED_GDT_ENTRY*8)+7
   7.223 -SYMBOL_NAME(gdt):       
   7.224 -        .long   SYMBOL_NAME(gdt_table)	/* gdt base */
   7.225 -
   7.226 -        .word   0
   7.227 -nopaging_gdt_descr:
   7.228 -        .word   (LAST_RESERVED_GDT_ENTRY*8)+7
   7.229 -        .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
   7.230 -        
   7.231 -        ALIGN
   7.232 -/* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
   7.233 -/*     machine->physical mapping table. Ring 0 can access all memory.    */
   7.234 -ENTRY(gdt_table)
   7.235 -        .fill FIRST_RESERVED_GDT_ENTRY,8,0
   7.236 -        .quad 0x0000000000000000     /* unused */
   7.237 -        .quad 0x00cf9a000000ffff     /* 0x0808 ring 0 4.00GB code at 0x0 */
   7.238 -        .quad 0x00cf92000000ffff     /* 0x0810 ring 0 4.00GB data at 0x0 */
   7.239 -        .quad 0x00cfba000000c3ff     /* 0x0819 ring 1 3.95GB code at 0x0 */
   7.240 -        .quad 0x00cfb2000000c3ff     /* 0x0821 ring 1 3.95GB data at 0x0 */
   7.241 -        .quad 0x00cffa000000c3ff     /* 0x082b ring 3 3.95GB code at 0x0 */
   7.242 -        .quad 0x00cff2000000c3ff     /* 0x0833 ring 3 3.95GB data at 0x0 */
   7.243 -        .quad 0x0000000000000000     /* unused                           */
   7.244 -        .fill 2*NR_CPUS,8,0          /* space for TSS and LDT per CPU    */
   7.245 -
   7.246 -        .org 0x1000
   7.247 -ENTRY(idle_pg_table) # Initial page directory is 4kB
   7.248 -        .org 0x2000
   7.249 -ENTRY(cpu0_stack)    # Initial stack is 8kB
   7.250 -        .org 0x4000
   7.251 -ENTRY(stext)
   7.252 -ENTRY(_stext)
     8.1 --- a/xen/arch/i386/delay.c	Thu Jun 10 14:24:30 2004 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,29 +0,0 @@
     8.4 -/*
     8.5 - *	Precise Delay Loops for i386
     8.6 - *
     8.7 - *	Copyright (C) 1993 Linus Torvalds
     8.8 - *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
     8.9 - *
    8.10 - *	The __delay function must _NOT_ be inlined as its execution time
    8.11 - *	depends wildly on alignment on many x86 processors. The additional
    8.12 - *	jump magic is needed to get the timing stable on all the CPU's
    8.13 - *	we have to worry about.
    8.14 - */
    8.15 -
    8.16 -#include <xen/config.h>
    8.17 -#include <xen/delay.h>
    8.18 -#include <asm/msr.h>
    8.19 -#include <asm/processor.h>
    8.20 -
    8.21 -void __udelay(unsigned long usecs)
    8.22 -{
    8.23 -    unsigned long ticks = usecs * ticks_per_usec;
    8.24 -    unsigned long s, e;
    8.25 -
    8.26 -    rdtscl(s);
    8.27 -    do
    8.28 -    {
    8.29 -        rep_nop();
    8.30 -        rdtscl(e);
    8.31 -    } while ((e-s) < ticks);
    8.32 -}
     9.1 --- a/xen/arch/i386/domain_page.c	Thu Jun 10 14:24:30 2004 +0000
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,81 +0,0 @@
     9.4 -/******************************************************************************
      9.5 - * domain_page.c
     9.6 - * 
     9.7 - * Allow temporary mapping of domain pages. Based on ideas from the
     9.8 - * Linux PKMAP code -- the copyrights and credits are retained below.
     9.9 - */
    9.10 -
    9.11 -/*
    9.12 - * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
    9.13 - *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de *
    9.14 - * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
    9.15 - */
    9.16 -
    9.17 -#include <xen/config.h>
    9.18 -#include <xen/sched.h>
    9.19 -#include <xen/mm.h>
    9.20 -#include <xen/perfc.h>
    9.21 -#include <asm/domain_page.h>
    9.22 -#include <asm/pgalloc.h>
    9.23 -
    9.24 -unsigned long *mapcache;
    9.25 -static unsigned int map_idx, shadow_map_idx[NR_CPUS];
    9.26 -static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
    9.27 -
    9.28 -/* Use a spare PTE bit to mark entries ready for recycling. */
    9.29 -#define READY_FOR_TLB_FLUSH (1<<10)
    9.30 -
    9.31 -static void flush_all_ready_maps(void)
    9.32 -{
    9.33 -    unsigned long *cache = mapcache;
    9.34 -
    9.35 -    /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
    9.36 -    do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
    9.37 -    while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
    9.38 -
    9.39 -    perfc_incrc(domain_page_tlb_flush);
    9.40 -    local_flush_tlb();
    9.41 -}
    9.42 -
    9.43 -
    9.44 -void *map_domain_mem(unsigned long pa)
    9.45 -{
    9.46 -    unsigned long va;
    9.47 -    unsigned int idx, cpu = smp_processor_id();
    9.48 -    unsigned long *cache = mapcache;
    9.49 -    unsigned long flags;
    9.50 -
    9.51 -    perfc_incrc(map_domain_mem_count);
    9.52 -
    9.53 -    spin_lock_irqsave(&map_lock, flags);
    9.54 -
    9.55 -    /* Has some other CPU caused a wrap? We must flush if so. */
    9.56 -    if ( map_idx < shadow_map_idx[cpu] )
    9.57 -    {
    9.58 -        perfc_incrc(domain_page_tlb_flush);
    9.59 -        local_flush_tlb();
    9.60 -    }
    9.61 -
    9.62 -    for ( ; ; )
    9.63 -    {
    9.64 -        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
    9.65 -        if ( idx == 0 ) flush_all_ready_maps();
    9.66 -        if ( cache[idx] == 0 ) break;
    9.67 -    }
    9.68 -
    9.69 -    cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;
    9.70 -
    9.71 -    spin_unlock_irqrestore(&map_lock, flags);
    9.72 -
    9.73 -    shadow_map_idx[cpu] = idx;
    9.74 -
    9.75 -    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
    9.76 -    return (void *)va;
    9.77 -}
    9.78 -
    9.79 -void unmap_domain_mem(void *va)
    9.80 -{
    9.81 -    unsigned int idx;
    9.82 -    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    9.83 -    mapcache[idx] |= READY_FOR_TLB_FLUSH;
    9.84 -}
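
The interface above is intentionally tiny: map a physical address into one of the mapcache PTE slots, use the returned virtual address, then mark the slot for lazy recycling at the next flush. A hedged usage sketch (peek_domain_word is a made-up helper, not part of this changeset):

    #include <xen/config.h>
    #include <xen/mm.h>
    #include <asm/domain_page.h>

    /* Read one word of domain memory at physical address 'pa' through a
     * temporary mapcache mapping. */
    static unsigned long peek_domain_word(unsigned long pa)
    {
        unsigned long *va = map_domain_mem(pa);  /* grabs a free mapcache slot          */
        unsigned long val = *va;                 /* 'pa' is visible at 'va' here        */
        unmap_domain_mem(va);                    /* slot recycled at the next TLB flush */
        return val;
    }
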
    10.1 --- a/xen/arch/i386/entry.S	Thu Jun 10 14:24:30 2004 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,736 +0,0 @@
    10.4 -/*
    10.5 - *  linux/arch/i386/entry.S
    10.6 - *
    10.7 - *  Copyright (C) 1991, 1992  Linus Torvalds
    10.8 - */
    10.9 -
   10.10 -/*
   10.11 - * entry.S contains the system-call and fault low-level handling routines.
   10.12 - * This also contains the timer-interrupt handler, as well as all interrupts
   10.13 - * and faults that can result in a task-switch.
   10.14 - *
   10.15 - * Stack layout in 'ret_from_system_call':
   10.16 - *	 0(%esp) - %ebx
   10.17 - *	 4(%esp) - %ecx
   10.18 - *	 8(%esp) - %edx
   10.19 - *       C(%esp) - %esi
   10.20 - *	10(%esp) - %edi
   10.21 - *	14(%esp) - %ebp
   10.22 - *	18(%esp) - %eax
   10.23 - *	1C(%esp) - %ds
   10.24 - *	20(%esp) - %es
   10.25 - *	24(%esp) - %fs
   10.26 - *	28(%esp) - %gs
   10.27 - *	2C(%esp) - orig_eax
   10.28 - *	30(%esp) - %eip
   10.29 - *	34(%esp) - %cs
   10.30 - *	38(%esp) - %eflags
   10.31 - *	3C(%esp) - %oldesp
   10.32 - *	40(%esp) - %oldss
   10.33 - *
   10.34 - * "current" is in register %ebx during any slow entries.
   10.35 - */
   10.36 -/* The idea for callbacks from monitor -> guest OS.
   10.37 - * 
   10.38 - * First, we require that all callbacks (either via a supplied
   10.39 - * interrupt-descriptor-table, or via the special event or failsafe callbacks
   10.40 - * in the shared-info-structure) are to ring 1. This just makes life easier,
   10.41 - * in that it means we don't have to do messy GDT/LDT lookups to find
    10.42 - * out the privilege level of the return code-selector. That code
   10.43 - * would just be a hassle to write, and would need to account for running
   10.44 - * off the end of the GDT/LDT, for example. For all callbacks we check
   10.45 - * that the provided
    10.46 - * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as we
   10.47 - * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
   10.48 - * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
   10.49 - * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
   10.50 - * than the correct ring) and bad things are bound to ensue -- IRET is
   10.51 - * likely to fault, and we may end up killing the domain (no harm can
   10.52 - * come to the hypervisor itself, though).
   10.53 - *      
   10.54 - * When doing a callback, we check if the return CS is in ring 0. If so,
   10.55 - * callback is delayed until next return to ring != 0.
   10.56 - * If return CS is in ring 1, then we create a callback frame
   10.57 - * starting at return SS/ESP. The base of the frame does an intra-privilege
   10.58 - * interrupt-return.
   10.59 - * If return CS is in ring > 1, we create a callback frame starting
   10.60 - * at SS/ESP taken from appropriate section of the current TSS. The base
   10.61 - * of the frame does an inter-privilege interrupt-return.
   10.62 - * 
   10.63 - * Note that the "failsafe callback" uses a special stackframe:
   10.64 - * { return_DS, return_ES, return_FS, return_GS, return_EIP,
   10.65 - *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
   10.66 - * That is, original values for DS/ES/FS/GS are placed on stack rather than
   10.67 - * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
   10.68 - * saved/restored in guest OS. Furthermore, if we load them we may cause
   10.69 - * a fault if they are invalid, which is a hassle to deal with. We avoid
   10.70 - * that problem if we don't load them :-) This property allows us to use
   10.71 - * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
   10.72 - * on return to ring != 0, we can simply package it up as a return via
   10.73 - * the failsafe callback, and let the guest OS sort it out (perhaps by
   10.74 - * killing an application process). Note that we also do this for any
   10.75 - * faulting IRET -- just let the guest OS handle it via the event
   10.76 - * callback.
   10.77 - *
   10.78 - * We terminate a domain in the following cases:
   10.79 - *  - creating a callback stack frame (due to bad ring-1 stack).
   10.80 - *  - faulting IRET on entry to failsafe callback handler.
   10.81 - * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
   10.82 - * handler in good order (absolutely no faults allowed!).
   10.83 - */
   10.84 -
   10.85 -#include <xen/config.h>
   10.86 -#include <xen/errno.h>
   10.87 -#include <hypervisor-ifs/hypervisor-if.h>
   10.88 -
   10.89 -EBX		= 0x00
   10.90 -ECX		= 0x04
   10.91 -EDX		= 0x08
   10.92 -ESI		= 0x0C
   10.93 -EDI		= 0x10
   10.94 -EBP		= 0x14
   10.95 -EAX		= 0x18
   10.96 -DS		= 0x1C
   10.97 -ES		= 0x20
   10.98 -FS              = 0x24
   10.99 -GS              = 0x28
  10.100 -ORIG_EAX	= 0x2C
  10.101 -EIP		= 0x30
  10.102 -CS		= 0x34
  10.103 -EFLAGS		= 0x38
  10.104 -OLDESP		= 0x3C
  10.105 -OLDSS		= 0x40
  10.106 -
  10.107 -/* Offsets in task_struct */
  10.108 -PROCESSOR       =  0
  10.109 -HYP_EVENTS      =  2
  10.110 -SHARED_INFO     =  4
  10.111 -EVENT_SEL       =  8
  10.112 -EVENT_ADDR      = 12
  10.113 -FAILSAFE_BUFFER = 16
  10.114 -FAILSAFE_SEL    = 32
  10.115 -FAILSAFE_ADDR   = 36
  10.116 -
  10.117 -/* Offsets in shared_info_t */
  10.118 -#define UPCALL_PENDING /* 0 */
  10.119 -#define UPCALL_MASK       1
  10.120 -
  10.121 -/* Offsets in guest_trap_bounce */
  10.122 -GTB_ERROR_CODE   =  0
  10.123 -GTB_CR2          =  4
  10.124 -GTB_FLAGS        =  8
  10.125 -GTB_CS           = 10
  10.126 -GTB_EIP          = 12
  10.127 -GTBF_TRAP        =  1
  10.128 -GTBF_TRAP_NOCODE =  2
  10.129 -GTBF_TRAP_CR2    =  4
  10.130 -                        
  10.131 -CF_MASK		= 0x00000001
  10.132 -IF_MASK		= 0x00000200
  10.133 -NT_MASK		= 0x00004000
  10.134 -
  10.135 -
  10.136 -        
  10.137 -#define SAVE_ALL_NOSEGREGS \
  10.138 -        cld; \
  10.139 -        pushl %gs; \
  10.140 -        pushl %fs; \
  10.141 -        pushl %es; \
  10.142 -        pushl %ds; \
  10.143 -        pushl %eax; \
  10.144 -        pushl %ebp; \
  10.145 -        pushl %edi; \
  10.146 -        pushl %esi; \
  10.147 -        pushl %edx; \
  10.148 -        pushl %ecx; \
  10.149 -        pushl %ebx; \
  10.150 -
  10.151 -#define SAVE_ALL \
  10.152 -        SAVE_ALL_NOSEGREGS \
  10.153 -        movl $(__HYPERVISOR_DS),%edx; \
  10.154 -        movl %edx,%ds; \
  10.155 -        movl %edx,%es; \
  10.156 -        movl %edx,%fs; \
  10.157 -        movl %edx,%gs; \
  10.158 -        sti;
  10.159 -
  10.160 -#define GET_CURRENT(reg)   \
  10.161 -        movl $4096-4, reg; \
  10.162 -        orl  %esp, reg;    \
  10.163 -        andl $~3,reg;      \
  10.164 -        movl (reg),reg;
  10.165 -
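
GET_CURRENT() depends on the convention that a pointer to the current task_struct is stored in the last word of the 4kB block the stack pointer currently sits in; the OR/AND sequence computes that word's address from %esp alone. A rough C equivalent of what the macro computes (a sketch, assuming the context-switch code elsewhere keeps that word up to date):

    struct task_struct;   /* defined elsewhere in the tree */

    /* C equivalent of GET_CURRENT(reg) for a given stack pointer value. */
    static inline struct task_struct *get_current_from_esp(unsigned long esp)
    {
        unsigned long slot = (esp | (4096 - 4)) & ~3UL;  /* last word of this 4kB block   */
        return *(struct task_struct **)slot;             /* the stashed 'current' pointer */
    }
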
  10.166 -ENTRY(continue_nonidle_task)
  10.167 -        GET_CURRENT(%ebx)
  10.168 -        jmp test_all_events
  10.169 -
  10.170 -        ALIGN
  10.171 -/*
  10.172 - * HYPERVISOR_multicall(call_list, nr_calls)
  10.173 - *   Execute a list of 'nr_calls' system calls, pointed at by 'call_list'.
  10.174 - *   This is fairly easy except that:
  10.175 - *   1. We may fault reading the call list, and must patch that up; and
  10.176 - *   2. We cannot recursively call HYPERVISOR_multicall, or a malicious
  10.177 - *      caller could cause our stack to blow up.
  10.178 - */
  10.179 -do_multicall:
  10.180 -        popl  %eax
  10.181 -        cmpl  $SYMBOL_NAME(multicall_return_from_call),%eax
  10.182 -        je    multicall_return_from_call
  10.183 -        pushl %ebx
  10.184 -        movl  4(%esp),%ebx   /* EBX == call_list */
  10.185 -        movl  8(%esp),%ecx   /* ECX == nr_calls  */
  10.186 -multicall_loop:
  10.187 -        pushl %ecx
  10.188 -multicall_fault1: 
  10.189 -        pushl 20(%ebx)      # args[4]
  10.190 -multicall_fault2: 
  10.191 -        pushl 16(%ebx)      # args[3]
  10.192 -multicall_fault3: 
  10.193 -        pushl 12(%ebx)      # args[2]
  10.194 -multicall_fault4: 
  10.195 -        pushl 8(%ebx)       # args[1]
  10.196 -multicall_fault5: 
  10.197 -        pushl 4(%ebx)       # args[0]
  10.198 -multicall_fault6: 
  10.199 -        movl  (%ebx),%eax   # op
  10.200 -        andl  $255,%eax
  10.201 -        call  *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
  10.202 -multicall_return_from_call:
  10.203 -multicall_fault7:
  10.204 -        movl  %eax,24(%ebx) # args[5] == result
  10.205 -        addl  $20,%esp
  10.206 -        popl  %ecx
  10.207 -        addl  $(ARGS_PER_MULTICALL_ENTRY*4),%ebx
  10.208 -        loop  multicall_loop
  10.209 -        popl  %ebx
  10.210 -        xorl  %eax,%eax
  10.211 -        jmp   ret_from_hypervisor_call
  10.212 -
  10.213 -.section __ex_table,"a"
  10.214 -        .align 4
  10.215 -        .long multicall_fault1, multicall_fixup1
  10.216 -        .long multicall_fault2, multicall_fixup2
  10.217 -        .long multicall_fault3, multicall_fixup3
  10.218 -        .long multicall_fault4, multicall_fixup4
  10.219 -        .long multicall_fault5, multicall_fixup5
  10.220 -        .long multicall_fault6, multicall_fixup6
  10.221 -.previous
  10.222 -               
  10.223 -.section .fixup,"ax"
  10.224 -multicall_fixup6: 
  10.225 -        addl  $4,%esp
  10.226 -multicall_fixup5: 
  10.227 -        addl  $4,%esp
  10.228 -multicall_fixup4: 
  10.229 -        addl  $4,%esp
  10.230 -multicall_fixup3: 
  10.231 -        addl  $4,%esp
  10.232 -multicall_fixup2: 
  10.233 -        addl  $4,%esp
  10.234 -multicall_fixup1:
  10.235 -        addl  $4,%esp
  10.236 -        popl  %ebx
  10.237 -        movl  $-EFAULT,%eax
  10.238 -        jmp   ret_from_hypervisor_call
  10.239 -.previous        
  10.240 -                
  10.241 -        ALIGN
  10.242 -restore_all_guest:
  10.243 -        # First, may need to restore %ds if clobbered by create_bounce_frame
  10.244 -        pushl %ss
  10.245 -        popl  %ds
  10.246 -        # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
  10.247 -        leal  DS(%esp),%esi
  10.248 -        leal  FAILSAFE_BUFFER(%ebx),%edi
  10.249 -        movsl
  10.250 -        movsl
  10.251 -        movsl
  10.252 -        movsl
  10.253 -        # Finally, restore guest registers -- faults will cause failsafe
  10.254 -        popl %ebx
  10.255 -	popl %ecx
  10.256 -	popl %edx
  10.257 -	popl %esi
  10.258 -	popl %edi
  10.259 -	popl %ebp
  10.260 -	popl %eax
  10.261 -1:	popl %ds
  10.262 -2:	popl %es
  10.263 -3:	popl %fs
  10.264 -4:	popl %gs
  10.265 -        addl $4,%esp
  10.266 -5:      iret
  10.267 -.section .fixup,"ax"
  10.268 -10:     subl $4,%esp
  10.269 -        pushl %gs
  10.270 -9:      pushl %fs
  10.271 -8:      pushl %es
  10.272 -7:      pushl %ds
  10.273 -6:      pushl %eax
  10.274 -	pushl %ebp
  10.275 -	pushl %edi
  10.276 -	pushl %esi
  10.277 -	pushl %edx
  10.278 -	pushl %ecx
  10.279 -	pushl %ebx
  10.280 -	pushl %ss
  10.281 -	popl  %ds
  10.282 -	pushl %ss
  10.283 -	popl  %es
  10.284 -	jmp  failsafe_callback
  10.285 -.previous
  10.286 -.section __ex_table,"a"
  10.287 -	.align 4
  10.288 -	.long 1b,6b
  10.289 -	.long 2b,7b
  10.290 -	.long 3b,8b
  10.291 -	.long 4b,9b
  10.292 -	.long 5b,10b
  10.293 -.previous
  10.294 -
  10.295 -/* No special register assumptions */
  10.296 -failsafe_callback:
  10.297 -        GET_CURRENT(%ebx)
  10.298 -        movzwl PROCESSOR(%ebx),%eax
  10.299 -        shl  $4,%eax
  10.300 -        lea  guest_trap_bounce(%eax),%edx
  10.301 -        movl FAILSAFE_ADDR(%ebx),%eax
  10.302 -        movl %eax,GTB_EIP(%edx)
  10.303 -        movl FAILSAFE_SEL(%ebx),%eax
  10.304 -        movw %ax,GTB_CS(%edx)
  10.305 -        call create_bounce_frame
  10.306 -        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
  10.307 -        leal FAILSAFE_BUFFER(%ebx),%ebp
  10.308 -        movl  0(%ebp),%eax           # DS
  10.309 -FAULT1: movl %eax,(%esi) 
  10.310 -        movl  4(%ebp),%eax           # ES
  10.311 -FAULT2: movl %eax,4(%esi)
  10.312 -        movl  8(%ebp),%eax           # FS
  10.313 -FAULT3: movl %eax,8(%esi) 
  10.314 -        movl 12(%ebp),%eax           # GS
  10.315 -FAULT4: movl %eax,12(%esi)
  10.316 -        movl %esi,OLDESP(%esp)
  10.317 -        popl %ebx
  10.318 -        popl %ecx
  10.319 -        popl %edx
  10.320 -        popl %esi
  10.321 -        popl %edi
  10.322 -        popl %ebp
  10.323 -        popl %eax
  10.324 -        addl $20,%esp                # skip DS/ES/FS/GS/ORIG_EAX
  10.325 -FAULT5: iret 
  10.326 -
  10.327 -
  10.328 -        ALIGN
   10.329 -# Simple restore -- we should never fault as we will only interrupt ring 0
  10.330 -# when sane values have been placed in all registers. The only exception is
  10.331 -# NMI, which may interrupt before good values have been placed in DS-GS.
  10.332 -# The NMI return code deals with this problem itself.
  10.333 -restore_all_xen:
  10.334 -	popl %ebx
  10.335 -	popl %ecx
  10.336 -	popl %edx
  10.337 -	popl %esi
  10.338 -	popl %edi
  10.339 -	popl %ebp
  10.340 -	popl %eax
  10.341 -	popl %ds
  10.342 -	popl %es
  10.343 -	popl %fs
  10.344 -	popl %gs
  10.345 -        addl $4,%esp
  10.346 -        iret
  10.347 -
  10.348 -        ALIGN
  10.349 -ENTRY(hypervisor_call)
  10.350 -        pushl %eax			# save orig_eax
  10.351 -	SAVE_ALL
  10.352 -	GET_CURRENT(%ebx)
  10.353 -	andl $255,%eax
  10.354 -	call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4)
  10.355 -
  10.356 -ret_from_hypervisor_call:
  10.357 -        movl %eax,EAX(%esp)		# save the return value
  10.358 -
  10.359 -test_all_events:
  10.360 -        xorl %ecx,%ecx
  10.361 -        notl %ecx
  10.362 -        cli                             # tests must not race interrupts
  10.363 -/*test_softirqs:*/  
  10.364 -        movzwl PROCESSOR(%ebx),%eax
  10.365 -        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
  10.366 -        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
  10.367 -        jnz  process_softirqs
  10.368 -/*test_hyp_events:*/
  10.369 -        testw %cx, HYP_EVENTS(%ebx)
  10.370 -        jnz  process_hyp_events
  10.371 -/*test_guest_events:*/
  10.372 -        movl SHARED_INFO(%ebx),%eax
  10.373 -        testb $0xFF,UPCALL_MASK(%eax)
  10.374 -        jnz  restore_all_guest
  10.375 -        testb $0xFF,UPCALL_PENDING(%eax)
  10.376 -        jz   restore_all_guest
  10.377 -        movb $1,UPCALL_MASK(%eax)       # Upcalls are masked during delivery
  10.378 -/*process_guest_events:*/
  10.379 -        movzwl PROCESSOR(%ebx),%edx
  10.380 -        shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
  10.381 -        lea  guest_trap_bounce(%edx),%edx
  10.382 -        movl EVENT_ADDR(%ebx),%eax
  10.383 -        movl %eax,GTB_EIP(%edx)
  10.384 -        movl EVENT_SEL(%ebx),%eax
  10.385 -        movw %ax,GTB_CS(%edx)
  10.386 -        call create_bounce_frame
  10.387 -        jmp  restore_all_guest
  10.388 -
  10.389 -        ALIGN
  10.390 -process_softirqs:
  10.391 -        sti       
  10.392 -        call SYMBOL_NAME(do_softirq)
  10.393 -        jmp  test_all_events
  10.394 -        
  10.395 -        ALIGN
  10.396 -process_hyp_events:
  10.397 -        sti
  10.398 -        call SYMBOL_NAME(do_hyp_events)
  10.399 -        jmp  test_all_events
  10.400 -        
  10.401 -/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:         */
  10.402 -/*   {EIP, CS, EFLAGS, [ESP, SS]}                                     */
  10.403 -/* %edx == guest_trap_bounce, %ebx == task_struct                     */
  10.404 -/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP.        */
  10.405 -create_bounce_frame:        
  10.406 -        mov  CS+4(%esp),%cl
  10.407 -        test $2,%cl
  10.408 -        jz   1f /* jump if returning to an existing ring-1 activation */
  10.409 -        /* obtain ss/esp from TSS -- no current ring-1 activations */
  10.410 -        movzwl PROCESSOR(%ebx),%eax
  10.411 -        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
  10.412 -        movl %eax, %ecx
  10.413 -        shll $7, %ecx
  10.414 -        shll $13, %eax
  10.415 -        addl %ecx,%eax
  10.416 -        addl $init_tss + 12,%eax
  10.417 -        movl (%eax),%esi /* tss->esp1 */
  10.418 -FAULT6: movl 4(%eax),%ds /* tss->ss1  */
  10.419 -        /* base of stack frame must contain ss/esp (inter-priv iret) */
  10.420 -        subl $8,%esi
  10.421 -        movl OLDESP+4(%esp),%eax
  10.422 -FAULT7: movl %eax,(%esi) 
  10.423 -        movl OLDSS+4(%esp),%eax
  10.424 -FAULT8: movl %eax,4(%esi) 
  10.425 -        jmp 2f
  10.426 -1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
  10.427 -        movl OLDESP+4(%esp),%esi
  10.428 -FAULT9: movl OLDSS+4(%esp),%ds 
  10.429 -2:      /* Construct a stack frame: EFLAGS, CS/EIP */
  10.430 -        subl $12,%esi
  10.431 -        movl EIP+4(%esp),%eax
  10.432 -FAULT10:movl %eax,(%esi) 
  10.433 -        movl CS+4(%esp),%eax
  10.434 -FAULT11:movl %eax,4(%esi) 
  10.435 -        movl EFLAGS+4(%esp),%eax
  10.436 -FAULT12:movl %eax,8(%esi)
  10.437 -        /* Rewrite our stack frame and return to ring 1. */
  10.438 -        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
  10.439 -        andl $0xfffcbeff,%eax
  10.440 -        movl %eax,EFLAGS+4(%esp)
  10.441 -        movl %ds,OLDSS+4(%esp)
  10.442 -        movl %esi,OLDESP+4(%esp)
  10.443 -        movzwl %es:GTB_CS(%edx),%eax
  10.444 -        movl %eax,CS+4(%esp)
  10.445 -        movl %es:GTB_EIP(%edx),%eax
  10.446 -        movl %eax,EIP+4(%esp)
  10.447 -        ret
  10.448 -        
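
The "multiply by 8320" sequence in create_bounce_frame works because 8320 = 8192 + 128 = 2^13 + 2^7, so multiplying the CPU index by sizeof(tss_struct) is just two shifts and an add. A one-line stand-alone check:

    #include <assert.h>

    int main(void)
    {
        /* (cpu << 13) + (cpu << 7) == cpu * 8320 for any small cpu index. */
        for (unsigned cpu = 0; cpu < 64; cpu++)
            assert(((cpu << 13) + (cpu << 7)) == cpu * 8320);
        return 0;
    }
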
  10.449 -                              
  10.450 -.section __ex_table,"a"
  10.451 -        .align 4
  10.452 -        .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
  10.453 -        .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
  10.454 -        .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
  10.455 -        .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
  10.456 -        .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
  10.457 -        .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
  10.458 -        .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
  10.459 -        .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
  10.460 -        .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
  10.461 -        .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
  10.462 -        .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
  10.463 -        .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
  10.464 -        .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
  10.465 -        .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
  10.466 -.previous
  10.467 -               
  10.468 -# This handler kills domains which experience unrecoverable faults.
  10.469 -.section .fixup,"ax"
  10.470 -crash_domain_fixup1:
  10.471 -        subl  $4,%esp
  10.472 -        SAVE_ALL
  10.473 -        jmp   crash_domain
  10.474 -crash_domain_fixup2:
  10.475 -        addl  $4,%esp                     
  10.476 -crash_domain_fixup3:
  10.477 -        pushl %ss
  10.478 -        popl  %ds
  10.479 -        jmp   crash_domain
  10.480 -.previous
  10.481 -
  10.482 -        ALIGN
  10.483 -process_guest_exception_and_events:        
  10.484 -        movzwl PROCESSOR(%ebx),%eax
  10.485 -        shl  $4,%eax
  10.486 -        lea  guest_trap_bounce(%eax),%edx
  10.487 -        testb $~0,GTB_FLAGS(%edx)
  10.488 -        jz   test_all_events
  10.489 -        call create_bounce_frame        # just the basic frame
  10.490 -        mov  %es:GTB_FLAGS(%edx),%cl
  10.491 -        test $GTBF_TRAP_NOCODE,%cl
  10.492 -        jnz  2f
  10.493 -        subl $4,%esi                    # push error_code onto guest frame
  10.494 -        movl %es:GTB_ERROR_CODE(%edx),%eax
  10.495 -FAULT13:movl %eax,(%esi)
  10.496 -        test $GTBF_TRAP_CR2,%cl
  10.497 -        jz   1f
  10.498 -        subl $4,%esi                    # push %cr2 onto guest frame
  10.499 -        movl %es:GTB_CR2(%edx),%eax
  10.500 -FAULT14:movl %eax,(%esi)
  10.501 -1:      movl %esi,OLDESP(%esp)        
  10.502 -2:      push %es                        # unclobber %ds
  10.503 -        pop  %ds 
  10.504 -        movb $0,GTB_FLAGS(%edx)
  10.505 -        jmp  test_all_events
  10.506 -
  10.507 -        ALIGN
  10.508 -ENTRY(ret_from_intr)
  10.509 -	GET_CURRENT(%ebx)
  10.510 -        movb CS(%esp),%al
  10.511 -	testb $3,%al	# return to non-supervisor?
  10.512 -	jne test_all_events
  10.513 -	jmp restore_all_xen
  10.514 -
  10.515 -ENTRY(divide_error)
  10.516 -	pushl $0		# no error code
  10.517 -	pushl $ SYMBOL_NAME(do_divide_error)
  10.518 -	ALIGN
  10.519 -error_code:
  10.520 -	pushl %fs
  10.521 -	pushl %es
  10.522 -	pushl %ds
  10.523 -	pushl %eax
  10.524 -	xorl  %eax,%eax
  10.525 -	pushl %ebp
  10.526 -	pushl %edi
  10.527 -	pushl %esi
  10.528 -	pushl %edx
  10.529 -	decl  %eax			# eax = -1
  10.530 -	pushl %ecx
  10.531 -	pushl %ebx
  10.532 -	cld
  10.533 -	movl  %gs,%ecx
  10.534 -	movl  ORIG_EAX(%esp), %esi	# get the error code
  10.535 -	movl  GS(%esp), %edi		# get the function address
  10.536 -	movl  %eax, ORIG_EAX(%esp)
  10.537 -	movl  %ecx, GS(%esp)
  10.538 -	movl  $(__HYPERVISOR_DS),%edx
  10.539 -	movl  %edx,%ds
  10.540 -	movl  %edx,%es
  10.541 -	movl  %edx,%fs
  10.542 -	movl  %edx,%gs
  10.543 -	movl  %esp,%edx
  10.544 -	pushl %esi			# push the error code
  10.545 -	pushl %edx			# push the pt_regs pointer
  10.546 -	GET_CURRENT(%ebx)
  10.547 -	call  *%edi
  10.548 -        addl  $8,%esp
  10.549 -        movb  CS(%esp),%al
  10.550 -	testb $3,%al
  10.551 -	je    restore_all_xen
  10.552 -        jmp   process_guest_exception_and_events
  10.553 -
  10.554 -ENTRY(coprocessor_error)
  10.555 -	pushl $0
  10.556 -	pushl $ SYMBOL_NAME(do_coprocessor_error)
  10.557 -	jmp error_code
  10.558 -
  10.559 -ENTRY(simd_coprocessor_error)
  10.560 -	pushl $0
  10.561 -	pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
  10.562 -	jmp error_code
  10.563 -
  10.564 -ENTRY(device_not_available)
  10.565 -	pushl $0
  10.566 -        pushl $SYMBOL_NAME(math_state_restore)
  10.567 -        jmp   error_code
  10.568 -
  10.569 -ENTRY(debug)
  10.570 -	pushl $0
  10.571 -	pushl $ SYMBOL_NAME(do_debug)
  10.572 -	jmp error_code
  10.573 -
  10.574 -ENTRY(int3)
  10.575 -	pushl $0
  10.576 -	pushl $ SYMBOL_NAME(do_int3)
  10.577 -	jmp error_code
  10.578 -
  10.579 -ENTRY(overflow)
  10.580 -	pushl $0
  10.581 -	pushl $ SYMBOL_NAME(do_overflow)
  10.582 -	jmp error_code
  10.583 -
  10.584 -ENTRY(bounds)
  10.585 -	pushl $0
  10.586 -	pushl $ SYMBOL_NAME(do_bounds)
  10.587 -	jmp error_code
  10.588 -
  10.589 -ENTRY(invalid_op)
  10.590 -	pushl $0
  10.591 -	pushl $ SYMBOL_NAME(do_invalid_op)
  10.592 -	jmp error_code
  10.593 -
  10.594 -ENTRY(coprocessor_segment_overrun)
  10.595 -	pushl $0
  10.596 -	pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
  10.597 -	jmp error_code
  10.598 -
  10.599 -ENTRY(invalid_TSS)
  10.600 -	pushl $ SYMBOL_NAME(do_invalid_TSS)
  10.601 -	jmp error_code
  10.602 -
  10.603 -ENTRY(segment_not_present)
  10.604 -	pushl $ SYMBOL_NAME(do_segment_not_present)
  10.605 -	jmp error_code
  10.606 -
  10.607 -ENTRY(stack_segment)
  10.608 -	pushl $ SYMBOL_NAME(do_stack_segment)
  10.609 -	jmp error_code
  10.610 -
  10.611 -ENTRY(general_protection)
  10.612 -	pushl $ SYMBOL_NAME(do_general_protection)
  10.613 -	jmp error_code
  10.614 -
  10.615 -ENTRY(alignment_check)
  10.616 -	pushl $ SYMBOL_NAME(do_alignment_check)
  10.617 -	jmp error_code
  10.618 -
  10.619 -ENTRY(page_fault)
  10.620 -	pushl $ SYMBOL_NAME(do_page_fault)
  10.621 -	jmp error_code
  10.622 -
  10.623 -ENTRY(machine_check)
  10.624 -	pushl $0
  10.625 -	pushl $ SYMBOL_NAME(do_machine_check)
  10.626 -	jmp error_code
  10.627 -
  10.628 -ENTRY(spurious_interrupt_bug)
  10.629 -	pushl $0
  10.630 -	pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
  10.631 -	jmp error_code
  10.632 -
  10.633 -ENTRY(nmi)
  10.634 -        # Save state but do not trash the segment registers!
  10.635 -        # We may otherwise be unable to reload them or copy them to ring 1. 
  10.636 -	pushl %eax
  10.637 -	SAVE_ALL_NOSEGREGS
  10.638 -
  10.639 -        # Check for hardware problems. These are always fatal so we can
  10.640 -        # reload DS and ES when handling them.
  10.641 -        inb   $0x61,%al
  10.642 -        testb $0x80,%al
  10.643 -        jne   nmi_parity_err
  10.644 -        testb $0x40,%al
  10.645 -        jne   nmi_io_err
  10.646 -        movl  %eax,%ebx
  10.647 -        
   10.648 -        # Okay, it's almost a normal NMI tick. We can only process it if:
  10.649 -        #  A. We are the outermost Xen activation (in which case we have
  10.650 -        #     the selectors safely saved on our stack)
  10.651 -        #  B. DS-GS all contain sane Xen values.
  10.652 -        # In all other cases we bail without touching DS-GS, as we have
  10.653 -        # interrupted an enclosing Xen activation in tricky prologue or
  10.654 -        # epilogue code.
  10.655 -        movb  CS(%esp),%al
  10.656 -	testb $3,%al
  10.657 -        jne   do_watchdog_tick
  10.658 -        movl  DS(%esp),%eax
  10.659 -        cmpw  $(__HYPERVISOR_DS),%ax
  10.660 -        jne   nmi_badseg
  10.661 -        movl  ES(%esp),%eax
  10.662 -        cmpw  $(__HYPERVISOR_DS),%ax
  10.663 -        jne   nmi_badseg
  10.664 -        movl  FS(%esp),%eax
  10.665 -        cmpw  $(__HYPERVISOR_DS),%ax
  10.666 -        jne   nmi_badseg
  10.667 -        movl  GS(%esp),%eax
  10.668 -        cmpw  $(__HYPERVISOR_DS),%ax
  10.669 -        jne   nmi_badseg
  10.670 -
  10.671 -do_watchdog_tick:
  10.672 -        movl  $(__HYPERVISOR_DS),%edx
  10.673 -        movl  %edx,%ds
  10.674 -        movl  %edx,%es
  10.675 -        movl  %esp,%edx
  10.676 -	pushl %ebx   # reason
  10.677 -	pushl %edx   # regs
  10.678 -        call  SYMBOL_NAME(do_nmi)
  10.679 -	addl  $8,%esp
  10.680 -        movb  CS(%esp),%al
  10.681 -	testb $3,%al
  10.682 -	je    restore_all_xen
  10.683 -        GET_CURRENT(%ebx)
  10.684 -        jmp   restore_all_guest
  10.685 -
  10.686 -nmi_badseg:
  10.687 -	popl %ebx
  10.688 -	popl %ecx
  10.689 -	popl %edx
  10.690 -	popl %esi
  10.691 -	popl %edi
  10.692 -	popl %ebp
  10.693 -	popl %eax
  10.694 -        addl $20,%esp
  10.695 -        iret
  10.696 -
  10.697 -nmi_parity_err: 
  10.698 -        movl $(__HYPERVISOR_DS),%edx
  10.699 -        movl %edx,%ds
  10.700 -        movl %edx,%es
  10.701 -        jmp  SYMBOL_NAME(mem_parity_error)
  10.702 -        
  10.703 -nmi_io_err: 
  10.704 -        movl $(__HYPERVISOR_DS),%edx
  10.705 -        movl %edx,%ds
  10.706 -        movl %edx,%es
  10.707 -        jmp  SYMBOL_NAME(io_check_error)                        
  10.708 -        
  10.709 -.data
  10.710 -ENTRY(hypervisor_call_table)
  10.711 -        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
  10.712 -        .long SYMBOL_NAME(do_mmu_update)
  10.713 -        .long SYMBOL_NAME(do_console_write)
  10.714 -        .long SYMBOL_NAME(do_set_gdt)
  10.715 -        .long SYMBOL_NAME(do_stack_switch)
  10.716 -        .long SYMBOL_NAME(do_set_callbacks)      /*  5 */
  10.717 -        .long SYMBOL_NAME(do_ni_syscall)                    # do_net_io_op
  10.718 -        .long SYMBOL_NAME(do_fpu_taskswitch)
  10.719 -        .long SYMBOL_NAME(do_sched_op)
  10.720 -        .long SYMBOL_NAME(do_dom0_op)
  10.721 -        .long SYMBOL_NAME(do_ni_syscall)         /* 10 */   # do_network_op
  10.722 -        .long SYMBOL_NAME(do_ni_syscall)                    # do_block_io_op
  10.723 -        .long SYMBOL_NAME(do_set_debugreg)
  10.724 -        .long SYMBOL_NAME(do_get_debugreg)
  10.725 -        .long SYMBOL_NAME(do_update_descriptor)
  10.726 -        .long SYMBOL_NAME(do_set_fast_trap)      /* 15 */
  10.727 -        .long SYMBOL_NAME(do_dom_mem_op)
  10.728 -        .long SYMBOL_NAME(do_multicall)
  10.729 -        .long SYMBOL_NAME(do_kbd_op)
  10.730 -        .long SYMBOL_NAME(do_update_va_mapping)
  10.731 -        .long SYMBOL_NAME(do_set_timer_op)       /* 20 */
  10.732 -        .long SYMBOL_NAME(do_event_channel_op)
  10.733 -        .long SYMBOL_NAME(do_xen_version)
  10.734 -        .long SYMBOL_NAME(do_console_io)
  10.735 -        .long SYMBOL_NAME(do_physdev_op)
  10.736 -        .long SYMBOL_NAME(do_update_va_mapping_otherdomain) /* 25 */
  10.737 -        .rept NR_syscalls-((.-hypervisor_call_table)/4)
  10.738 -        .long SYMBOL_NAME(do_ni_syscall)
  10.739 -        .endr
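
The .rept/.endr pair above pads the call table out to NR_syscalls entries: (. - hypervisor_call_table)/4 counts the 4-byte slots already emitted, and every remaining slot is filled with do_ni_syscall so that any in-range but unimplemented hypercall number lands on a harmless stub. A runnable C sketch of the same pattern (NR_CALLS and the handler names below are invented for the illustration, not part of the changeset):

    #include <stdio.h>

    #define NR_CALLS 8

    static long do_ni_call(void)   { return -38; /* -ENOSYS */ }
    static long do_call_zero(void) { return 0; }
    static long do_call_one(void)  { return 1; }

    /* Slots 2..7 are explicitly padded with the "not implemented" stub. */
    static long (*call_table[NR_CALLS])(void) = {
        do_call_zero, do_call_one,
        do_ni_call, do_ni_call, do_ni_call, do_ni_call, do_ni_call, do_ni_call,
    };

    int main(void)
    {
        printf("call 1 -> %ld\n", call_table[1]());   /* implemented slot */
        printf("call 7 -> %ld\n", call_table[7]());   /* padded slot      */
        return 0;
    }
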
    11.1 --- a/xen/arch/i386/extable.c	Thu Jun 10 14:24:30 2004 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,62 +0,0 @@
    11.4 -/*
    11.5 - * linux/arch/i386/mm/extable.c
    11.6 - */
    11.7 -
    11.8 -#include <xen/config.h>
    11.9 -#include <xen/module.h>
   11.10 -#include <xen/spinlock.h>
   11.11 -#include <asm/uaccess.h>
   11.12 -
   11.13 -extern const struct exception_table_entry __start___ex_table[];
   11.14 -extern const struct exception_table_entry __stop___ex_table[];
   11.15 -
   11.16 -static inline unsigned long
   11.17 -search_one_table(const struct exception_table_entry *first,
   11.18 -		 const struct exception_table_entry *last,
   11.19 -		 unsigned long value)
   11.20 -{
   11.21 -        while (first <= last) {
   11.22 -		const struct exception_table_entry *mid;
   11.23 -		long diff;
   11.24 -
   11.25 -		mid = (last - first) / 2 + first;
   11.26 -		diff = mid->insn - value;
   11.27 -                if (diff == 0)
   11.28 -                        return mid->fixup;
   11.29 -                else if (diff < 0)
   11.30 -                        first = mid+1;
   11.31 -                else
   11.32 -                        last = mid-1;
   11.33 -        }
   11.34 -        return 0;
   11.35 -}
   11.36 -
   11.37 -extern spinlock_t modlist_lock;
   11.38 -
   11.39 -unsigned long
   11.40 -search_exception_table(unsigned long addr)
   11.41 -{
   11.42 -	unsigned long ret = 0;
   11.43 -	
   11.44 -#ifndef CONFIG_MODULES
   11.45 -	/* There is only the kernel to search.  */
   11.46 -	ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
   11.47 -	return ret;
   11.48 -#else
   11.49 -	unsigned long flags;
   11.50 -	/* The kernel is the last "module" -- no need to treat it special.  */
   11.51 -	struct module *mp;
   11.52 -
   11.53 -	spin_lock_irqsave(&modlist_lock, flags);
   11.54 -	for (mp = module_list; mp != NULL; mp = mp->next) {
   11.55 -		if (mp->ex_table_start == NULL || !(mp->flags&(MOD_RUNNING|MOD_INITIALIZING)))
   11.56 -			continue;
   11.57 -		ret = search_one_table(mp->ex_table_start,
   11.58 -				       mp->ex_table_end - 1, addr);
   11.59 -		if (ret)
   11.60 -			break;
   11.61 -	}
   11.62 -	spin_unlock_irqrestore(&modlist_lock, flags);
   11.63 -	return ret;
   11.64 -#endif
   11.65 -}
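
search_one_table() is a plain binary search over (insn, fixup) pairs sorted by faulting address, and search_exception_table() walks whatever tables exist. A hedged sketch of the usual consumer, the fault path that resumes at the fixup stub (struct fault_regs and try_fixup are invented names for the illustration):

    struct fault_regs { unsigned long eip; };

    extern unsigned long search_exception_table(unsigned long addr);

    static int try_fixup(struct fault_regs *regs)
    {
        unsigned long fixup = search_exception_table(regs->eip);
        if (fixup == 0)
            return 0;          /* no entry: genuine, unhandled fault  */
        regs->eip = fixup;     /* resume execution at the fixup stub  */
        return 1;
    }
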
    12.1 --- a/xen/arch/i386/flushtlb.c	Thu Jun 10 14:24:30 2004 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,40 +0,0 @@
    12.4 -/******************************************************************************
    12.5 - * flushtlb.c
    12.6 - * 
    12.7 - * TLB flushes are timestamped using a global virtual 'clock' which ticks
    12.8 - * on any TLB flush on any processor.
    12.9 - * 
   12.10 - * Copyright (c) 2003, K A Fraser
   12.11 - */
   12.12 -
   12.13 -#include <xen/config.h>
   12.14 -#include <xen/sched.h>
   12.15 -#include <xen/interrupt.h>
   12.16 -#include <asm/flushtlb.h>
   12.17 -
   12.18 -u32 tlbflush_clock;
   12.19 -u32 tlbflush_time[NR_CPUS];
   12.20 -
   12.21 -void tlb_clocktick(void)
   12.22 -{
   12.23 -    u32 y, ny;
   12.24 -
   12.25 -    /* Tick the clock. 'y' contains the current time after the tick. */
   12.26 -    ny = tlbflush_clock;
   12.27 -    do {
   12.28 -#ifdef CONFIG_SMP
   12.29 -        if ( unlikely(((y = ny+1) & TLBCLOCK_EPOCH_MASK) == 0) )
   12.30 -        {
   12.31 -            raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
   12.32 -            y = tlbflush_clock;
   12.33 -            break;
   12.34 -        }
   12.35 -#else
   12.36 -        y = ny+1;
   12.37 -#endif
   12.38 -    }
   12.39 -    while ( unlikely((ny = cmpxchg(&tlbflush_clock, y-1, y)) != y-1) );
   12.40 -
   12.41 -    /* Update this CPU's timestamp to new time. */
   12.42 -    tlbflush_time[smp_processor_id()] = y;
   12.43 -}
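
tlb_clocktick() advances the global clock with a cmpxchg loop, so concurrent flushes each observe a distinct tick, and then records the new value in tlbflush_time[] for the flushing CPU. A hedged sketch of how such timestamps are meant to be compared, ignoring epoch wrap-around for simplicity (flush_needed is an invented name; the real test lives in asm/flushtlb.h):

    /* tlbflush_time[] as declared in the file above (u32 on x86). */
    extern unsigned int tlbflush_time[];

    /* A mapping stamped with the clock value at tear-down time only needs
     * a flush on CPUs that have not flushed since that stamp. */
    static int flush_needed(unsigned int cpu, unsigned int stamp)
    {
        return tlbflush_time[cpu] < stamp;
    }
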
    13.1 --- a/xen/arch/i386/i387.c	Thu Jun 10 14:24:30 2004 +0000
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,56 +0,0 @@
    13.4 -/*
    13.5 - *  linux/arch/i386/kernel/i387.c
    13.6 - *
    13.7 - *  Copyright (C) 1994 Linus Torvalds
    13.8 - *
    13.9 - *  Pentium III FXSR, SSE support
   13.10 - *  General FPU state handling cleanups
   13.11 - *	Gareth Hughes <gareth@valinux.com>, May 2000
   13.12 - */
   13.13 -
   13.14 -#include <xen/config.h>
   13.15 -#include <xen/sched.h>
   13.16 -#include <asm/processor.h>
   13.17 -#include <asm/i387.h>
   13.18 -
   13.19 -void init_fpu(void)
   13.20 -{
   13.21 -    __asm__("fninit");
   13.22 -    if ( cpu_has_xmm ) load_mxcsr(0x1f80);
   13.23 -    set_bit(PF_DONEFPUINIT, &current->flags);
   13.24 -}
   13.25 -
   13.26 -static inline void __save_init_fpu( struct task_struct *tsk )
   13.27 -{
   13.28 -    if ( cpu_has_fxsr ) {
   13.29 -        asm volatile( "fxsave %0 ; fnclex"
   13.30 -                      : "=m" (tsk->thread.i387.fxsave) );
   13.31 -    } else {
   13.32 -        asm volatile( "fnsave %0 ; fwait"
   13.33 -                      : "=m" (tsk->thread.i387.fsave) );
   13.34 -    }
   13.35 -    clear_bit(PF_USEDFPU, &tsk->flags);
   13.36 -}
   13.37 -
   13.38 -void save_init_fpu( struct task_struct *tsk )
   13.39 -{
   13.40 -    /*
   13.41 -     * The guest OS may have set the 'virtual STTS' flag.
   13.42 -     * This causes us to set the real flag, so we'll need
   13.43 -     * to temporarily clear it while saving f-p state.
   13.44 -     */
   13.45 -    if ( test_bit(PF_GUEST_STTS, &tsk->flags) ) clts();
   13.46 -    __save_init_fpu(tsk);
   13.47 -    stts();
   13.48 -}
   13.49 -
   13.50 -void restore_fpu( struct task_struct *tsk )
   13.51 -{
   13.52 -    if ( cpu_has_fxsr ) {
   13.53 -        asm volatile( "fxrstor %0"
   13.54 -                      : : "m" (tsk->thread.i387.fxsave) );
   13.55 -    } else {
   13.56 -        asm volatile( "frstor %0"
   13.57 -                      : : "m" (tsk->thread.i387.fsave) );
   13.58 -    }
   13.59 -}
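
save_init_fpu() and restore_fpu() only make sense as part of a lazy-FPU scheme driven by the device-not-available trap (entry.S above routes that trap to math_state_restore). A hedged sketch of that handler's shape using the same helpers (math_state_restore_sketch is an invented name, not the changeset's actual implementation):

    #include <xen/sched.h>
    #include <asm/i387.h>

    static void math_state_restore_sketch(struct task_struct *tsk)
    {
        clts();                               /* clear CR0.TS                */
        if ( test_bit(PF_DONEFPUINIT, &tsk->flags) )
            restore_fpu(tsk);                 /* reload previously saved FP  */
        else
            init_fpu();                       /* first FP use: fninit        */
        set_bit(PF_USEDFPU, &tsk->flags);     /* FP state now live in HW     */
    }
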
    14.1 --- a/xen/arch/i386/i8259.c	Thu Jun 10 14:24:30 2004 +0000
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,470 +0,0 @@
    14.4 -/******************************************************************************
    14.5 - * i8259.c
    14.6 - * 
     14.7 - * Well, this is required for SMP systems as well, as it builds interrupt
     14.8 - * tables for IO-APICs as well as for uniprocessor 8259-alikes.
    14.9 - */
   14.10 -
   14.11 -#include <xen/config.h>
   14.12 -#include <xen/init.h>
   14.13 -#include <asm/ptrace.h>
   14.14 -#include <xen/errno.h>
   14.15 -#include <xen/sched.h>
   14.16 -#include <xen/interrupt.h>
   14.17 -#include <xen/irq.h>
   14.18 -
   14.19 -#include <asm/atomic.h>
   14.20 -#include <asm/system.h>
   14.21 -#include <asm/io.h>
   14.22 -#include <asm/desc.h>
   14.23 -#include <asm/bitops.h>
   14.24 -#include <xen/delay.h>
   14.25 -#include <asm/apic.h>
   14.26 -
   14.27 -
   14.28 -/*
   14.29 - * Common place to define all x86 IRQ vectors
   14.30 - *
   14.31 - * This builds up the IRQ handler stubs using some ugly macros in irq.h
   14.32 - *
   14.33 - * These macros create the low-level assembly IRQ routines that save
   14.34 - * register context and call do_IRQ(). do_IRQ() then does all the
   14.35 - * operations that are needed to keep the AT (or SMP IOAPIC)
   14.36 - * interrupt-controller happy.
   14.37 - */
   14.38 -
   14.39 -BUILD_COMMON_IRQ()
   14.40 -
   14.41 -#define BI(x,y) \
   14.42 -	BUILD_IRQ(x##y)
   14.43 -
   14.44 -#define BUILD_16_IRQS(x) \
   14.45 -	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
   14.46 -	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
   14.47 -	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
   14.48 -	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
   14.49 -
   14.50 -/*
   14.51 - * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
   14.52 - * (these are usually mapped to vectors 0x30-0x3f)
   14.53 - */
   14.54 -    BUILD_16_IRQS(0x0)
   14.55 -
   14.56 -#ifdef CONFIG_X86_IO_APIC
   14.57 -/*
   14.58 - * The IO-APIC gives us many more interrupt sources. Most of these 
   14.59 - * are unused but an SMP system is supposed to have enough memory ...
   14.60 - * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
   14.61 - * across the spectrum, so we really want to be prepared to get all
   14.62 - * of these. Plus, more powerful systems might have more than 64
   14.63 - * IO-APIC registers.
   14.64 - *
   14.65 - * (these are usually mapped into the 0x30-0xff vector range)
   14.66 - */
   14.67 -    BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
   14.68 -    BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
   14.69 -    BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
   14.70 -    BUILD_16_IRQS(0xc)
   14.71 -#endif
   14.72 -
   14.73 -#undef BUILD_16_IRQS
   14.74 -#undef BI
   14.75 -
   14.76 -
   14.77 -/*
   14.78 - * The following vectors are part of the Linux architecture, there
   14.79 - * is no hardware IRQ pin equivalent for them, they are triggered
   14.80 - * through the ICC by us (IPIs)
   14.81 - */
   14.82 -#ifdef CONFIG_SMP
   14.83 -    BUILD_SMP_INTERRUPT(event_check_interrupt,EVENT_CHECK_VECTOR)
   14.84 -    BUILD_SMP_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
   14.85 -    BUILD_SMP_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
   14.86 -#endif
   14.87 -
   14.88 -/*
   14.89 - * every pentium local APIC has two 'local interrupts', with a
   14.90 - * soft-definable vector attached to both interrupts, one of
   14.91 - * which is a timer interrupt, the other one is error counter
   14.92 - * overflow. Linux uses the local APIC timer interrupt to get
   14.93 - * a much simpler SMP time architecture:
   14.94 - */
   14.95 -#ifdef CONFIG_X86_LOCAL_APIC
   14.96 -    BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
   14.97 -    BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
   14.98 -    BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
   14.99 -#endif
  14.100 -
  14.101 -#define IRQ(x,y) \
  14.102 -	IRQ##x##y##_interrupt
  14.103 -
  14.104 -#define IRQLIST_16(x) \
  14.105 -	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
  14.106 -	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
  14.107 -	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
  14.108 -	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
  14.109 -
  14.110 -    void (*interrupt[NR_IRQS])(void) = {
  14.111 -	IRQLIST_16(0x0),
  14.112 -
  14.113 -#ifdef CONFIG_X86_IO_APIC
  14.114 -        IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
  14.115 -	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
  14.116 -	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
  14.117 -	IRQLIST_16(0xc)
  14.118 -#endif
  14.119 -    };
  14.120 -
  14.121 -#undef IRQ
  14.122 -#undef IRQLIST_16
  14.123 -
  14.124 -/*
  14.125 - * This is the 'legacy' 8259A Programmable Interrupt Controller,
  14.126 - * present in the majority of PC/AT boxes.
  14.127 - * plus some generic x86 specific things if generic specifics makes
  14.128 - * any sense at all.
  14.129 - * this file should become arch/i386/kernel/irq.c when the old irq.c
  14.130 - * moves to arch independent land
  14.131 - */
  14.132 -
  14.133 -spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
  14.134 -
  14.135 -static void end_8259A_irq (unsigned int irq)
  14.136 -{
  14.137 -    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
  14.138 -        enable_8259A_irq(irq);
  14.139 -}
  14.140 -
  14.141 -#define shutdown_8259A_irq	disable_8259A_irq
  14.142 -
  14.143 -void mask_and_ack_8259A(unsigned int);
  14.144 -
  14.145 -static unsigned int startup_8259A_irq(unsigned int irq)
  14.146 -{ 
  14.147 -    enable_8259A_irq(irq);
  14.148 -    return 0; /* never anything pending */
  14.149 -}
  14.150 -
  14.151 -static struct hw_interrupt_type i8259A_irq_type = {
  14.152 -    "XT-PIC",
  14.153 -    startup_8259A_irq,
  14.154 -    shutdown_8259A_irq,
  14.155 -    enable_8259A_irq,
  14.156 -    disable_8259A_irq,
  14.157 -    mask_and_ack_8259A,
  14.158 -    end_8259A_irq,
  14.159 -    NULL
  14.160 -};
  14.161 -
  14.162 -/*
  14.163 - * 8259A PIC functions to handle ISA devices:
  14.164 - */
  14.165 -
  14.166 -/*
   14.167 - * This contains the IRQ mask for both 8259A interrupt controllers.
  14.168 - */
  14.169 -static unsigned int cached_irq_mask = 0xffff;
  14.170 -
  14.171 -#define __byte(x,y) 	(((unsigned char *)&(y))[x])
  14.172 -#define cached_21	(__byte(0,cached_irq_mask))
  14.173 -#define cached_A1	(__byte(1,cached_irq_mask))
  14.174 -
  14.175 -/*
  14.176 - * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
  14.177 - * boards the timer interrupt is not really connected to any IO-APIC pin,
  14.178 - * it's fed to the master 8259A's IR0 line only.
  14.179 - *
  14.180 - * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
   14.181 - * This 'mixed mode' IRQ handling costs nothing because it's only used
  14.182 - * at IRQ setup time.
  14.183 - */
  14.184 -unsigned long io_apic_irqs;
  14.185 -
  14.186 -void disable_8259A_irq(unsigned int irq)
  14.187 -{
  14.188 -    unsigned int mask = 1 << irq;
  14.189 -    unsigned long flags;
  14.190 -
  14.191 -    spin_lock_irqsave(&i8259A_lock, flags);
  14.192 -    cached_irq_mask |= mask;
  14.193 -    if (irq & 8)
  14.194 -        outb(cached_A1,0xA1);
  14.195 -    else
  14.196 -        outb(cached_21,0x21);
  14.197 -    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.198 -}
  14.199 -
  14.200 -void enable_8259A_irq(unsigned int irq)
  14.201 -{
  14.202 -    unsigned int mask = ~(1 << irq);
  14.203 -    unsigned long flags;
  14.204 -
  14.205 -    spin_lock_irqsave(&i8259A_lock, flags);
  14.206 -    cached_irq_mask &= mask;
  14.207 -    if (irq & 8)
  14.208 -        outb(cached_A1,0xA1);
  14.209 -    else
  14.210 -        outb(cached_21,0x21);
  14.211 -    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.212 -}
  14.213 -
  14.214 -int i8259A_irq_pending(unsigned int irq)
  14.215 -{
  14.216 -    unsigned int mask = 1<<irq;
  14.217 -    unsigned long flags;
  14.218 -    int ret;
  14.219 -
  14.220 -    spin_lock_irqsave(&i8259A_lock, flags);
  14.221 -    if (irq < 8)
  14.222 -        ret = inb(0x20) & mask;
  14.223 -    else
  14.224 -        ret = inb(0xA0) & (mask >> 8);
  14.225 -    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.226 -
  14.227 -    return ret;
  14.228 -}
  14.229 -
  14.230 -void make_8259A_irq(unsigned int irq)
  14.231 -{
  14.232 -    disable_irq_nosync(irq);
  14.233 -    io_apic_irqs &= ~(1<<irq);
  14.234 -    irq_desc[irq].handler = &i8259A_irq_type;
  14.235 -    enable_irq(irq);
  14.236 -}
  14.237 -
  14.238 -/*
   14.239 - * This function is expected to be called rarely. Switching between
  14.240 - * 8259A registers is slow.
  14.241 - * This has to be protected by the irq controller spinlock
  14.242 - * before being called.
  14.243 - */
  14.244 -static inline int i8259A_irq_real(unsigned int irq)
  14.245 -{
  14.246 -    int value;
  14.247 -    int irqmask = 1<<irq;
  14.248 -
  14.249 -    if (irq < 8) {
  14.250 -        outb(0x0B,0x20);		/* ISR register */
  14.251 -        value = inb(0x20) & irqmask;
  14.252 -        outb(0x0A,0x20);		/* back to the IRR register */
  14.253 -        return value;
  14.254 -    }
  14.255 -    outb(0x0B,0xA0);		/* ISR register */
  14.256 -    value = inb(0xA0) & (irqmask >> 8);
  14.257 -    outb(0x0A,0xA0);		/* back to the IRR register */
  14.258 -    return value;
  14.259 -}
  14.260 -
  14.261 -/*
  14.262 - * Careful! The 8259A is a fragile beast, it pretty
  14.263 - * much _has_ to be done exactly like this (mask it
  14.264 - * first, _then_ send the EOI, and the order of EOI
  14.265 - * to the two 8259s is important!
  14.266 - */
  14.267 -void mask_and_ack_8259A(unsigned int irq)
  14.268 -{
  14.269 -    unsigned int irqmask = 1 << irq;
  14.270 -    unsigned long flags;
  14.271 -
  14.272 -    spin_lock_irqsave(&i8259A_lock, flags);
  14.273 -    /*
  14.274 -     * Lightweight spurious IRQ detection. We do not want
  14.275 -     * to overdo spurious IRQ handling - it's usually a sign
  14.276 -     * of hardware problems, so we only do the checks we can
   14.277 -     * do without slowing down good hardware unnecessarily.
  14.278 -     *
  14.279 -     * Note that IRQ7 and IRQ15 (the two spurious IRQs
  14.280 -     * usually resulting from the 8259A-1|2 PICs) occur
  14.281 -     * even if the IRQ is masked in the 8259A. Thus we
  14.282 -     * can check spurious 8259A IRQs without doing the
  14.283 -     * quite slow i8259A_irq_real() call for every IRQ.
  14.284 -     * This does not cover 100% of spurious interrupts,
  14.285 -     * but should be enough to warn the user that there
  14.286 -     * is something bad going on ...
  14.287 -     */
  14.288 -    if (cached_irq_mask & irqmask)
  14.289 -        goto spurious_8259A_irq;
  14.290 -    cached_irq_mask |= irqmask;
  14.291 -
  14.292 - handle_real_irq:
  14.293 -    if (irq & 8) {
  14.294 -        inb(0xA1);		/* DUMMY - (do we need this?) */
  14.295 -        outb(cached_A1,0xA1);
  14.296 -        outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
  14.297 -        outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
  14.298 -    } else {
  14.299 -        inb(0x21);		/* DUMMY - (do we need this?) */
  14.300 -        outb(cached_21,0x21);
  14.301 -        outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
  14.302 -    }
  14.303 -    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.304 -    return;
  14.305 -
  14.306 - spurious_8259A_irq:
  14.307 -    /*
  14.308 -     * this is the slow path - should happen rarely.
  14.309 -     */
  14.310 -    if (i8259A_irq_real(irq))
  14.311 -        /*
  14.312 -         * oops, the IRQ _is_ in service according to the
  14.313 -         * 8259A - not spurious, go handle it.
  14.314 -         */
  14.315 -        goto handle_real_irq;
  14.316 -
  14.317 -    {
  14.318 -        static int spurious_irq_mask;
  14.319 -        /*
  14.320 -         * At this point we can be sure the IRQ is spurious,
   14.321 -         * let's ACK and report it. [once per IRQ]
  14.322 -         */
  14.323 -        if (!(spurious_irq_mask & irqmask)) {
  14.324 -            printk("spurious 8259A interrupt: IRQ%d.\n", irq);
  14.325 -            spurious_irq_mask |= irqmask;
  14.326 -        }
  14.327 -        atomic_inc(&irq_err_count);
  14.328 -        /*
  14.329 -         * Theoretically we do not have to handle this IRQ,
  14.330 -         * but in Linux this does not cause problems and is
  14.331 -         * simpler for us.
  14.332 -         */
  14.333 -        goto handle_real_irq;
  14.334 -    }
  14.335 -}
  14.336 -
  14.337 -void __init init_8259A(int auto_eoi)
  14.338 -{
  14.339 -    unsigned long flags;
  14.340 -
  14.341 -    spin_lock_irqsave(&i8259A_lock, flags);
  14.342 -
  14.343 -    outb(0xff, 0x21);	/* mask all of 8259A-1 */
  14.344 -    outb(0xff, 0xA1);	/* mask all of 8259A-2 */
  14.345 -
  14.346 -    /*
  14.347 -     * outb_p - this has to work on a wide range of PC hardware.
  14.348 -     */
  14.349 -    outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
  14.350 -    outb_p(0x30 + 0, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
  14.351 -    outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
  14.352 -    if (auto_eoi)
  14.353 -        outb_p(0x03, 0x21);	/* master does Auto EOI */
  14.354 -    else
  14.355 -        outb_p(0x01, 0x21);	/* master expects normal EOI */
  14.356 -
  14.357 -    outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
  14.358 -    outb_p(0x30 + 8, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
  14.359 -    outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
  14.360 -    outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
  14.361 -                           is to be investigated) */
  14.362 -
  14.363 -    if (auto_eoi)
  14.364 -        /*
  14.365 -         * in AEOI mode we just have to mask the interrupt
  14.366 -         * when acking.
  14.367 -         */
  14.368 -        i8259A_irq_type.ack = disable_8259A_irq;
  14.369 -    else
  14.370 -        i8259A_irq_type.ack = mask_and_ack_8259A;
  14.371 -
  14.372 -    udelay(100);		/* wait for 8259A to initialize */
  14.373 -
  14.374 -    outb(cached_21, 0x21);	/* restore master IRQ mask */
  14.375 -    outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
  14.376 -
  14.377 -    spin_unlock_irqrestore(&i8259A_lock, flags);
  14.378 -}
  14.379 -
  14.380 -
  14.381 -/*
  14.382 - * IRQ2 is cascade interrupt to second interrupt controller
  14.383 - */
  14.384 -
  14.385 -static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
  14.386 -
  14.387 -void __init init_ISA_irqs (void)
  14.388 -{
  14.389 -    int i;
  14.390 -
  14.391 -#ifdef CONFIG_X86_LOCAL_APIC
  14.392 -    init_bsp_APIC();
  14.393 -#endif
  14.394 -    init_8259A(0);
  14.395 -
  14.396 -    for (i = 0; i < NR_IRQS; i++) {
  14.397 -        irq_desc[i].status = IRQ_DISABLED;
  14.398 -        irq_desc[i].action = 0;
  14.399 -        irq_desc[i].depth = 1;
  14.400 -
  14.401 -        if (i < 16) {
  14.402 -            /*
  14.403 -             * 16 old-style INTA-cycle interrupts:
  14.404 -             */
  14.405 -            irq_desc[i].handler = &i8259A_irq_type;
  14.406 -        } else {
  14.407 -            /*
  14.408 -             * 'high' PCI IRQs filled in on demand
  14.409 -             */
  14.410 -            irq_desc[i].handler = &no_irq_type;
  14.411 -        }
  14.412 -    }
  14.413 -}
  14.414 -
  14.415 -void __init init_IRQ(void)
  14.416 -{
  14.417 -    int i;
  14.418 -
  14.419 -    init_ISA_irqs();
  14.420 -
  14.421 -    /*
  14.422 -     * Cover the whole vector space, no vector can escape
  14.423 -     * us. (some of these will be overridden and become
  14.424 -     * 'special' SMP interrupts)
  14.425 -     */
  14.426 -    for (i = 0; i < NR_IRQS; i++) {
  14.427 -        int vector = FIRST_EXTERNAL_VECTOR + i;
  14.428 -        if (vector != HYPERVISOR_CALL_VECTOR) 
  14.429 -            set_intr_gate(vector, interrupt[i]);
  14.430 -    }
  14.431 -
  14.432 -#ifdef CONFIG_SMP
  14.433 -    /*
  14.434 -     * IRQ0 must be given a fixed assignment and initialized,
  14.435 -     * because it's used before the IO-APIC is set up.
  14.436 -     */
  14.437 -    set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
  14.438 -
  14.439 -    /*
  14.440 -     * The reschedule interrupt is a CPU-to-CPU reschedule-helper
  14.441 -     * IPI, driven by wakeup.
  14.442 -     */
  14.443 -    set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
  14.444 -
  14.445 -    /* IPI for invalidation */
  14.446 -    set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
  14.447 -
  14.448 -    /* IPI for generic function call */
  14.449 -    set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
  14.450 -#endif	
  14.451 -
  14.452 -#ifdef CONFIG_X86_LOCAL_APIC
  14.453 -    /* self generated IPI for local APIC timer */
  14.454 -    set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
  14.455 -
  14.456 -    /* IPI vectors for APIC spurious and error interrupts */
  14.457 -    set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
  14.458 -    set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
  14.459 -#endif
  14.460 -
  14.461 -    /*
  14.462 -     * Set the clock to HZ Hz, we already have a valid
  14.463 -     * vector now:
  14.464 -     */
  14.465 -#define CLOCK_TICK_RATE 1193180 /* crystal freq (Hz) */
  14.466 -#define LATCH (((CLOCK_TICK_RATE)+(HZ/2))/HZ)
  14.467 -    outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
  14.468 -    outb_p(LATCH & 0xff , 0x40);	/* LSB */
  14.469 -    outb(LATCH >> 8 , 0x40);	/* MSB */
  14.470 -
  14.471 -    setup_irq(2, &irq2);
  14.472 -}
  14.473 -
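For reference, the PIT programming at the end of init_IRQ() works out as follows (assuming HZ = 100 for the illustration): LATCH = (1193180 + 50) / 100 = 11932, so channel 0 divides the 1.19318 MHz input clock by 11932 and fires at 1193180 / 11932 ≈ 99.998 Hz, as close to HZ as an integer divisor allows.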
    15.1 --- a/xen/arch/i386/idle0_task.c	Thu Jun 10 14:24:30 2004 +0000
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,15 +0,0 @@
    15.4 -#include <xen/config.h>
    15.5 -#include <xen/sched.h>
    15.6 -#include <asm/desc.h>
    15.7 -
    15.8 -struct task_struct idle0_task = IDLE0_TASK(idle0_task);
    15.9 -
   15.10 -/*
   15.11 - * per-CPU TSS segments. Threads are completely 'soft' on Linux,
   15.12 - * no more per-task TSS's. The TSS size is kept cacheline-aligned
   15.13 - * so they are allowed to end up in the .data.cacheline_aligned
   15.14 - * section. Since TSS's are completely CPU-local, we want them
   15.15 - * on exact cacheline boundaries, to eliminate cacheline ping-pong.
   15.16 - */ 
   15.17 -struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS };
   15.18 -
    16.1 --- a/xen/arch/i386/io_apic.c	Thu Jun 10 14:24:30 2004 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,1944 +0,0 @@
    16.4 -/*
    16.5 - *	Intel IO-APIC support for multi-Pentium hosts.
    16.6 - *
    16.7 - *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
    16.8 - *
    16.9 - *	Many thanks to Stig Venaas for trying out countless experimental
   16.10 - *	patches and reporting/debugging problems patiently!
   16.11 - *
   16.12 - *	(c) 1999, Multiple IO-APIC support, developed by
   16.13 - *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
   16.14 - *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
   16.15 - *	further tested and cleaned up by Zach Brown <zab@redhat.com>
   16.16 - *	and Ingo Molnar <mingo@redhat.com>
   16.17 - *
   16.18 - *	Fixes
   16.19 - *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
   16.20 - *					thanks to Eric Gilmore
   16.21 - *					and Rolf G. Tews
   16.22 - *					for testing these extensively
   16.23 - *	Paul Diefenbaugh	:	Added full ACPI support
   16.24 - */
   16.25 -
   16.26 -#include <xen/config.h>
   16.27 -#include <xen/init.h>
   16.28 -#include <xen/interrupt.h>
   16.29 -#include <xen/irq.h>
   16.30 -#include <xen/delay.h>
   16.31 -#include <xen/sched.h>
   16.32 -#include <xen/config.h>
   16.33 -#include <asm/mc146818rtc.h>
   16.34 -#include <asm/io.h>
   16.35 -#include <asm/mpspec.h>
   16.36 -#include <asm/io_apic.h>
   16.37 -#include <asm/smp.h>
   16.38 -#include <asm/desc.h>
   16.39 -#include <asm/smpboot.h>
   16.40 -
   16.41 -#ifdef CONFIG_X86_IO_APIC
   16.42 -
   16.43 -#undef APIC_LOCKUP_DEBUG
   16.44 -
   16.45 -#define APIC_LOCKUP_DEBUG
   16.46 -
   16.47 -static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
   16.48 -
   16.49 -unsigned int int_dest_addr_mode = APIC_DEST_LOGICAL;
   16.50 -unsigned char int_delivery_mode = dest_LowestPrio;
   16.51 -
   16.52 -
   16.53 -/*
   16.54 - * # of IRQ routing registers
   16.55 - */
   16.56 -int nr_ioapic_registers[MAX_IO_APICS];
   16.57 -
   16.58 -/*
    16.59 - * Rough estimate of how many shared IRQs there are; this can
    16.60 - * be changed at any time.
   16.61 - */
   16.62 -#define MAX_PLUS_SHARED_IRQS NR_IRQS
   16.63 -#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
   16.64 -
   16.65 -/*
   16.66 - * This is performance-critical, we want to do it O(1)
   16.67 - *
   16.68 - * the indexing order of this array favors 1:1 mappings
   16.69 - * between pins and IRQs.
   16.70 - */
   16.71 -
   16.72 -static struct irq_pin_list {
   16.73 -	int apic, pin, next;
   16.74 -} irq_2_pin[PIN_MAP_SIZE];
   16.75 -
   16.76 -/*
   16.77 - * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
   16.78 - * shared ISA-space IRQs, so we have to support them. We are super
   16.79 - * fast in the common case, and fast for shared ISA-space IRQs.
   16.80 - */
   16.81 -static void __init add_pin_to_irq(unsigned int irq, int apic, int pin)
   16.82 -{
   16.83 -	static int first_free_entry = NR_IRQS;
   16.84 -	struct irq_pin_list *entry = irq_2_pin + irq;
   16.85 -
   16.86 -	while (entry->next)
   16.87 -		entry = irq_2_pin + entry->next;
   16.88 -
   16.89 -	if (entry->pin != -1) {
   16.90 -		entry->next = first_free_entry;
   16.91 -		entry = irq_2_pin + entry->next;
   16.92 -		if (++first_free_entry >= PIN_MAP_SIZE)
   16.93 -			panic("io_apic.c: whoops");
   16.94 -	}
   16.95 -	entry->apic = apic;
   16.96 -	entry->pin = pin;
   16.97 -}
   16.98 -
   16.99 -/*
  16.100 - * Reroute an IRQ to a different pin.
  16.101 - */
  16.102 -static void __init replace_pin_at_irq(unsigned int irq,
  16.103 -				      int oldapic, int oldpin,
  16.104 -				      int newapic, int newpin)
  16.105 -{
  16.106 -	struct irq_pin_list *entry = irq_2_pin + irq;
  16.107 -
  16.108 -	while (1) {
  16.109 -		if (entry->apic == oldapic && entry->pin == oldpin) {
  16.110 -			entry->apic = newapic;
  16.111 -			entry->pin = newpin;
  16.112 -		}
  16.113 -		if (!entry->next)
  16.114 -			break;
  16.115 -		entry = irq_2_pin + entry->next;
  16.116 -	}
  16.117 -}
  16.118 -
  16.119 -#define __DO_ACTION(R, ACTION, FINAL)					\
  16.120 -									\
  16.121 -{									\
  16.122 -	int pin;							\
  16.123 -	struct irq_pin_list *entry = irq_2_pin + irq;			\
  16.124 -									\
  16.125 -	for (;;) {							\
  16.126 -		unsigned int reg;					\
  16.127 -		pin = entry->pin;					\
  16.128 -		if (pin == -1)						\
  16.129 -			break;						\
  16.130 -		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
  16.131 -		reg ACTION;						\
  16.132 -		io_apic_write(entry->apic, 0x10 + R + pin*2, reg);	\
  16.133 -		if (!entry->next)					\
  16.134 -			break;						\
  16.135 -		entry = irq_2_pin + entry->next;			\
  16.136 -	}								\
  16.137 -	FINAL;								\
  16.138 -}
  16.139 -
  16.140 -#define DO_ACTION(name,R,ACTION, FINAL)					\
  16.141 -									\
  16.142 -	static void name##_IO_APIC_irq (unsigned int irq)		\
  16.143 -	__DO_ACTION(R, ACTION, FINAL)
  16.144 -
  16.145 -DO_ACTION( __mask,    0, |= 0x00010000, io_apic_sync(entry->apic) )
  16.146 -DO_ACTION( __unmask,  0, &= 0xfffeffff, )
  16.147 -DO_ACTION( __edge,    0, &= 0xffff7fff, )
  16.148 -DO_ACTION( __level,   0, |= 0x00008000, )
  16.149 -
  16.150 -static void mask_IO_APIC_irq (unsigned int irq)
  16.151 -{
  16.152 -	unsigned long flags;
  16.153 -
  16.154 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.155 -	__mask_IO_APIC_irq(irq);
  16.156 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.157 -}
  16.158 -
  16.159 -static void unmask_IO_APIC_irq (unsigned int irq)
  16.160 -{
  16.161 -	unsigned long flags;
  16.162 -
  16.163 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.164 -	__unmask_IO_APIC_irq(irq);
  16.165 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.166 -}
  16.167 -
  16.168 -void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
  16.169 -{
  16.170 -	struct IO_APIC_route_entry entry;
  16.171 -	unsigned long flags;
  16.172 -
  16.173 -	/* Check delivery_mode to be sure we're not clearing an SMI pin */
  16.174 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.175 -	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
  16.176 -	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
  16.177 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.178 -	if (entry.delivery_mode == dest_SMI)
  16.179 -		return;
  16.180 -
  16.181 -	/*
  16.182 -	 * Disable it in the IO-APIC irq-routing table:
  16.183 -	 */
  16.184 -	memset(&entry, 0, sizeof(entry));
  16.185 -	entry.mask = 1;
  16.186 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.187 -	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
  16.188 -	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
  16.189 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.190 -}
  16.191 -
  16.192 -static void clear_IO_APIC (void)
  16.193 -{
  16.194 -	int apic, pin;
  16.195 -
  16.196 -	for (apic = 0; apic < nr_ioapics; apic++)
  16.197 -		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
  16.198 -			clear_IO_APIC_pin(apic, pin);
  16.199 -}
  16.200 -
  16.201 -static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
  16.202 -{
  16.203 -	unsigned long flags;
  16.204 -
  16.205 -	/*
  16.206 -	 * Only the first 8 bits are valid.
  16.207 -	 */
  16.208 -	mask = mask << 24;
  16.209 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.210 -	__DO_ACTION(1, = mask, )
  16.211 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.212 -}
  16.213 -
  16.214 -#define balance_irq(_irq) ((void)0)
  16.215 -
  16.216 -/*
  16.217 - * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
  16.218 - * specific CPU-side IRQs.
  16.219 - */
  16.220 -
  16.221 -#define MAX_PIRQS 8
  16.222 -int pirq_entries [MAX_PIRQS];
  16.223 -int pirqs_enabled;
  16.224 -
  16.225 -int skip_ioapic_setup;
  16.226 -#if 0
  16.227 -
  16.228 -static int __init noioapic_setup(char *str)
  16.229 -{
  16.230 -	skip_ioapic_setup = 1;
  16.231 -	return 1;
  16.232 -}
  16.233 -
  16.234 -__setup("noapic", noioapic_setup);
  16.235 -
  16.236 -static int __init ioapic_setup(char *str)
  16.237 -{
  16.238 -	skip_ioapic_setup = 0;
  16.239 -	return 1;
  16.240 -}
  16.241 -
  16.242 -__setup("apic", ioapic_setup);
  16.243 -
  16.244 -
  16.245 -
  16.246 -static int __init ioapic_pirq_setup(char *str)
  16.247 -{
  16.248 -	int i, max;
  16.249 -	int ints[MAX_PIRQS+1];
  16.250 -
  16.251 -	get_options(str, ARRAY_SIZE(ints), ints);
  16.252 -
  16.253 -	for (i = 0; i < MAX_PIRQS; i++)
  16.254 -		pirq_entries[i] = -1;
  16.255 -
  16.256 -	pirqs_enabled = 1;
  16.257 -	printk(KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n");
  16.258 -	max = MAX_PIRQS;
  16.259 -	if (ints[0] < MAX_PIRQS)
  16.260 -		max = ints[0];
  16.261 -
  16.262 -	for (i = 0; i < max; i++) {
  16.263 -		printk(KERN_DEBUG "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
  16.264 -		/*
  16.265 -		 * PIRQs are mapped upside down, usually.
  16.266 -		 */
  16.267 -		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
  16.268 -	}
  16.269 -	return 1;
  16.270 -}
  16.271 -
  16.272 -__setup("pirq=", ioapic_pirq_setup);
  16.273 -
  16.274 -#endif
  16.275 -
  16.276 -/*
  16.277 - * Find the IRQ entry number of a certain pin.
  16.278 - */
  16.279 -static int __init find_irq_entry(int apic, int pin, int type)
  16.280 -{
  16.281 -	int i;
  16.282 -
  16.283 -	for (i = 0; i < mp_irq_entries; i++)
  16.284 -		if (mp_irqs[i].mpc_irqtype == type &&
  16.285 -		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
  16.286 -		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
  16.287 -		    mp_irqs[i].mpc_dstirq == pin)
  16.288 -			return i;
  16.289 -
  16.290 -	return -1;
  16.291 -}
  16.292 -
  16.293 -/*
  16.294 - * Find the pin to which IRQ[irq] (ISA) is connected
  16.295 - */
  16.296 -static int __init find_isa_irq_pin(int irq, int type)
  16.297 -{
  16.298 -	int i;
  16.299 -
  16.300 -	for (i = 0; i < mp_irq_entries; i++) {
  16.301 -		int lbus = mp_irqs[i].mpc_srcbus;
  16.302 -
  16.303 -		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
  16.304 -		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
  16.305 -		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
  16.306 -		    (mp_irqs[i].mpc_irqtype == type) &&
  16.307 -		    (mp_irqs[i].mpc_srcbusirq == irq))
  16.308 -
  16.309 -			return mp_irqs[i].mpc_dstirq;
  16.310 -	}
  16.311 -	return -1;
  16.312 -}
  16.313 -
  16.314 -/*
  16.315 - * Find a specific PCI IRQ entry.
  16.316 - * Not an __init, possibly needed by modules
  16.317 - */
  16.318 -static int pin_2_irq(int idx, int apic, int pin);
  16.319 -
  16.320 -int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
  16.321 -{
  16.322 -	int apic, i, best_guess = -1;
  16.323 -
  16.324 -	Dprintk("querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
  16.325 -		bus, slot, pin);
  16.326 -	if ((mp_bus_id_to_pci_bus==NULL) || (mp_bus_id_to_pci_bus[bus] == -1)) {
  16.327 -		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
  16.328 -		return -1;
  16.329 -	}
  16.330 -	for (i = 0; i < mp_irq_entries; i++) {
  16.331 -		int lbus = mp_irqs[i].mpc_srcbus;
  16.332 -
  16.333 -		for (apic = 0; apic < nr_ioapics; apic++)
  16.334 -			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
  16.335 -			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
  16.336 -				break;
  16.337 -
  16.338 -		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
  16.339 -		    !mp_irqs[i].mpc_irqtype &&
  16.340 -		    (bus == lbus) &&
  16.341 -		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
  16.342 -			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
  16.343 -
  16.344 -			if (!(apic || IO_APIC_IRQ(irq)))
  16.345 -				continue;
  16.346 -
  16.347 -			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
  16.348 -				return irq;
  16.349 -			/*
  16.350 -			 * Use the first all-but-pin matching entry as a
  16.351 -			 * best-guess fuzzy result for broken mptables.
  16.352 -			 */
  16.353 -			if (best_guess < 0)
  16.354 -				best_guess = irq;
  16.355 -		}
  16.356 -	}
  16.357 -	return best_guess;
  16.358 -}
  16.359 -
  16.360 -/*
  16.361 - * EISA Edge/Level control register, ELCR
  16.362 - */
  16.363 -static int __init EISA_ELCR(unsigned int irq)
  16.364 -{
  16.365 -	if (irq < 16) {
  16.366 -		unsigned int port = 0x4d0 + (irq >> 3);
  16.367 -		return (inb(port) >> (irq & 7)) & 1;
  16.368 -	}
  16.369 -	printk(KERN_INFO "Broken MPtable reports ISA irq %d\n", irq);
  16.370 -	return 0;
  16.371 -}
  16.372 -
  16.373 -/* EISA interrupts are always polarity zero and can be edge or level
  16.374 - * trigger depending on the ELCR value.  If an interrupt is listed as
  16.375 - * EISA conforming in the MP table, that means its trigger type must
  16.376 - * be read in from the ELCR */
  16.377 -
  16.378 -#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
  16.379 -#define default_EISA_polarity(idx)	(0)
  16.380 -
  16.381 -/* ISA interrupts are always polarity zero edge triggered,
  16.382 - * when listed as conforming in the MP table. */
  16.383 -
  16.384 -#define default_ISA_trigger(idx)	(0)
  16.385 -#define default_ISA_polarity(idx)	(0)
  16.386 -
  16.387 -/* PCI interrupts are always polarity one level triggered,
  16.388 - * when listed as conforming in the MP table. */
  16.389 -
  16.390 -#define default_PCI_trigger(idx)	(1)
  16.391 -#define default_PCI_polarity(idx)	(1)
  16.392 -
  16.393 -/* MCA interrupts are always polarity zero level triggered,
  16.394 - * when listed as conforming in the MP table. */
  16.395 -
  16.396 -#define default_MCA_trigger(idx)	(1)
  16.397 -#define default_MCA_polarity(idx)	(0)
  16.398 -
  16.399 -static int __init MPBIOS_polarity(int idx)
  16.400 -{
  16.401 -	int bus = mp_irqs[idx].mpc_srcbus;
  16.402 -	int polarity;
  16.403 -
  16.404 -	/*
  16.405 -	 * Determine IRQ line polarity (high active or low active):
  16.406 -	 */
  16.407 -	switch (mp_irqs[idx].mpc_irqflag & 3)
  16.408 -	{
  16.409 -		case 0: /* conforms, ie. bus-type dependent polarity */
  16.410 -		{
  16.411 -			switch (mp_bus_id_to_type[bus])
  16.412 -			{
  16.413 -				case MP_BUS_ISA: /* ISA pin */
  16.414 -				{
  16.415 -					polarity = default_ISA_polarity(idx);
  16.416 -					break;
  16.417 -				}
  16.418 -				case MP_BUS_EISA: /* EISA pin */
  16.419 -				{
  16.420 -					polarity = default_EISA_polarity(idx);
  16.421 -					break;
  16.422 -				}
  16.423 -				case MP_BUS_PCI: /* PCI pin */
  16.424 -				{
  16.425 -					polarity = default_PCI_polarity(idx);
  16.426 -					break;
  16.427 -				}
  16.428 -				case MP_BUS_MCA: /* MCA pin */
  16.429 -				{
  16.430 -					polarity = default_MCA_polarity(idx);
  16.431 -					break;
  16.432 -				}
  16.433 -				default:
  16.434 -				{
  16.435 -					printk(KERN_WARNING "broken BIOS!!\n");
  16.436 -					polarity = 1;
  16.437 -					break;
  16.438 -				}
  16.439 -			}
  16.440 -			break;
  16.441 -		}
  16.442 -		case 1: /* high active */
  16.443 -		{
  16.444 -			polarity = 0;
  16.445 -			break;
  16.446 -		}
  16.447 -		case 2: /* reserved */
  16.448 -		{
  16.449 -			printk(KERN_WARNING "broken BIOS!!\n");
  16.450 -			polarity = 1;
  16.451 -			break;
  16.452 -		}
  16.453 -		case 3: /* low active */
  16.454 -		{
  16.455 -			polarity = 1;
  16.456 -			break;
  16.457 -		}
  16.458 -		default: /* invalid */
  16.459 -		{
  16.460 -			printk(KERN_WARNING "broken BIOS!!\n");
  16.461 -			polarity = 1;
  16.462 -			break;
  16.463 -		}
  16.464 -	}
  16.465 -	return polarity;
  16.466 -}
  16.467 -
  16.468 -static int __init MPBIOS_trigger(int idx)
  16.469 -{
  16.470 -	int bus = mp_irqs[idx].mpc_srcbus;
  16.471 -	int trigger;
  16.472 -
  16.473 -	/*
  16.474 -	 * Determine IRQ trigger mode (edge or level sensitive):
  16.475 -	 */
  16.476 -	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
  16.477 -	{
  16.478 -		case 0: /* conforms, ie. bus-type dependent */
  16.479 -		{
  16.480 -			switch (mp_bus_id_to_type[bus])
  16.481 -			{
  16.482 -				case MP_BUS_ISA: /* ISA pin */
  16.483 -				{
  16.484 -					trigger = default_ISA_trigger(idx);
  16.485 -					break;
  16.486 -				}
  16.487 -				case MP_BUS_EISA: /* EISA pin */
  16.488 -				{
  16.489 -					trigger = default_EISA_trigger(idx);
  16.490 -					break;
  16.491 -				}
  16.492 -				case MP_BUS_PCI: /* PCI pin */
  16.493 -				{
  16.494 -					trigger = default_PCI_trigger(idx);
  16.495 -					break;
  16.496 -				}
  16.497 -				case MP_BUS_MCA: /* MCA pin */
  16.498 -				{
  16.499 -					trigger = default_MCA_trigger(idx);
  16.500 -					break;
  16.501 -				}
  16.502 -				default:
  16.503 -				{
  16.504 -					printk(KERN_WARNING "broken BIOS!!\n");
  16.505 -					trigger = 1;
  16.506 -					break;
  16.507 -				}
  16.508 -			}
  16.509 -			break;
  16.510 -		}
  16.511 -		case 1: /* edge */
  16.512 -		{
  16.513 -			trigger = 0;
  16.514 -			break;
  16.515 -		}
  16.516 -		case 2: /* reserved */
  16.517 -		{
  16.518 -			printk(KERN_WARNING "broken BIOS!!\n");
  16.519 -			trigger = 1;
  16.520 -			break;
  16.521 -		}
  16.522 -		case 3: /* level */
  16.523 -		{
  16.524 -			trigger = 1;
  16.525 -			break;
  16.526 -		}
  16.527 -		default: /* invalid */
  16.528 -		{
  16.529 -			printk(KERN_WARNING "broken BIOS!!\n");
  16.530 -			trigger = 0;
  16.531 -			break;
  16.532 -		}
  16.533 -	}
  16.534 -	return trigger;
  16.535 -}
  16.536 -
  16.537 -static inline int irq_polarity(int idx)
  16.538 -{
  16.539 -	return MPBIOS_polarity(idx);
  16.540 -}
  16.541 -
  16.542 -static inline int irq_trigger(int idx)
  16.543 -{
  16.544 -	return MPBIOS_trigger(idx);
  16.545 -}
  16.546 -
  16.547 -static int pin_2_irq(int idx, int apic, int pin)
  16.548 -{
  16.549 -	int irq, i;
  16.550 -	int bus = mp_irqs[idx].mpc_srcbus;
  16.551 -
  16.552 -	/*
  16.553 -	 * Debugging check, we are in big trouble if this message pops up!
  16.554 -	 */
  16.555 -	if (mp_irqs[idx].mpc_dstirq != pin)
  16.556 -		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
  16.557 -
  16.558 -	switch (mp_bus_id_to_type[bus])
  16.559 -	{
  16.560 -		case MP_BUS_ISA: /* ISA pin */
  16.561 -		case MP_BUS_EISA:
  16.562 -		case MP_BUS_MCA:
  16.563 -		{
  16.564 -			irq = mp_irqs[idx].mpc_srcbusirq;
  16.565 -			break;
  16.566 -		}
  16.567 -		case MP_BUS_PCI: /* PCI pin */
  16.568 -		{
  16.569 -			/*
  16.570 -			 * PCI IRQs are mapped in order
  16.571 -			 */
  16.572 -			i = irq = 0;
  16.573 -			while (i < apic)
  16.574 -				irq += nr_ioapic_registers[i++];
  16.575 -			irq += pin;
  16.576 -			break;
  16.577 -		}
  16.578 -		default:
  16.579 -		{
  16.580 -			printk(KERN_ERR "unknown bus type %d.\n",bus); 
  16.581 -			irq = 0;
  16.582 -			break;
  16.583 -		}
  16.584 -	}
  16.585 -
  16.586 -	/*
  16.587 -	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
  16.588 -	 */
  16.589 -	if ((pin >= 16) && (pin <= 23)) {
  16.590 -		if (pirq_entries[pin-16] != -1) {
  16.591 -			if (!pirq_entries[pin-16]) {
  16.592 -				printk(KERN_DEBUG "disabling PIRQ%d\n", pin-16);
  16.593 -			} else {
  16.594 -				irq = pirq_entries[pin-16];
  16.595 -				printk(KERN_DEBUG "using PIRQ%d -> IRQ %d\n",
  16.596 -						pin-16, irq);
  16.597 -			}
  16.598 -		}
  16.599 -	}
  16.600 -	return irq;
  16.601 -}
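
A worked example of the MP_BUS_PCI branch above (the per-APIC register counts are assumed for the illustration): with nr_ioapic_registers[] = {24, 16}, pin 5 of IO-APIC 1 maps to irq = 24 + 5 = 29, i.e. PCI pins are numbered consecutively across the IO-APICs; pins 16-23 can then still be overridden by the pirq= redirection handled just below the switch.
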
  16.602 -
  16.603 -static inline int IO_APIC_irq_trigger(int irq)
  16.604 -{
  16.605 -	int apic, idx, pin;
  16.606 -
  16.607 -	for (apic = 0; apic < nr_ioapics; apic++) {
  16.608 -		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
  16.609 -			idx = find_irq_entry(apic,pin,mp_INT);
  16.610 -			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
  16.611 -				return irq_trigger(idx);
  16.612 -		}
  16.613 -	}
  16.614 -	/*
  16.615 -	 * nonexistent IRQs are edge default
  16.616 -	 */
  16.617 -	return 0;
  16.618 -}
  16.619 -
  16.620 -int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 };
  16.621 -
  16.622 -static int __init assign_irq_vector(int irq)
  16.623 -{
  16.624 -	static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
  16.625 -	if (IO_APIC_VECTOR(irq) > 0)
  16.626 -		return IO_APIC_VECTOR(irq);
  16.627 -next:
  16.628 -	current_vector += 8;
  16.629 -
  16.630 -        /* XXX Skip the guestOS -> Xen syscall vector! XXX */
  16.631 -	if (current_vector == HYPERVISOR_CALL_VECTOR) goto next;
  16.632 -        /* XXX Skip the Linux/BSD fast-trap vector! XXX */
  16.633 -        if (current_vector == 0x80) goto next;
  16.634 -
  16.635 -	if (current_vector > FIRST_SYSTEM_VECTOR) {
  16.636 -		offset++;
  16.637 -		current_vector = FIRST_DEVICE_VECTOR + offset;
  16.638 -	}
  16.639 -
  16.640 -	if (current_vector == FIRST_SYSTEM_VECTOR)
  16.641 -		panic("ran out of interrupt sources!");
  16.642 -
  16.643 -	IO_APIC_VECTOR(irq) = current_vector;
  16.644 -	return current_vector;
  16.645 -}
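
A worked illustration of the allocator above (FIRST_DEVICE_VECTOR is assumed to be 0x31 here, as in the contemporary Linux code this derives from): successive IRQs receive vectors 0x39, 0x41, 0x49, ... in steps of 8, with HYPERVISOR_CALL_VECTOR and the 0x80 fast-trap vector skipped; once the walk passes FIRST_SYSTEM_VECTOR, offset is incremented and the scan restarts from FIRST_DEVICE_VECTOR + offset, filling the slots skipped on earlier passes, until it eventually hits FIRST_SYSTEM_VECTOR exactly and panics.
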
  16.646 -
  16.647 -extern void (*interrupt[NR_IRQS])(void);
  16.648 -
  16.649 -/*
  16.650 - * Level and edge triggered IO-APIC interrupts need different handling,
  16.651 - * so we use two separate IRQ descriptors. Edge triggered IRQs can be
  16.652 - * handled with the level-triggered descriptor, but that one has slightly
  16.653 - * more overhead. Level-triggered interrupts cannot be handled with the
  16.654 - * edge-triggered handler, without risking IRQ storms and other ugly
  16.655 - * races.
  16.656 - */
  16.657 -
  16.658 -static unsigned int startup_edge_ioapic_irq(unsigned int irq);
  16.659 -#define shutdown_edge_ioapic_irq  disable_edge_ioapic_irq
  16.660 -#define enable_edge_ioapic_irq    unmask_IO_APIC_irq
  16.661 -static void disable_edge_ioapic_irq (unsigned int irq);
  16.662 -static void ack_edge_ioapic_irq(unsigned int irq);
  16.663 -static void end_edge_ioapic_irq (unsigned int i);
  16.664 -static struct hw_interrupt_type ioapic_edge_irq_type = {
  16.665 -	"IO-APIC-edge",
  16.666 -	startup_edge_ioapic_irq,
  16.667 -	shutdown_edge_ioapic_irq,
  16.668 -	enable_edge_ioapic_irq,
  16.669 -	disable_edge_ioapic_irq,
  16.670 -	ack_edge_ioapic_irq,
  16.671 -	end_edge_ioapic_irq,
  16.672 -	set_ioapic_affinity,
  16.673 -};
  16.674 -
  16.675 -static unsigned int startup_level_ioapic_irq (unsigned int irq);
  16.676 -#define shutdown_level_ioapic_irq mask_IO_APIC_irq
  16.677 -#define enable_level_ioapic_irq   unmask_IO_APIC_irq
  16.678 -#define disable_level_ioapic_irq  mask_IO_APIC_irq
  16.679 -static void mask_and_ack_level_ioapic_irq (unsigned int irq);
  16.680 -static void end_level_ioapic_irq (unsigned int irq);
  16.681 -static struct hw_interrupt_type ioapic_level_irq_type = {
  16.682 -	"IO-APIC-level",
  16.683 -	startup_level_ioapic_irq,
  16.684 -	shutdown_level_ioapic_irq,
  16.685 -	enable_level_ioapic_irq,
  16.686 -	disable_level_ioapic_irq,
  16.687 -	mask_and_ack_level_ioapic_irq,
  16.688 -	end_level_ioapic_irq,
  16.689 -	set_ioapic_affinity,
  16.690 -};
  16.691 -
  16.692 -void __init setup_IO_APIC_irqs(void)
  16.693 -{
  16.694 -	struct IO_APIC_route_entry entry;
  16.695 -	int apic, pin, idx, irq, first_notcon = 1, vector;
  16.696 -	unsigned long flags;
  16.697 -
  16.698 -	printk(KERN_DEBUG "init IO_APIC IRQs\n");
  16.699 -
  16.700 -	for (apic = 0; apic < nr_ioapics; apic++) {
  16.701 -	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
  16.702 -
  16.703 -		/*
  16.704 -		 * add it to the IO-APIC irq-routing table:
  16.705 -		 */
  16.706 -		memset(&entry,0,sizeof(entry));
  16.707 -
  16.708 -		entry.delivery_mode = INT_DELIVERY_MODE;
  16.709 -		entry.dest_mode = (INT_DEST_ADDR_MODE != 0);
  16.710 -		entry.mask = 0;				/* enable IRQ */
  16.711 -		entry.dest.logical.logical_dest = target_cpus();
  16.712 -
  16.713 -		idx = find_irq_entry(apic,pin,mp_INT);
  16.714 -		if (idx == -1) {
  16.715 -			if (first_notcon) {
  16.716 -				printk(KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
  16.717 -				first_notcon = 0;
  16.718 -			} else
  16.719 -				printk(", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
  16.720 -			continue;
  16.721 -		}
  16.722 -
  16.723 -		entry.trigger = irq_trigger(idx);
  16.724 -		entry.polarity = irq_polarity(idx);
  16.725 -
  16.726 -		if (irq_trigger(idx)) {
  16.727 -			entry.trigger = 1;
  16.728 -			entry.mask = 1;
  16.729 -		}
  16.730 -
  16.731 -		irq = pin_2_irq(idx, apic, pin);
  16.732 -		/*
  16.733 -		 * skip adding the timer int on secondary nodes, which causes
  16.734 -		 * a small but painful rift in the time-space continuum
  16.735 -		 */
  16.736 -		if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) 
  16.737 -			&& (apic != 0) && (irq == 0))
  16.738 -			continue;
  16.739 -		else
  16.740 -			add_pin_to_irq(irq, apic, pin);
  16.741 -
  16.742 -		if (!apic && !IO_APIC_IRQ(irq))
  16.743 -			continue;
  16.744 -
  16.745 -		if (IO_APIC_IRQ(irq)) {
  16.746 -			vector = assign_irq_vector(irq);
  16.747 -			entry.vector = vector;
  16.748 -
  16.749 -			if (IO_APIC_irq_trigger(irq))
  16.750 -				irq_desc[irq].handler = &ioapic_level_irq_type;
  16.751 -			else
  16.752 -				irq_desc[irq].handler = &ioapic_edge_irq_type;
  16.753 -
  16.754 -			set_intr_gate(vector, interrupt[irq]);
  16.755 -		
  16.756 -			if (!apic && (irq < 16))
  16.757 -				disable_8259A_irq(irq);
  16.758 -		}
  16.759 -		spin_lock_irqsave(&ioapic_lock, flags);
  16.760 -		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
  16.761 -		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
  16.762 -		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.763 -	}
  16.764 -	}
  16.765 -
  16.766 -	if (!first_notcon)
  16.767 -		printk(" not connected.\n");
  16.768 -}
  16.769 -
  16.770 -/*
  16.771 - * Set up the 8259A-master output pin as broadcast to all
  16.772 - * CPUs.
  16.773 - */
  16.774 -void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
  16.775 -{
  16.776 -	struct IO_APIC_route_entry entry;
  16.777 -	unsigned long flags;
  16.778 -
  16.779 -	memset(&entry,0,sizeof(entry));
  16.780 -
  16.781 -	disable_8259A_irq(0);
  16.782 -
  16.783 -	/* mask LVT0 */
  16.784 -	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
  16.785 -
  16.786 -	/*
  16.787 -	 * We use logical delivery to get the timer IRQ
  16.788 -	 * to the first CPU.
  16.789 -	 */
  16.790 -	entry.dest_mode = (INT_DEST_ADDR_MODE != 0);
  16.791 -	entry.mask = 0;					/* unmask IRQ now */
  16.792 -	entry.dest.logical.logical_dest = target_cpus();
  16.793 -	entry.delivery_mode = INT_DELIVERY_MODE;
  16.794 -	entry.polarity = 0;
  16.795 -	entry.trigger = 0;
  16.796 -	entry.vector = vector;
  16.797 -
  16.798 -	/*
  16.799 -	 * The timer IRQ doesn't have to know that behind the
   16.800 -	 * scenes we have an 8259A-master in AEOI mode ...
  16.801 -	 */
  16.802 -	irq_desc[0].handler = &ioapic_edge_irq_type;
  16.803 -
  16.804 -	/*
  16.805 -	 * Add it to the IO-APIC irq-routing table:
  16.806 -	 */
  16.807 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.808 -	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
  16.809 -	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
  16.810 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.811 -
  16.812 -	enable_8259A_irq(0);
  16.813 -}
  16.814 -
  16.815 -void __init UNEXPECTED_IO_APIC(void)
  16.816 -{
  16.817 -	printk(KERN_WARNING 
  16.818 -		"An unexpected IO-APIC was found. If this kernel release is less than\n"
  16.819 -		"three months old please report this to linux-smp@vger.kernel.org\n");
  16.820 -}
  16.821 -
  16.822 -void __init print_IO_APIC(void)
  16.823 -{
  16.824 -#ifndef NDEBUG
  16.825 -	int apic, i;
  16.826 -	struct IO_APIC_reg_00 reg_00;
  16.827 -	struct IO_APIC_reg_01 reg_01;
  16.828 -	struct IO_APIC_reg_02 reg_02;
  16.829 -	struct IO_APIC_reg_03 reg_03;
  16.830 -	unsigned long flags;
  16.831 -
  16.832 - 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
  16.833 -	for (i = 0; i < nr_ioapics; i++)
  16.834 -		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
  16.835 -		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
  16.836 -
  16.837 -	/*
  16.838 -	 * We are a bit conservative about what we expect.  We have to
  16.839 -	 * know about every hardware change ASAP.
  16.840 -	 */
  16.841 -	printk(KERN_INFO "testing the IO APIC.......................\n");
  16.842 -
  16.843 -	for (apic = 0; apic < nr_ioapics; apic++) {
  16.844 -
  16.845 -	spin_lock_irqsave(&ioapic_lock, flags);
  16.846 -	*(int *)&reg_00 = io_apic_read(apic, 0);
  16.847 -	*(int *)&reg_01 = io_apic_read(apic, 1);
  16.848 -	if (reg_01.version >= 0x10)
  16.849 -		*(int *)&reg_02 = io_apic_read(apic, 2);
  16.850 -	if (reg_01.version >= 0x20)
  16.851 -		*(int *)&reg_03 = io_apic_read(apic, 3);
  16.852 -	spin_unlock_irqrestore(&ioapic_lock, flags);
  16.853 -
  16.854 -	printk("\n");
  16.855 -	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
  16.856 -	printk(KERN_DEBUG ".... register #00: %08X\n", *(int *)&reg_00);
  16.857 -	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.ID);
  16.858 -	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.delivery_type);
  16.859 -	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.LTS);
  16.860 -	if (reg_00.__reserved_0 || reg_00.__reserved_1 || reg_00.__reserved_2)
  16.861 -		UNEXPECTED_IO_APIC();
  16.862 -
  16.863 -	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
  16.864 -	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.entries);
  16.865 -	if (	(reg_01.entries != 0x0f) && /* older (Neptune) boards */
  16.866 -		(reg_01.entries != 0x17) && /* typical ISA+PCI boards */
  16.867 -		(reg_01.entries != 0x1b) && /* Compaq Proliant boards */
  16.868 -		(reg_01.entries != 0x1f) && /* dual Xeon boards */
  16.869 -		(reg_01.entries != 0x22) && /* bigger Xeon boards */
  16.870 -		(reg_01.entries != 0x2E) &&
  16.871 -		(reg_01.entries != 0x3F)
  16.872 -	)
  16.873 -		UNEXPECTED_IO_APIC();
  16.874 -
  16.875 -	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.PRQ);
  16.876 -	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.version);
  16.877 -	if (	(reg_01.version != 0x01) && /* 82489DX IO-APICs */
  16.878 -		(reg_01.version != 0x02) && /* VIA */
  16.879 -		(reg_01.version != 0x03) && /* later VIA */
  16.880 -		(reg_01.version != 0x10) && /* oldest IO-APICs */
  16.881 -		(reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */
  16.882 -		(reg_01.version != 0x13) && /* Xeon IO-APICs */
  16.883 -		(reg_01.version != 0x20)    /* Intel P64H (82806 AA) */
  16.884 -	)
  16.885 -		UNEXPECTED_IO_APIC();
  16.886 -	if (reg_01.__reserved_1 || reg_01.__reserved_2)
  16.887 -		UNEXPECTED_IO_APIC();
  16.888 -
  16.889 -	/*
  16.890 -	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
  16.891 -	 * but the value of reg_02 is read as the previous read register
  16.892 -	 * value, so ignore it if reg_02 == reg_01.
  16.893 -	 */
  16.894 -	if (reg_01.version >= 0x10 && *(int *)&reg_02 != *(int *)&reg_01) {
  16.895 -		printk(KERN_DEBUG ".... register #02: %08X\n", *(int *)&reg_02);
  16.896 -		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.arbitration);
  16.897 -		if (reg_02.__reserved_1 || reg_02.__reserved_2)
  16.898 -			UNEXPECTED_IO_APIC();
  16.899 -	}
  16.900 -
  16.901 -	/*
  16.902 -	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
  16.903 -	 * or reg_03, but the value of reg_0[23] is read as the previous read
  16.904 -	 * register value, so ignore it if reg_03 == reg_0[12].
  16.905 -	 */
  16.906 -	if (reg_01.version >= 0x20 && *(int *)&reg_03 != *(int *)&reg_02 &&
  16.907 -	    *(int *)&reg_03 != *(int *)&reg_01) {
  16.908 -		printk(KERN_DEBUG ".... register #03: %08X\n", *(int *)&reg_03);
  16.909 -		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.boot_DT);
  16.910 -		if (reg_03.__reserved_1)
  16.911 -			UNEXPECTED_IO_APIC();
  16.912 -	}
  16.913 -
  16.914 -	printk(KERN_DEBUG ".... IRQ redirection table:\n");
  16.915 -
  16.916 -	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
  16.917 -			  " Stat Dest Deli Vect:   \n");
  16.918 -
  16.919 -	for (i = 0; i <= reg_01.entries; i++) {
  16.920 -		struct IO_APIC_route_entry entry;
  16.921 -
  16.922 -		spin_lock_irqsave(&ioapic_lock, flags);
  16.923 -		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
  16.924 -		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
  16.925 -		spin_unlock_irqrestore(&ioapic_lock, flags);
  16.926 -
  16.927 -		printk(KERN_DEBUG " %02x %03X %02X  ",
  16.928 -			i,
  16.929 -			entry.dest.logical.logical_dest,
  16.930 -			entry.dest.physical.physical_dest
  16.931 -		);
  16.932 -
  16.933 -		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
  16.934 -			entry.mask,
  16.935 -			entry.trigger,
  16.936 -			entry.irr,
  16.937 -			entry.polarity,
  16.938 -			entry.delivery_status,
  16.939 -			entry.dest_mode,
  16.940 -			entry.delivery_mode,
  16.941 -			entry.vector
  16.942 -		);
  16.943 -	}
  16.944 -	}
  16.945 -	printk(KERN_DEBUG "IRQ to pin mappings:\n");
  16.946 -	for (i = 0; i < NR_IRQS; i++) {
  16.947 -		struct irq_pin_list *entry = irq_2_pin + i;
  16.948 -		if (entry->pin < 0)
  16.949 -			continue;
  16.950 -		printk(KERN_DEBUG "IRQ%d ", i);
  16.951 -		for (;;) {
  16.952 -			printk("-> %d:%d", entry->apic, entry->pin);
  16.953 -			if (!entry->next)
  16.954 -				break;
  16.955 -			entry = irq_2_pin + entry->next;
  16.956 -		}
  16.957 -		printk("\n");
  16.958 -	}
  16.959 -
  16.960 -	printk(KERN_INFO ".................................... done.\n");
  16.961 -#endif
  16.962 -}
  16.963 -
  16.964 -
  16.965 -#if 0 /* Maybe useful for debugging, but not currently used anywhere. */
  16.966 -
  16.967 -static void print_APIC_bitfield (int base)
  16.968 -{
  16.969 -	unsigned int v;
  16.970 -	int i, j;
  16.971 -
  16.972 -	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
  16.973 -	for (i = 0; i < 8; i++) {
  16.974 -		v = apic_read(base + i*0x10);
  16.975 -		for (j = 0; j < 32; j++) {
  16.976 -			if (v & (1<<j))
  16.977 -				printk("1");
  16.978 -			else
  16.979 -				printk("0");
  16.980 -		}
  16.981 -		printk("\n");
  16.982 -	}
  16.983 -}
  16.984 -
  16.985 -
  16.986 -void /*__init*/ print_local_APIC(void * dummy)
  16.987 -{
  16.988 -	unsigned int v, ver, maxlvt;
  16.989 -
  16.990 -	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
  16.991 -		smp_processor_id(), hard_smp_processor_id());
  16.992 -	v = apic_read(APIC_ID);
  16.993 -	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
  16.994 -	v = apic_read(APIC_LVR);
  16.995 -	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
  16.996 -	ver = GET_APIC_VERSION(v);
  16.997 -	maxlvt = get_maxlvt();
  16.998 -
  16.999 -	v = apic_read(APIC_TASKPRI);
 16.1000 -	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
 16.1001 -
 16.1002 -	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
 16.1003 -		v = apic_read(APIC_ARBPRI);
 16.1004 -		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
 16.1005 -			v & APIC_ARBPRI_MASK);
 16.1006 -		v = apic_read(APIC_PROCPRI);
 16.1007 -		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
 16.1008 -	}
 16.1009 -
 16.1010 -	v = apic_read(APIC_EOI);
 16.1011 -	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
 16.1012 -	v = apic_read(APIC_RRR);
 16.1013 -	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
 16.1014 -	v = apic_read(APIC_LDR);
 16.1015 -	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
 16.1016 -	v = apic_read(APIC_DFR);
 16.1017 -	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
 16.1018 -	v = apic_read(APIC_SPIV);
 16.1019 -	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
 16.1020 -
 16.1021 -	printk(KERN_DEBUG "... APIC ISR field:\n");
 16.1022 -	print_APIC_bitfield(APIC_ISR);
 16.1023 -	printk(KERN_DEBUG "... APIC TMR field:\n");
 16.1024 -	print_APIC_bitfield(APIC_TMR);
 16.1025 -	printk(KERN_DEBUG "... APIC IRR field:\n");
 16.1026 -	print_APIC_bitfield(APIC_IRR);
 16.1027 -
 16.1028 -	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
 16.1029 -		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
 16.1030 -			apic_write(APIC_ESR, 0);
 16.1031 -		v = apic_read(APIC_ESR);
 16.1032 -		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
 16.1033 -	}
 16.1034 -
 16.1035 -	v = apic_read(APIC_ICR);
 16.1036 -	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
 16.1037 -	v = apic_read(APIC_ICR2);
 16.1038 -	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
 16.1039 -
 16.1040 -	v = apic_read(APIC_LVTT);
 16.1041 -	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
 16.1042 -
 16.1043 -	if (maxlvt > 3) {                       /* PC is LVT#4. */
 16.1044 -		v = apic_read(APIC_LVTPC);
 16.1045 -		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
 16.1046 -	}
 16.1047 -	v = apic_read(APIC_LVT0);
 16.1048 -	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
 16.1049 -	v = apic_read(APIC_LVT1);
 16.1050 -	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
 16.1051 -
 16.1052 -	if (maxlvt > 2) {			/* ERR is LVT#3. */
 16.1053 -		v = apic_read(APIC_LVTERR);
 16.1054 -		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
 16.1055 -	}
 16.1056 -
 16.1057 -	v = apic_read(APIC_TMICT);
 16.1058 -	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
 16.1059 -	v = apic_read(APIC_TMCCT);
 16.1060 -	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
 16.1061 -	v = apic_read(APIC_TDCR);
 16.1062 -	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
 16.1063 -	printk("\n");
 16.1064 -}
 16.1065 -
 16.1066 -void print_all_local_APICs (void)
 16.1067 -{
 16.1068 -	smp_call_function(print_local_APIC, NULL, 1, 1);
 16.1069 -	print_local_APIC(NULL);
 16.1070 -}
 16.1071 -
 16.1072 -void /*__init*/ print_PIC(void)
 16.1073 -{
 16.1074 -	extern spinlock_t i8259A_lock;
 16.1075 -	unsigned int v, flags;
 16.1076 -
 16.1077 -	printk(KERN_DEBUG "\nprinting PIC contents\n");
 16.1078 -
 16.1079 -	spin_lock_irqsave(&i8259A_lock, flags);
 16.1080 -
 16.1081 -	v = inb(0xa1) << 8 | inb(0x21);
 16.1082 -	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
 16.1083 -
 16.1084 -	v = inb(0xa0) << 8 | inb(0x20);
 16.1085 -	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
 16.1086 -
 16.1087 -	outb(0x0b,0xa0);
 16.1088 -	outb(0x0b,0x20);
 16.1089 -	v = inb(0xa0) << 8 | inb(0x20);
 16.1090 -	outb(0x0a,0xa0);
 16.1091 -	outb(0x0a,0x20);
 16.1092 -
 16.1093 -	spin_unlock_irqrestore(&i8259A_lock, flags);
 16.1094 -
 16.1095 -	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
 16.1096 -
 16.1097 -	v = inb(0x4d1) << 8 | inb(0x4d0);
 16.1098 -	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
 16.1099 -}
 16.1100 -
 16.1101 -#endif /* 0 */
 16.1102 -
 16.1103 -
 16.1104 -static void __init enable_IO_APIC(void)
 16.1105 -{
 16.1106 -	struct IO_APIC_reg_01 reg_01;
 16.1107 -	int i;
 16.1108 -	unsigned long flags;
 16.1109 -
 16.1110 -	for (i = 0; i < PIN_MAP_SIZE; i++) {
 16.1111 -		irq_2_pin[i].pin = -1;
 16.1112 -		irq_2_pin[i].next = 0;
 16.1113 -	}
 16.1114 -	if (!pirqs_enabled)
 16.1115 -		for (i = 0; i < MAX_PIRQS; i++)
 16.1116 -			pirq_entries[i] = -1;
 16.1117 -
 16.1118 -	/*
 16.1119 -	 * The number of IO-APIC IRQ registers (== #pins):
 16.1120 -	 */
 16.1121 -	for (i = 0; i < nr_ioapics; i++) {
 16.1122 -		spin_lock_irqsave(&ioapic_lock, flags);
 16.1123 -		*(int *)&reg_01 = io_apic_read(i, 1);
 16.1124 -		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1125 -		nr_ioapic_registers[i] = reg_01.entries+1;
 16.1126 -	}
 16.1127 -
 16.1128 -	/*
 16.1129 -	 * Do not trust the IO-APIC being empty at bootup
 16.1130 -	 */
 16.1131 -	clear_IO_APIC();
 16.1132 -}
 16.1133 -
 16.1134 -/*
 16.1135 - * Not an __init, needed by the reboot code
 16.1136 - */
 16.1137 -void disable_IO_APIC(void)
 16.1138 -{
 16.1139 -	/*
 16.1140 -	 * Clear the IO-APIC before rebooting:
 16.1141 -	 */
 16.1142 -	clear_IO_APIC();
 16.1143 -
 16.1144 -	disconnect_bsp_APIC();
 16.1145 -}
 16.1146 -
 16.1147 -/*
 16.1148 - * function to set the IO-APIC physical IDs based on the
 16.1149 - * values stored in the MPC table.
 16.1150 - *
 16.1151 - * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 16.1152 - */
 16.1153 -
 16.1154 -static void __init setup_ioapic_ids_from_mpc (void)
 16.1155 -{
 16.1156 -	struct IO_APIC_reg_00 reg_00;
 16.1157 -	unsigned long phys_id_present_map = phys_cpu_present_map;
 16.1158 -	int apic;
 16.1159 -	int i;
 16.1160 -	unsigned char old_id;
 16.1161 -	unsigned long flags;
 16.1162 -
 16.1163 -	if (clustered_apic_mode)
 16.1164 -		/* We don't have a good way to do this yet - hack */
 16.1165 -		phys_id_present_map = (u_long) 0xf;
 16.1166 -	/*
 16.1167 -	 * Set the IOAPIC ID to the value stored in the MPC table.
 16.1168 -	 */
 16.1169 -	for (apic = 0; apic < nr_ioapics; apic++) {
 16.1170 -
 16.1171 -		/* Read the register 0 value */
 16.1172 -		spin_lock_irqsave(&ioapic_lock, flags);
 16.1173 -		*(int *)&reg_00 = io_apic_read(apic, 0);
 16.1174 -		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1175 -		
 16.1176 -		old_id = mp_ioapics[apic].mpc_apicid;
 16.1177 -
 16.1178 -		if (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id) {
 16.1179 -			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
 16.1180 -				apic, mp_ioapics[apic].mpc_apicid);
 16.1181 -			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 16.1182 -				reg_00.ID);
 16.1183 -			mp_ioapics[apic].mpc_apicid = reg_00.ID;
 16.1184 -		}
 16.1185 -
 16.1186 -		/*
 16.1187 -		 * Sanity check, is the ID really free? Every APIC in a
 16.1188 -		 * system must have a unique ID or we get lots of nice
 16.1189 -		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 16.1190 -		 * I/O APIC IDs no longer have any meaning for xAPICs and SAPICs.
 16.1191 -		 */
 16.1192 -		if ((clustered_apic_mode != CLUSTERED_APIC_XAPIC) &&
 16.1193 -		    (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid))) {
 16.1194 -			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 16.1195 -				apic, mp_ioapics[apic].mpc_apicid);
 16.1196 -			for (i = 0; i < 0xf; i++)
 16.1197 -				if (!(phys_id_present_map & (1 << i)))
 16.1198 -					break;
 16.1199 -			if (i >= apic_broadcast_id)
 16.1200 -				panic("Max APIC ID exceeded!\n");
 16.1201 -			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 16.1202 -				i);
 16.1203 -			phys_id_present_map |= 1 << i;
 16.1204 -			mp_ioapics[apic].mpc_apicid = i;
 16.1205 -		} else {
 16.1206 -			printk("Setting %d in the phys_id_present_map\n", mp_ioapics[apic].mpc_apicid);
 16.1207 -			phys_id_present_map |= 1 << mp_ioapics[apic].mpc_apicid;
 16.1208 -		}
 16.1209 -
 16.1210 -
 16.1211 -		/*
 16.1212 -		 * We need to adjust the IRQ routing table
 16.1213 -		 * if the ID changed.
 16.1214 -		 */
 16.1215 -		if (old_id != mp_ioapics[apic].mpc_apicid)
 16.1216 -			for (i = 0; i < mp_irq_entries; i++)
 16.1217 -				if (mp_irqs[i].mpc_dstapic == old_id)
 16.1218 -					mp_irqs[i].mpc_dstapic
 16.1219 -						= mp_ioapics[apic].mpc_apicid;
 16.1220 -
 16.1221 -		/*
 16.1222 -		 * Read the right value from the MPC table and
 16.1223 -		 * write it into the ID register.
 16.1224 -	 	 */
 16.1225 -		printk(KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
 16.1226 -					mp_ioapics[apic].mpc_apicid);
 16.1227 -
 16.1228 -		reg_00.ID = mp_ioapics[apic].mpc_apicid;
 16.1229 -		spin_lock_irqsave(&ioapic_lock, flags);
 16.1230 -		io_apic_write(apic, 0, *(int *)&reg_00);
 16.1231 -		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1232 -
 16.1233 -		/*
 16.1234 -		 * Sanity check
 16.1235 -		 */
 16.1236 -		spin_lock_irqsave(&ioapic_lock, flags);
 16.1237 -		*(int *)&reg_00 = io_apic_read(apic, 0);
 16.1238 -		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1239 -		if (reg_00.ID != mp_ioapics[apic].mpc_apicid)
 16.1240 -			panic("could not set ID!\n");
 16.1241 -		else
 16.1242 -			printk(" ok.\n");
 16.1243 -	}
 16.1244 -}
 16.1245 -
 16.1246 -/*
 16.1247 - * There is a nasty bug in some older SMP boards, their mptable lies
 16.1248 - * about the timer IRQ. We do the following to work around the situation:
 16.1249 - *
 16.1250 - *	- timer IRQ defaults to IO-APIC IRQ
 16.1251 - *	- if this function detects that timer IRQs are defunct, then we fall
 16.1252 - *	  back to ISA timer IRQs
 16.1253 - */
 16.1254 -static int __init timer_irq_works(void)
 16.1255 -{
 16.1256 -	unsigned int t1 = jiffies;
 16.1257 -
 16.1258 -	sti();
 16.1259 -	/* Let ten ticks pass... */
 16.1260 -	mdelay((10 * 1000) / HZ);
 16.1261 -
 16.1262 -	/*
 16.1263 -	 * Expect a few ticks at least, to be sure some possible
 16.1264 -	 * glue logic does not lock up after one or two first
 16.1265 -	 * ticks in a non-ExtINT mode.  Also the local APIC
 16.1266 -	 * might have cached one ExtINT interrupt.  Finally, at
 16.1267 -	 * least one tick may be lost due to delays.
 16.1268 -	 */
 16.1269 -	if (jiffies - t1 > 4)
 16.1270 -		return 1;
 16.1271 -
 16.1272 -	return 0;
 16.1273 -}
 16.1274 -
 16.1275 -static void disable_edge_ioapic_irq (unsigned int irq) { /* nothing */ }
 16.1276 -
 16.1277 -/*
  16.1278 - * Starting up an edge-triggered IO-APIC interrupt is
 16.1279 - * nasty - we need to make sure that we get the edge.
  16.1280 - * If it is already asserted for some reason, we need to
  16.1281 - * return 1 to indicate that it was pending.
 16.1282 - *
 16.1283 - * This is not complete - we should be able to fake
 16.1284 - * an edge even if it isn't on the 8259A...
 16.1285 - */
 16.1286 -
 16.1287 -static unsigned int startup_edge_ioapic_irq(unsigned int irq)
 16.1288 -{
 16.1289 -	int was_pending = 0;
 16.1290 -	unsigned long flags;
 16.1291 -
 16.1292 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1293 -	if (irq < 16) {
 16.1294 -		disable_8259A_irq(irq);
 16.1295 -		if (i8259A_irq_pending(irq))
 16.1296 -			was_pending = 1;
 16.1297 -	}
 16.1298 -	__unmask_IO_APIC_irq(irq);
 16.1299 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1300 -
 16.1301 -	return was_pending;
 16.1302 -}
 16.1303 -
 16.1304 -/*
 16.1305 - * Once we have recorded IRQ_PENDING already, we can mask the
 16.1306 - * interrupt for real. This prevents IRQ storms from unhandled
 16.1307 - * devices.
 16.1308 - */
 16.1309 -static void ack_edge_ioapic_irq(unsigned int irq)
 16.1310 -{
 16.1311 -	balance_irq(irq);
 16.1312 -	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
 16.1313 -					== (IRQ_PENDING | IRQ_DISABLED))
 16.1314 -		mask_IO_APIC_irq(irq);
 16.1315 -	ack_APIC_irq();
 16.1316 -}
 16.1317 -
 16.1318 -static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ }
 16.1319 -
 16.1320 -
 16.1321 -/*
 16.1322 - * Level triggered interrupts can just be masked,
 16.1323 - * and shutting down and starting up the interrupt
  16.1324 - * is the same as enabling and disabling it -- except
  16.1325 - * that startup needs to return a "was pending" value.
 16.1326 - *
 16.1327 - * Level triggered interrupts are special because we
 16.1328 - * do not touch any IO-APIC register while handling
 16.1329 - * them. We ack the APIC in the end-IRQ handler, not
 16.1330 - * in the start-IRQ-handler. Protection against reentrance
 16.1331 - * from the same interrupt is still provided, both by the
 16.1332 - * generic IRQ layer and by the fact that an unacked local
 16.1333 - * APIC does not accept IRQs.
 16.1334 - */
 16.1335 -static unsigned int startup_level_ioapic_irq (unsigned int irq)
 16.1336 -{
 16.1337 -	unmask_IO_APIC_irq(irq);
 16.1338 -
 16.1339 -	return 0; /* don't check for pending */
 16.1340 -}
 16.1341 -
 16.1342 -static void mask_and_ack_level_ioapic_irq(unsigned int irq)
 16.1343 -{
 16.1344 -	unsigned long v;
 16.1345 -	int i;
 16.1346 -
 16.1347 -	balance_irq(irq);
 16.1348 -
 16.1349 -	mask_IO_APIC_irq(irq);
 16.1350 -
 16.1351 -/*
 16.1352 - * It appears there is an erratum which affects at least version 0x11
 16.1353 - * of I/O APIC (that's the 82093AA and cores integrated into various
 16.1354 - * chipsets).  Under certain conditions a level-triggered interrupt is
 16.1355 - * erroneously delivered as edge-triggered one but the respective IRR
 16.1356 - * bit gets set nevertheless.  As a result the I/O unit expects an EOI
 16.1357 - * message but it will never arrive and further interrupts are blocked
 16.1358 - * from the source.  The exact reason is so far unknown, but the
 16.1359 - * phenomenon was observed when two consecutive interrupt requests
 16.1360 - * from a given source get delivered to the same CPU and the source is
 16.1361 - * temporarily disabled in between.
 16.1362 - *
 16.1363 - * A workaround is to simulate an EOI message manually.  We achieve it
 16.1364 - * by setting the trigger mode to edge and then to level when the edge
 16.1365 - * trigger mode gets detected in the TMR of a local APIC for a
 16.1366 - * level-triggered interrupt.  We mask the source for the time of the
 16.1367 - * operation to prevent an edge-triggered interrupt escaping meanwhile.
 16.1368 - * The idea is from Manfred Spraul.  --macro
 16.1369 - */
 16.1370 -	i = IO_APIC_VECTOR(irq);
 16.1371 -	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 16.1372 -
 16.1373 -	ack_APIC_irq();
 16.1374 -
 16.1375 -	if (!(v & (1 << (i & 0x1f)))) {
 16.1376 -#ifdef APIC_LOCKUP_DEBUG
 16.1377 -		struct irq_pin_list *entry;
 16.1378 -#endif
 16.1379 -
 16.1380 -#ifdef APIC_MISMATCH_DEBUG
 16.1381 -		atomic_inc(&irq_mis_count);
 16.1382 -#endif
 16.1383 -		spin_lock(&ioapic_lock);
 16.1384 -		__edge_IO_APIC_irq(irq);
 16.1385 -#ifdef APIC_LOCKUP_DEBUG
 16.1386 -		for (entry = irq_2_pin + irq;;) {
 16.1387 -			unsigned int reg;
 16.1388 -
 16.1389 -			if (entry->pin == -1)
 16.1390 -				break;
 16.1391 -			reg = io_apic_read(entry->apic, 0x10 + entry->pin * 2);
 16.1392 -			if (reg & 0x00004000)
 16.1393 -				printk(KERN_CRIT "Aieee!!!  Remote IRR"
 16.1394 -					" still set after unlock!\n");
 16.1395 -			if (!entry->next)
 16.1396 -				break;
 16.1397 -			entry = irq_2_pin + entry->next;
 16.1398 -		}
 16.1399 -#endif
 16.1400 -		__level_IO_APIC_irq(irq);
 16.1401 -		spin_unlock(&ioapic_lock);
 16.1402 -	}
 16.1403 -}
 16.1404 -
 16.1405 -static void end_level_ioapic_irq(unsigned int irq)
 16.1406 -{
 16.1407 -	unmask_IO_APIC_irq(irq);
 16.1408 -}
 16.1409 -
 16.1410 -static inline void init_IO_APIC_traps(void)
 16.1411 -{
 16.1412 -	int irq;
 16.1413 -
 16.1414 -	/*
 16.1415 -	 * NOTE! The local APIC isn't very good at handling
 16.1416 -	 * multiple interrupts at the same interrupt level.
 16.1417 -	 * As the interrupt level is determined by taking the
 16.1418 -	 * vector number and shifting that right by 4, we
 16.1419 -	 * want to spread these out a bit so that they don't
 16.1420 -	 * all fall in the same interrupt level.
 16.1421 -	 *
 16.1422 -	 * Also, we've got to be careful not to trash gate
 16.1423 -	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 16.1424 -	 */
 16.1425 -	for (irq = 0; irq < NR_IRQS ; irq++) {
 16.1426 -		if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq)) {
 16.1427 -			/*
 16.1428 -			 * Hmm.. We don't have an entry for this,
 16.1429 -			 * so default to an old-fashioned 8259
 16.1430 -			 * interrupt if we can..
 16.1431 -			 */
 16.1432 -			if (irq < 16)
 16.1433 -				make_8259A_irq(irq);
 16.1434 -			else
 16.1435 -				/* Strange. Oh, well.. */
 16.1436 -				irq_desc[irq].handler = &no_irq_type;
 16.1437 -		}
 16.1438 -	}
 16.1439 -}
 16.1440 -
 16.1441 -static void enable_lapic_irq (unsigned int irq)
 16.1442 -{
 16.1443 -	unsigned long v;
 16.1444 -
 16.1445 -	v = apic_read(APIC_LVT0);
 16.1446 -	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
 16.1447 -}
 16.1448 -
 16.1449 -static void disable_lapic_irq (unsigned int irq)
 16.1450 -{
 16.1451 -	unsigned long v;
 16.1452 -
 16.1453 -	v = apic_read(APIC_LVT0);
 16.1454 -	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
 16.1455 -}
 16.1456 -
 16.1457 -static void ack_lapic_irq (unsigned int irq)
 16.1458 -{
 16.1459 -	ack_APIC_irq();
 16.1460 -}
 16.1461 -
 16.1462 -static void end_lapic_irq (unsigned int i) { /* nothing */ }
 16.1463 -
 16.1464 -static struct hw_interrupt_type lapic_irq_type = {
 16.1465 -	"local-APIC-edge",
 16.1466 -	NULL, /* startup_irq() not used for IRQ0 */
 16.1467 -	NULL, /* shutdown_irq() not used for IRQ0 */
 16.1468 -	enable_lapic_irq,
 16.1469 -	disable_lapic_irq,
 16.1470 -	ack_lapic_irq,
 16.1471 -	end_lapic_irq
 16.1472 -};
 16.1473 -
 16.1474 -/*
  16.1475 - * This looks a bit hackish but it's about the only way of sending
 16.1476 - * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 16.1477 - * not support the ExtINT mode, unfortunately.  We need to send these
 16.1478 - * cycles as some i82489DX-based boards have glue logic that keeps the
 16.1479 - * 8259A interrupt line asserted until INTA.  --macro
 16.1480 - */
 16.1481 -static inline void unlock_ExtINT_logic(void)
 16.1482 -{
 16.1483 -	int pin, i;
 16.1484 -	struct IO_APIC_route_entry entry0, entry1;
 16.1485 -	unsigned char save_control, save_freq_select;
 16.1486 -	unsigned long flags;
 16.1487 -
 16.1488 -	pin = find_isa_irq_pin(8, mp_INT);
 16.1489 -	if (pin == -1)
 16.1490 -		return;
 16.1491 -
 16.1492 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1493 -	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
 16.1494 -	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
 16.1495 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1496 -	clear_IO_APIC_pin(0, pin);
 16.1497 -
 16.1498 -	memset(&entry1, 0, sizeof(entry1));
 16.1499 -
 16.1500 -	entry1.dest_mode = 0;			/* physical delivery */
 16.1501 -	entry1.mask = 0;			/* unmask IRQ now */
 16.1502 -	entry1.dest.physical.physical_dest = hard_smp_processor_id();
 16.1503 -	entry1.delivery_mode = dest_ExtINT;
 16.1504 -	entry1.polarity = entry0.polarity;
 16.1505 -	entry1.trigger = 0;
 16.1506 -	entry1.vector = 0;
 16.1507 -
 16.1508 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1509 -	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
 16.1510 -	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
 16.1511 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1512 -
 16.1513 -	save_control = CMOS_READ(RTC_CONTROL);
 16.1514 -	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
 16.1515 -	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
 16.1516 -		   RTC_FREQ_SELECT);
 16.1517 -	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
 16.1518 -
 16.1519 -	i = 100;
 16.1520 -	while (i-- > 0) {
 16.1521 -		mdelay(10);
 16.1522 -		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
 16.1523 -			i -= 10;
 16.1524 -	}
 16.1525 -
 16.1526 -	CMOS_WRITE(save_control, RTC_CONTROL);
 16.1527 -	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 16.1528 -	clear_IO_APIC_pin(0, pin);
 16.1529 -
 16.1530 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1531 -	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
 16.1532 -	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
 16.1533 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1534 -}
 16.1535 -
 16.1536 -/*
 16.1537 - * This code may look a bit paranoid, but it's supposed to cooperate with
 16.1538 - * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 16.1539 - * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 16.1540 - * fanatically on his truly buggy board.
 16.1541 - */
 16.1542 -static inline void check_timer(void)
 16.1543 -{
 16.1544 -	extern int timer_ack;
 16.1545 -	int pin1, pin2;
 16.1546 -	int vector;
 16.1547 -
 16.1548 -	/*
 16.1549 -	 * get/set the timer IRQ vector:
 16.1550 -	 */
 16.1551 -	disable_8259A_irq(0);
 16.1552 -	vector = assign_irq_vector(0);
 16.1553 -	set_intr_gate(vector, interrupt[0]);
 16.1554 -
 16.1555 -	/*
 16.1556 -	 * Subtle, code in do_timer_interrupt() expects an AEOI
 16.1557 -	 * mode for the 8259A whenever interrupts are routed
 16.1558 -	 * through I/O APICs.  Also IRQ0 has to be enabled in
 16.1559 -	 * the 8259A which implies the virtual wire has to be
 16.1560 -	 * disabled in the local APIC.
 16.1561 -	 */
 16.1562 -	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 16.1563 -	init_8259A(1);
 16.1564 -	timer_ack = 1;
 16.1565 -	enable_8259A_irq(0);
 16.1566 -
 16.1567 -	pin1 = find_isa_irq_pin(0, mp_INT);
 16.1568 -	pin2 = find_isa_irq_pin(0, mp_ExtINT);
 16.1569 -
 16.1570 -	printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
 16.1571 -
 16.1572 -	if (pin1 != -1) {
 16.1573 -		/*
 16.1574 -		 * Ok, does IRQ0 through the IOAPIC work?
 16.1575 -		 */
 16.1576 -		unmask_IO_APIC_irq(0);
 16.1577 -		if (timer_irq_works())
 16.1578 -			return;
 16.1579 -		clear_IO_APIC_pin(0, pin1);
 16.1580 -		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
 16.1581 -	}
 16.1582 -
 16.1583 -	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
 16.1584 -	if (pin2 != -1) {
 16.1585 -		printk("\n..... (found pin %d) ...", pin2);
 16.1586 -		/*
 16.1587 -		 * legacy devices should be connected to IO APIC #0
 16.1588 -		 */
 16.1589 -		setup_ExtINT_IRQ0_pin(pin2, vector);
 16.1590 -		if (timer_irq_works()) {
 16.1591 -			printk("works.\n");
 16.1592 -			if (pin1 != -1)
 16.1593 -				replace_pin_at_irq(0, 0, pin1, 0, pin2);
 16.1594 -			else
 16.1595 -				add_pin_to_irq(0, 0, pin2);
 16.1596 -			return;
 16.1597 -		}
 16.1598 -		/*
 16.1599 -		 * Cleanup, just in case ...
 16.1600 -		 */
 16.1601 -		clear_IO_APIC_pin(0, pin2);
 16.1602 -	}
 16.1603 -	printk(" failed.\n");
 16.1604 -
 16.1605 -	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 16.1606 -
 16.1607 -	disable_8259A_irq(0);
 16.1608 -	irq_desc[0].handler = &lapic_irq_type;
 16.1609 -	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
 16.1610 -	enable_8259A_irq(0);
 16.1611 -
 16.1612 -	if (timer_irq_works()) {
 16.1613 -		printk(" works.\n");
 16.1614 -		return;
 16.1615 -	}
 16.1616 -	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
 16.1617 -	printk(" failed.\n");
 16.1618 -
 16.1619 -	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
 16.1620 -
 16.1621 -	init_8259A(0);
 16.1622 -	make_8259A_irq(0);
 16.1623 -	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
 16.1624 -
 16.1625 -	unlock_ExtINT_logic();
 16.1626 -
 16.1627 -	if (timer_irq_works()) {
 16.1628 -		printk(" works.\n");
 16.1629 -		return;
 16.1630 -	}
 16.1631 -	printk(" failed :(.\n");
 16.1632 -	panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
 16.1633 -}
 16.1634 -
 16.1635 -/*
 16.1636 - *
 16.1637 - * IRQ's that are handled by the old PIC in all cases:
  16.1638 - * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
 16.1639 - *   Linux doesn't really care, as it's not actually used
 16.1640 - *   for any interrupt handling anyway.
 16.1641 - * - There used to be IRQ13 here as well, but all
  16.1642 - *   MPS-compliant systems must not use it for FPU coupling and we
 16.1643 - *   want to use exception 16 anyway.  And there are
  16.1644 - *   systems which connect it to an I/O APIC for other uses.
 16.1645 - *   Thus we don't mark it special any longer.
 16.1646 - *
 16.1647 - * Additionally, something is definitely wrong with irq9
 16.1648 - * on PIIX4 boards.
 16.1649 - */
 16.1650 -#define PIC_IRQS	(1<<2)
 16.1651 -
 16.1652 -void __init setup_IO_APIC(void)
 16.1653 -{
 16.1654 -	enable_IO_APIC();
 16.1655 -
 16.1656 -	io_apic_irqs = ~PIC_IRQS;
 16.1657 -	printk("ENABLING IO-APIC IRQs\n");
 16.1658 -
 16.1659 -	/*
 16.1660 -	 * Set up IO-APIC IRQ routing.
 16.1661 -	 */
 16.1662 -	if (!acpi_ioapic)
 16.1663 -		setup_ioapic_ids_from_mpc();
 16.1664 -	sync_Arb_IDs();
 16.1665 -	setup_IO_APIC_irqs();
 16.1666 -	init_IO_APIC_traps();
 16.1667 -	check_timer();
 16.1668 -	if (!acpi_ioapic)
 16.1669 -		print_IO_APIC();
 16.1670 -}
 16.1671 -
 16.1672 -#endif /* CONFIG_X86_IO_APIC */
 16.1673 -
 16.1674 -
 16.1675 -
 16.1676 -/* --------------------------------------------------------------------------
 16.1677 -                          ACPI-based IOAPIC Configuration
 16.1678 -   -------------------------------------------------------------------------- */
 16.1679 -
 16.1680 -#ifdef CONFIG_ACPI_BOOT
 16.1681 -
 16.1682 -#define IO_APIC_MAX_ID		15
 16.1683 -
 16.1684 -int __init io_apic_get_unique_id (int ioapic, int apic_id)
 16.1685 -{
 16.1686 -	struct IO_APIC_reg_00 reg_00;
 16.1687 -	static unsigned long apic_id_map = 0;
 16.1688 -	unsigned long flags;
 16.1689 -	int i = 0;
 16.1690 -
 16.1691 -	/*
 16.1692 -	 * The P4 platform supports up to 256 APIC IDs on two separate APIC 
  16.1693 - * buses (one for LAPICs, one for IOAPICs), whereas predecessors only
  16.1694 - * support up to 16 on one shared APIC bus.
 16.1695 -	 * 
 16.1696 -	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
 16.1697 -	 *      advantage of new APIC bus architecture.
 16.1698 -	 */
 16.1699 -
 16.1700 -	if (!apic_id_map)
 16.1701 -		apic_id_map = phys_cpu_present_map;
 16.1702 -
 16.1703 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1704 -	*(int *)&reg_00 = io_apic_read(ioapic, 0);
 16.1705 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1706 -
 16.1707 -	if (apic_id >= IO_APIC_MAX_ID) {
 16.1708 -		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
 16.1709 -			"%d\n", ioapic, apic_id, reg_00.ID);
 16.1710 -		apic_id = reg_00.ID;
 16.1711 -	}
 16.1712 -
 16.1713 -	/* XAPICs do not need unique IDs */
 16.1714 -	if (clustered_apic_mode == CLUSTERED_APIC_XAPIC){
 16.1715 -		printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", 
 16.1716 -			ioapic, apic_id);
 16.1717 -		return apic_id;
 16.1718 -	}
 16.1719 -
 16.1720 -	/*
 16.1721 -	 * Every APIC in a system must have a unique ID or we get lots of nice 
 16.1722 -	 * 'stuck on smp_invalidate_needed IPI wait' messages.
 16.1723 -	 */
 16.1724 -	if (apic_id_map & (1 << apic_id)) {
 16.1725 -
 16.1726 -		for (i = 0; i < IO_APIC_MAX_ID; i++) {
 16.1727 -			if (!(apic_id_map & (1 << i)))
 16.1728 -				break;
 16.1729 -		}
 16.1730 -
 16.1731 -		if (i == IO_APIC_MAX_ID)
 16.1732 -			panic("Max apic_id exceeded!\n");
 16.1733 -
 16.1734 -		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
 16.1735 -			"trying %d\n", ioapic, apic_id, i);
 16.1736 -
 16.1737 -		apic_id = i;
 16.1738 -	} 
 16.1739 -
 16.1740 -	apic_id_map |= (1 << apic_id);
 16.1741 -
 16.1742 -	if (reg_00.ID != apic_id) {
 16.1743 -		reg_00.ID = apic_id;
 16.1744 -
 16.1745 -		spin_lock_irqsave(&ioapic_lock, flags);
 16.1746 -		io_apic_write(ioapic, 0, *(int *)&reg_00);
 16.1747 -		*(int *)&reg_00 = io_apic_read(ioapic, 0);
 16.1748 -		spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1749 -
 16.1750 -		/* Sanity check */
 16.1751 -		if (reg_00.ID != apic_id)
  16.1752 -			panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
 16.1753 -	}
 16.1754 -
 16.1755 -	printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
 16.1756 -
 16.1757 -	return apic_id;
 16.1758 -}
 16.1759 -
 16.1760 -
 16.1761 -int __init io_apic_get_version (int ioapic)
 16.1762 -{
 16.1763 -	struct IO_APIC_reg_01	reg_01;
 16.1764 -	unsigned long flags;
 16.1765 -
 16.1766 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1767 -	*(int *)&reg_01 = io_apic_read(ioapic, 1);
 16.1768 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1769 -
 16.1770 -	return reg_01.version;
 16.1771 -}
 16.1772 -
 16.1773 -
 16.1774 -int __init io_apic_get_redir_entries (int ioapic)
 16.1775 -{
 16.1776 -	struct IO_APIC_reg_01	reg_01;
 16.1777 -	unsigned long flags;
 16.1778 -
 16.1779 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1780 -	*(int *)&reg_01 = io_apic_read(ioapic, 1);
 16.1781 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1782 -
 16.1783 -	return reg_01.entries;
 16.1784 -}
 16.1785 -
 16.1786 -
 16.1787 -int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
 16.1788 -{
 16.1789 -	struct IO_APIC_route_entry entry;
 16.1790 -	unsigned long flags;
 16.1791 -
 16.1792 -	if (!IO_APIC_IRQ(irq)) {
  16.1793 -		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 
 16.1794 -			ioapic);
 16.1795 -		return -EINVAL;
 16.1796 -	}
 16.1797 -
 16.1798 -	/*
 16.1799 -	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
 16.1800 -	 * Note that we mask (disable) IRQs now -- these get enabled when the
 16.1801 -	 * corresponding device driver registers for this IRQ.
 16.1802 -	 */
 16.1803 -
 16.1804 -	memset(&entry,0,sizeof(entry));
 16.1805 -
 16.1806 -	entry.delivery_mode = dest_LowestPrio;
 16.1807 -	entry.dest_mode = INT_DELIVERY_MODE;
 16.1808 -	entry.dest.logical.logical_dest = target_cpus();
 16.1809 -	entry.mask = 1;					 /* Disabled (masked) */
 16.1810 -	entry.trigger = edge_level;
 16.1811 -	entry.polarity = active_high_low;
 16.1812 -
 16.1813 -	add_pin_to_irq(irq, ioapic, pin);
 16.1814 -
 16.1815 -	entry.vector = assign_irq_vector(irq);
 16.1816 -
 16.1817 -	printk(KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
 16.1818 -		"IRQ %d Mode:%i Active:%i)\n", ioapic,
 16.1819 -		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, edge_level, active_high_low);
 16.1820 -
 16.1821 -	if (edge_level) {
 16.1822 -		irq_desc[irq].handler = &ioapic_level_irq_type;
 16.1823 -	} else {
 16.1824 -		irq_desc[irq].handler = &ioapic_edge_irq_type;
 16.1825 -	}
 16.1826 -
 16.1827 -	set_intr_gate(entry.vector, interrupt[irq]);
 16.1828 -
 16.1829 -	if (!ioapic && (irq < 16))
 16.1830 -		disable_8259A_irq(irq);
 16.1831 -
 16.1832 -	spin_lock_irqsave(&ioapic_lock, flags);
 16.1833 -	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
 16.1834 -	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
 16.1835 -	spin_unlock_irqrestore(&ioapic_lock, flags);
 16.1836 -
 16.1837 -	return 0;
 16.1838 -}
 16.1839 -
 16.1840 -#endif /*CONFIG_ACPI_BOOT*/
 16.1841 -
 16.1842 -extern char opt_leveltrigger[], opt_edgetrigger[];
 16.1843 -
 16.1844 -static int __init ioapic_trigger_setup(void)
 16.1845 -{
 16.1846 -    char       *p;
 16.1847 -    irq_desc_t *desc;
 16.1848 -    long        irq;
 16.1849 -
 16.1850 -    p = opt_leveltrigger;
 16.1851 -    while ( *p != '\0' )
 16.1852 -    {
 16.1853 -        irq = simple_strtol(p, &p, 10);
 16.1854 -        if ( (irq <= 0) || (irq >= NR_IRQS) )
 16.1855 -        {
 16.1856 -            printk("IRQ '%ld' out of range in level-trigger list '%s'\n",
 16.1857 -                   irq, opt_leveltrigger);
 16.1858 -            break;
 16.1859 -        }
 16.1860 -
 16.1861 -        printk("Forcing IRQ %ld to level-trigger: ", irq);
 16.1862 -
 16.1863 -        desc = &irq_desc[irq];
 16.1864 -        spin_lock_irq(&desc->lock);
 16.1865 -
 16.1866 -        if ( desc->handler == &ioapic_level_irq_type )
 16.1867 -        {
 16.1868 -            printk("already level-triggered (no force applied).\n");
 16.1869 -        }
 16.1870 -        else if ( desc->handler != &ioapic_edge_irq_type )
 16.1871 -        {
 16.1872 -            printk("cannot force (can only force IO-APIC-edge IRQs).\n");
 16.1873 -        }
 16.1874 -        else
 16.1875 -        {
 16.1876 -            desc->handler = &ioapic_level_irq_type;
 16.1877 -            __mask_IO_APIC_irq(irq);
 16.1878 -            __level_IO_APIC_irq(irq);        
 16.1879 -            printk("done.\n");
 16.1880 -        }
 16.1881 -
 16.1882 -        spin_unlock_irq(&desc->lock);
 16.1883 -
 16.1884 -        if ( *p == '\0' )
 16.1885 -            break;
 16.1886 -
 16.1887 -        if ( *p != ',' )
 16.1888 -        {
 16.1889 -            printk("Unexpected character '%c' in level-trigger list '%s'\n",
 16.1890 -                   *p, opt_leveltrigger);
 16.1891 -            break;
 16.1892 -        }
 16.1893 -
 16.1894 -        p++;
 16.1895 -    }
 16.1896 -
 16.1897 -    p = opt_edgetrigger;
 16.1898 -    while ( *p != '\0' )
 16.1899 -    {
 16.1900 -        irq = simple_strtol(p, &p, 10);
 16.1901 -        if ( (irq <= 0) || (irq >= NR_IRQS) )
 16.1902 -        {
 16.1903 -            printk("IRQ '%ld' out of range in edge-trigger list '%s'\n",
 16.1904 -                   irq, opt_edgetrigger);
 16.1905 -            break;
 16.1906 -        }
 16.1907 -
 16.1908 -        printk("Forcing IRQ %ld to edge-trigger: ", irq);
 16.1909 -
 16.1910 -        desc = &irq_desc[irq];
 16.1911 -        spin_lock_irq(&desc->lock);
 16.1912 -
 16.1913 -        if ( desc->handler == &ioapic_edge_irq_type )
 16.1914 -        {
 16.1915 -            printk("already edge-triggered (no force applied).\n");
 16.1916 -        }
 16.1917 -        else if ( desc->handler != &ioapic_level_irq_type )
 16.1918 -        {
 16.1919 -            printk("cannot force (can only force IO-APIC-level IRQs).\n");
 16.1920 -        }
 16.1921 -        else
 16.1922 -        {
 16.1923 -            desc->handler = &ioapic_edge_irq_type;
 16.1924 -            __edge_IO_APIC_irq(irq);        
 16.1925 -            desc->status |= IRQ_PENDING; /* may have lost a masked edge */
 16.1926 -            printk("done.\n");
 16.1927 -        }
 16.1928 -
 16.1929 -        spin_unlock_irq(&desc->lock);
 16.1930 -
 16.1931 -        if ( *p == '\0' )
 16.1932 -            break;
 16.1933 -
 16.1934 -        if ( *p != ',' )
 16.1935 -        {
 16.1936 -            printk("Unexpected character '%c' in edge-trigger list '%s'\n",
 16.1937 -                   *p, opt_edgetrigger);
 16.1938 -            break;
 16.1939 -        }
 16.1940 -
 16.1941 -        p++;
 16.1942 -    }
 16.1943 -
 16.1944 -    return 0;
 16.1945 -}
 16.1946 -
 16.1947 -__initcall(ioapic_trigger_setup);
    17.1 --- a/xen/arch/i386/ioremap.c	Thu Jun 10 14:24:30 2004 +0000
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,67 +0,0 @@
    17.4 -/*
    17.5 - * arch/i386/mm/ioremap.c
    17.6 - *
    17.7 - * Re-map IO memory to kernel address space so that we can access it.
    17.8 - * This is needed for high PCI addresses that aren't mapped in the
    17.9 - * 640k-1MB IO memory area on PC's
   17.10 - *
   17.11 - * (C) Copyright 1995 1996 Linus Torvalds
   17.12 - */
   17.13 -
   17.14 -#include <xen/config.h>
   17.15 -#include <xen/lib.h>
   17.16 -#include <xen/mm.h>
   17.17 -#include <asm/io.h>
   17.18 -#include <asm/pgalloc.h>
   17.19 -#include <asm/page.h>
   17.20 -
   17.21 -static unsigned long remap_base = IOREMAP_VIRT_START;
   17.22 -
   17.23 -#define PAGE_ALIGN(addr)    (((addr)+PAGE_SIZE-1)&PAGE_MASK)
   17.24 -
   17.25 -void * __ioremap(unsigned long phys_addr, 
   17.26 -                 unsigned long size, 
   17.27 -                 unsigned long flags)
   17.28 -{
   17.29 -    unsigned long vaddr;
   17.30 -    unsigned long offset, cur=0, last_addr;
   17.31 -    l2_pgentry_t *pl2e;
   17.32 -    l1_pgentry_t *pl1e;
   17.33 -
   17.34 -    /* Don't allow wraparound or zero size */
   17.35 -    last_addr = phys_addr + size - 1;
   17.36 -    if ( (size == 0) || (last_addr < phys_addr) )
   17.37 -        return NULL;
   17.38 -
   17.39 -    /* Don't remap the low PCI/ISA area: it's always mapped. */
   17.40 -    if ( (phys_addr >= 0xA0000) && (last_addr < 0x100000) )
   17.41 -        return phys_to_virt(phys_addr);
   17.42 -
   17.43 -    if ( (remap_base + size) > (IOREMAP_VIRT_END - 1) )
   17.44 -    {
   17.45 -        printk("ioremap: going past end of reserved space!\n");
   17.46 -        return NULL;
   17.47 -    }
   17.48 -
   17.49 -    /* Mappings have to be page-aligned. */
   17.50 -    offset = phys_addr & ~PAGE_MASK;
   17.51 -    phys_addr &= PAGE_MASK;
   17.52 -    size = PAGE_ALIGN(last_addr) - phys_addr;
   17.53 -
   17.54 -    /* Ok, go for it. */
   17.55 -    vaddr = remap_base;
   17.56 -    remap_base += size;
   17.57 -    pl2e = &idle_pg_table[l2_table_offset(vaddr)];
   17.58 -    pl1e = l2_pgentry_to_l1(*pl2e++) + l1_table_offset(vaddr);
   17.59 -    do {
   17.60 -        *pl1e++ = mk_l1_pgentry((phys_addr+cur)|PAGE_HYPERVISOR|flags);
   17.61 -    }
   17.62 -    while ( (cur += PAGE_SIZE) != size );
   17.63 -
   17.64 -    return (void *)(offset + (char *)vaddr);
   17.65 -}
   17.66 -
   17.67 -void iounmap(void *addr)
   17.68 -{
   17.69 -    /* NOP for now. */
   17.70 -}
    18.1 --- a/xen/arch/i386/irq.c	Thu Jun 10 14:24:30 2004 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,1100 +0,0 @@
    18.4 -/*
    18.5 - *      linux/arch/i386/kernel/irq.c
    18.6 - *
    18.7 - *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
    18.8 - *
    18.9 - * This file contains the code used by various IRQ handling routines:
   18.10 - * asking for different IRQ's should be done through these routines
   18.11 - * instead of just grabbing them. Thus setup_irqs with different IRQ numbers
   18.12 - * shouldn't result in any weird surprises, and installing new handlers
   18.13 - * should be easier.
   18.14 - */
   18.15 -
   18.16 -/*
   18.17 - * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
   18.18 - *
   18.19 - * IRQs are in fact implemented a bit like signal handlers for the kernel.
   18.20 - * Naturally it's not a 1:1 relation, but there are similarities.
   18.21 - */
   18.22 -
   18.23 -#include <xen/config.h>
   18.24 -#include <xen/init.h>
   18.25 -#include <xen/errno.h>
   18.26 -#include <xen/sched.h>
   18.27 -#include <xen/interrupt.h>
   18.28 -#include <xen/irq.h>
   18.29 -#include <xen/slab.h>
   18.30 -#include <xen/event.h>
   18.31 -#include <asm/mpspec.h>
   18.32 -#include <asm/io_apic.h>
   18.33 -#include <asm/msr.h>
   18.34 -#include <asm/hardirq.h>
   18.35 -#include <asm/ptrace.h>
   18.36 -#include <asm/atomic.h>
   18.37 -#include <asm/io.h>
   18.38 -#include <asm/smp.h>
   18.39 -#include <asm/system.h>
   18.40 -#include <asm/bitops.h>
   18.41 -#include <asm/pgalloc.h>
   18.42 -#include <xen/delay.h>
   18.43 -#include <xen/timex.h>
   18.44 -#include <xen/perfc.h>
   18.45 -#include <asm/smpboot.h>
   18.46 -
   18.47 -/*
   18.48 - * Linux has a controller-independent x86 interrupt architecture.
    18.49 - * Every controller has a 'controller-template', which is used
   18.50 - * by the main code to do the right thing. Each driver-visible
    18.51 - * interrupt source is transparently wired to the appropriate
   18.52 - * controller. Thus drivers need not be aware of the
   18.53 - * interrupt-controller.
   18.54 - *
   18.55 - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
   18.56 - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
   18.57 - * (IO-APICs assumed to be messaging to Pentium local-APICs)
   18.58 - *
   18.59 - * the code is designed to be easily extended with new/different
   18.60 - * interrupt controllers, without having to do assembly magic.
   18.61 - */
   18.62 -
   18.63 -/*
   18.64 - * Controller mappings for all interrupt sources:
   18.65 - */
   18.66 -irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
   18.67 -{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
   18.68 -
   18.69 -#ifdef CONFIG_SMP
   18.70 -/* NB. XXX We'll want some way of fiddling with this from DOM0. */
   18.71 -unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
   18.72 -#endif
   18.73 -
   18.74 -static void __do_IRQ_guest(int irq);
   18.75 -
   18.76 -/*
   18.77 - * Special irq handlers.
   18.78 - */
   18.79 -
   18.80 -void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
   18.81 -
   18.82 -/*
   18.83 - * Generic no controller code
   18.84 - */
   18.85 -
   18.86 -static void enable_none(unsigned int irq) { }
   18.87 -static unsigned int startup_none(unsigned int irq) { return 0; }
   18.88 -static void disable_none(unsigned int irq) { }
   18.89 -static void ack_none(unsigned int irq)
   18.90 -{
   18.91 -/*
   18.92 - * 'what should we do if we get a hw irq event on an illegal vector'.
   18.93 - * each architecture has to answer this themselves, it doesnt deserve
    18.94 - * Each architecture has to answer this itself; it doesn't deserve
    18.95 - * a generic callback, I think.
   18.96 -#if CONFIG_X86
   18.97 -    printk("unexpected IRQ trap at vector %02x\n", irq);
   18.98 -#ifdef CONFIG_X86_LOCAL_APIC
   18.99 -    /*
  18.100 -         * Currently unexpected vectors happen only on SMP and APIC.
  18.101 -         * We _must_ ack these because every local APIC has only N
  18.102 -         * irq slots per priority level, and a 'hanging, unacked' IRQ
  18.103 -         * holds up an irq slot - in excessive cases (when multiple
  18.104 -         * unexpected vectors occur) that might lock up the APIC
  18.105 -         * completely.
  18.106 -         */
  18.107 -    ack_APIC_irq();
  18.108 -#endif
  18.109 -#endif
  18.110 -}
  18.111 -
  18.112 -/* startup is the same as "enable", shutdown is same as "disable" */
  18.113 -#define shutdown_none   disable_none
  18.114 -#define end_none        enable_none
  18.115 -
  18.116 -struct hw_interrupt_type no_irq_type = {
  18.117 -    "none",
  18.118 -    startup_none,
  18.119 -    shutdown_none,
  18.120 -    enable_none,
  18.121 -    disable_none,
  18.122 -    ack_none,
  18.123 -    end_none
  18.124 -};
  18.125 -
  18.126 -atomic_t irq_err_count;
  18.127 -#ifdef CONFIG_X86_IO_APIC
  18.128 -#ifdef APIC_MISMATCH_DEBUG
  18.129 -atomic_t irq_mis_count;
  18.130 -#endif
  18.131 -#endif
  18.132 -
  18.133 -/*
  18.134 - * Generic, controller-independent functions:
  18.135 - */
  18.136 -
  18.137 -/*
  18.138 - * Global interrupt locks for SMP. Allow interrupts to come in on any
  18.139 - * CPU, yet make cli/sti act globally to protect critical regions..
  18.140 - */
  18.141 -
  18.142 -#ifdef CONFIG_SMP
  18.143 -unsigned char global_irq_holder = 0xff;
   18.144 -unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
  18.145 -        
  18.146 -#define MAXCOUNT 100000000
  18.147 -
  18.148 -/*
  18.149 - * I had a lockup scenario where a tight loop doing
  18.150 - * spin_unlock()/spin_lock() on CPU#1 was racing with
  18.151 - * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
  18.152 - * apparently the spin_unlock() information did not make it
  18.153 - * through to CPU#0 ... nasty, is this by design, do we have to limit
  18.154 - * 'memory update oscillation frequency' artificially like here?
  18.155 - *
  18.156 - * Such 'high frequency update' races can be avoided by careful design, but
   18.157 - * some of our major constructs like spinlocks use similar techniques;
  18.158 - * it would be nice to clarify this issue. Set this define to 0 if you
  18.159 - * want to check whether your system freezes.  I suspect the delay done
   18.160 - * by SYNC_OTHER_CORES() correlates with 'snooping latency', but
   18.161 - * I thought that such things are guaranteed by design, since we use
  18.162 - * the 'LOCK' prefix.
  18.163 - */
  18.164 -#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
  18.165 -
  18.166 -#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
  18.167 -# define SYNC_OTHER_CORES(x) udelay(x+1)
  18.168 -#else
  18.169 -/*
  18.170 - * We have to allow irqs to arrive between __sti and __cli
  18.171 - */
  18.172 -# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
  18.173 -#endif
  18.174 -
  18.175 -static inline void wait_on_irq(int cpu)
  18.176 -{
  18.177 -    for (;;) {
  18.178 -
  18.179 -        /*
  18.180 -         * Wait until all interrupts are gone. Wait
  18.181 -         * for bottom half handlers unless we're
  18.182 -         * already executing in one..
  18.183 -         */
  18.184 -        if (!irqs_running())
  18.185 -            if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
  18.186 -                break;
  18.187 -
  18.188 -        /* Duh, we have to loop. Release the lock to avoid deadlocks */
  18.189 -        clear_bit(0,&global_irq_lock);
  18.190 -
  18.191 -        for (;;) {
  18.192 -            __sti();
  18.193 -            SYNC_OTHER_CORES(cpu);
  18.194 -            __cli();
  18.195 -            if (irqs_running())
  18.196 -                continue;
  18.197 -            if (global_irq_lock)
  18.198 -                continue;
  18.199 -            if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
  18.200 -                continue;
  18.201 -            if (!test_and_set_bit(0,&global_irq_lock))
  18.202 -                break;
  18.203 -        }
  18.204 -    }
  18.205 -}
  18.206 -
  18.207 -/*
  18.208 - * This is called when we want to synchronize with
  18.209 - * interrupts. We may for example tell a device to
  18.210 - * stop sending interrupts: but to make sure there
  18.211 - * are no interrupts that are executing on another
  18.212 - * CPU we need to call this function.
  18.213 - */
  18.214 -void synchronize_irq(void)
  18.215 -{
  18.216 -    if (irqs_running()) {
  18.217 -        /* Stupid approach */
  18.218 -        cli();
  18.219 -        sti();
  18.220 -    }
  18.221 -}
  18.222 -
  18.223 -static inline void get_irqlock(int cpu)
  18.224 -{
  18.225 -    if (test_and_set_bit(0,&global_irq_lock)) {
  18.226 -        /* do we already hold the lock? */
  18.227 -        if ((unsigned char) cpu == global_irq_holder)
  18.228 -            return;
  18.229 -        /* Uhhuh.. Somebody else got it. Wait.. */
  18.230 -        do {
  18.231 -            do {
  18.232 -                rep_nop();
  18.233 -            } while (test_bit(0,&global_irq_lock));
  18.234 -        } while (test_and_set_bit(0,&global_irq_lock));         
  18.235 -    }
  18.236 -    /* 
   18.237 -     * We also have to make sure that nobody else is running
  18.238 -     * in an interrupt context. 
  18.239 -     */
  18.240 -    wait_on_irq(cpu);
  18.241 -
  18.242 -    /*
  18.243 -     * Ok, finally..
  18.244 -     */
  18.245 -    global_irq_holder = cpu;
  18.246 -}
  18.247 -
  18.248 -#define EFLAGS_IF_SHIFT 9
  18.249 -
  18.250 -/*
  18.251 - * A global "cli()" while in an interrupt context
  18.252 - * turns into just a local cli(). Interrupts
  18.253 - * should use spinlocks for the (very unlikely)
  18.254 - * case that they ever want to protect against
  18.255 - * each other.
  18.256 - *
  18.257 - * If we already have local interrupts disabled,
  18.258 - * this will not turn a local disable into a
  18.259 - * global one (problems with spinlocks: this makes
  18.260 - * save_flags+cli+sti usable inside a spinlock).
  18.261 - */
  18.262 -void __global_cli(void)
  18.263 -{
  18.264 -    unsigned int flags;
  18.265 -
  18.266 -    __save_flags(flags);
  18.267 -    if (flags & (1 << EFLAGS_IF_SHIFT)) {
  18.268 -        int cpu = smp_processor_id();
  18.269 -        __cli();
  18.270 -        if (!local_irq_count(cpu))
  18.271 -            get_irqlock(cpu);
  18.272 -    }
  18.273 -}
  18.274 -
  18.275 -void __global_sti(void)
  18.276 -{
  18.277 -    int cpu = smp_processor_id();
  18.278 -
  18.279 -    if (!local_irq_count(cpu))
  18.280 -        release_irqlock(cpu);
  18.281 -    __sti();
  18.282 -}
  18.283 -
  18.284 -/*
  18.285 - * SMP flags value to restore to:
  18.286 - * 0 - global cli
  18.287 - * 1 - global sti
  18.288 - * 2 - local cli
  18.289 - * 3 - local sti
  18.290 - */
  18.291 -unsigned long __global_save_flags(void)
  18.292 -{
  18.293 -    int retval;
  18.294 -    int local_enabled;
  18.295 -    unsigned long flags;
  18.296 -    int cpu = smp_processor_id();
  18.297 -
  18.298 -    __save_flags(flags);
  18.299 -    local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
  18.300 -    /* default to local */
  18.301 -    retval = 2 + local_enabled;
  18.302 -
  18.303 -    /* check for global flags if we're not in an interrupt */
  18.304 -    if (!local_irq_count(cpu)) {
  18.305 -        if (local_enabled)
  18.306 -            retval = 1;
  18.307 -        if (global_irq_holder == cpu)
  18.308 -            retval = 0;
  18.309 -    }
  18.310 -    return retval;
  18.311 -}
  18.312 -
  18.313 -void __global_restore_flags(unsigned long flags)
  18.314 -{
  18.315 -    switch (flags) {
  18.316 -    case 0:
  18.317 -        __global_cli();
  18.318 -        break;
  18.319 -    case 1:
  18.320 -        __global_sti();
  18.321 -        break;
  18.322 -    case 2:
  18.323 -        __cli();
  18.324 -        break;
  18.325 -    case 3:
  18.326 -        __sti();
  18.327 -        break;
  18.328 -    default:
  18.329 -        printk("global_restore_flags: %08lx (%08lx)\n",
  18.330 -               flags, (&flags)[-1]);
  18.331 -    }
  18.332 -}
  18.333 -
  18.334 -#endif
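For context, a minimal sketch of how a caller is expected to drive the four flag values documented above; it assumes (as in contemporary Linux) that the save_flags()/cli()/restore_flags() macros expand to these __global_* functions on SMP builds:

    unsigned long flags;

    flags = __global_save_flags();   /* one of the four values listed above  */
    __global_cli();                  /* grabs the global irq lock unless we  */
                                     /* are already in interrupt context     */
    /* ... touch state shared with interrupt handlers on any CPU ... */
    __global_restore_flags(flags);   /* restore whatever the caller had      */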
  18.335 -
  18.336 -/*
  18.337 - * This should really return information about whether
  18.338 - * we should do bottom half handling etc. Right now we
  18.339 - * end up _always_ checking the bottom half, which is a
  18.340 - * waste of time and is not what some drivers would
  18.341 - * prefer.
  18.342 - */
  18.343 -static int handle_IRQ_event(unsigned int irq, 
  18.344 -                            struct pt_regs * regs, 
  18.345 -                            struct irqaction * action)
  18.346 -{
  18.347 -    int status;
  18.348 -    int cpu = smp_processor_id();
  18.349 -
  18.350 -    irq_enter(cpu, irq);
  18.351 -
  18.352 -    status = 1; /* Force the "do bottom halves" bit */
  18.353 -
  18.354 -    if (!(action->flags & SA_INTERRUPT))
  18.355 -        __sti();
  18.356 -
  18.357 -    do {
  18.358 -        status |= action->flags;
  18.359 -        action->handler(irq, action->dev_id, regs);
  18.360 -        action = action->next;
  18.361 -    } while (action);
  18.362 -
  18.363 -    __cli();
  18.364 -
  18.365 -    irq_exit(cpu, irq);
  18.366 -
  18.367 -    return status;
  18.368 -}
  18.369 -
  18.370 -/*
  18.371 - * Generic enable/disable code: this just calls
  18.372 - * down into the PIC-specific version for the actual
  18.373 - * hardware disable after having gotten the irq
  18.374 - * controller lock. 
  18.375 - */
  18.376 - 
  18.377 -/**
  18.378 - *      disable_irq_nosync - disable an irq without waiting
  18.379 - *      @irq: Interrupt to disable
  18.380 - *
  18.381 - *      Disable the selected interrupt line.  Disables and Enables are
  18.382 - *      nested.
  18.383 - *      Unlike disable_irq(), this function does not ensure existing
  18.384 - *      instances of the IRQ handler have completed before returning.
  18.385 - *
  18.386 - *      This function may be called from IRQ context.
  18.387 - */
  18.388 - 
  18.389 -inline void disable_irq_nosync(unsigned int irq)
  18.390 -{
  18.391 -    irq_desc_t *desc = irq_desc + irq;
  18.392 -    unsigned long flags;
  18.393 -
  18.394 -    spin_lock_irqsave(&desc->lock, flags);
  18.395 -    if (!desc->depth++) {
  18.396 -        desc->status |= IRQ_DISABLED;
  18.397 -        desc->handler->disable(irq);
  18.398 -    }
  18.399 -    spin_unlock_irqrestore(&desc->lock, flags);
  18.400 -}
  18.401 -
  18.402 -/**
  18.403 - *      disable_irq - disable an irq and wait for completion
  18.404 - *      @irq: Interrupt to disable
  18.405 - *
  18.406 - *      Disable the selected interrupt line.  Enables and Disables are
  18.407 - *      nested.
  18.408 - *      This function waits for any pending IRQ handlers for this interrupt
  18.409 - *      to complete before returning. If you use this function while
  18.410 - *      holding a resource the IRQ handler may need you will deadlock.
  18.411 - *
  18.412 - *      This function may be called - with care - from IRQ context.
  18.413 - */
  18.414 - 
  18.415 -void disable_irq(unsigned int irq)
  18.416 -{
  18.417 -    disable_irq_nosync(irq);
  18.418 -
  18.419 -    if (!local_irq_count(smp_processor_id())) {
  18.420 -        do {
  18.421 -            barrier();
  18.422 -            cpu_relax();
  18.423 -        } while (irq_desc[irq].status & IRQ_INPROGRESS);
  18.424 -    }
  18.425 -}
  18.426 -
  18.427 -/**
  18.428 - *      enable_irq - enable handling of an irq
  18.429 - *      @irq: Interrupt to enable
  18.430 - *
  18.431 - *      Undoes the effect of one call to disable_irq().  If this
  18.432 - *      matches the last disable, processing of interrupts on this
  18.433 - *      IRQ line is re-enabled.
  18.434 - *
  18.435 - *      This function may be called from IRQ context.
  18.436 - */
  18.437 - 
  18.438 -void enable_irq(unsigned int irq)
  18.439 -{
  18.440 -    irq_desc_t *desc = irq_desc + irq;
  18.441 -    unsigned long flags;
  18.442 -
  18.443 -    spin_lock_irqsave(&desc->lock, flags);
  18.444 -    switch (desc->depth) {
  18.445 -    case 1: {
  18.446 -        unsigned int status = desc->status & ~IRQ_DISABLED;
  18.447 -        desc->status = status;
  18.448 -        if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
  18.449 -            desc->status = status | IRQ_REPLAY;
  18.450 -            hw_resend_irq(desc->handler,irq);
  18.451 -        }
  18.452 -        desc->handler->enable(irq);
  18.453 -        /* fall-through */
  18.454 -    }
  18.455 -    default:
  18.456 -        desc->depth--;
  18.457 -        break;
  18.458 -    case 0:
  18.459 -        printk("enable_irq(%u) unbalanced from %p\n", irq,
  18.460 -               __builtin_return_address(0));
  18.461 -    }
  18.462 -    spin_unlock_irqrestore(&desc->lock, flags);
  18.463 -}
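As a minimal illustration of the depth counting implemented by the two functions above (the IRQ number is a placeholder), two nested disables require two enables before the line is live again:

    disable_irq(MYDEV_IRQ);          /* depth 0 -> 1: mask line, wait for    */
                                     /* in-flight handlers to finish         */
    disable_irq_nosync(MYDEV_IRQ);   /* depth 1 -> 2: already masked, just   */
                                     /* bumps the count                      */
    /* ... critical section with the device quiesced ... */
    enable_irq(MYDEV_IRQ);           /* depth 2 -> 1: still masked           */
    enable_irq(MYDEV_IRQ);           /* depth 1 -> 0: unmask, and replay any */
                                     /* IRQ_PENDING event via hw_resend_irq  */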
  18.464 -
  18.465 -/*
  18.466 - * do_IRQ handles all normal device IRQ's (the special
  18.467 - * SMP cross-CPU interrupts have their own specific
  18.468 - * handlers).
  18.469 - */
  18.470 -asmlinkage unsigned int do_IRQ(struct pt_regs regs)
  18.471 -{       
  18.472 -    /* 
  18.473 -     * We ack quickly, we don't want the irq controller
  18.474 -     * thinking we're snobs just because some other CPU has
  18.475 -     * disabled global interrupts (we have already done the
  18.476 -     * INT_ACK cycles, it's too late to try to pretend to the
  18.477 -     * controller that we aren't taking the interrupt).
  18.478 -     *
   18.479 -     * A 0 return value means that this irq is already being
   18.480 -     * handled by some other CPU (or is disabled).
  18.481 -     */
  18.482 -    int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code  */
  18.483 -    irq_desc_t *desc = irq_desc + irq;
  18.484 -    struct irqaction * action;
  18.485 -    unsigned int status;
  18.486 -
  18.487 -#ifdef PERF_COUNTERS
  18.488 -    int cpu = smp_processor_id();
  18.489 -    u32 cc_start, cc_end;
  18.490 -
  18.491 -    perfc_incra(irqs, cpu);
  18.492 -    rdtscl(cc_start);
  18.493 -#endif
  18.494 -
  18.495 -    spin_lock(&desc->lock);
  18.496 -    desc->handler->ack(irq);
  18.497 -
  18.498 -    /*
  18.499 -      REPLAY is when Linux resends an IRQ that was dropped earlier
  18.500 -      WAITING is used by probe to mark irqs that are being tested
  18.501 -    */
  18.502 -    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
  18.503 -    status |= IRQ_PENDING; /* we _want_ to handle it */
  18.504 -
  18.505 -    /* We hook off guest-bound IRQs for special handling. */
  18.506 -    if ( status & IRQ_GUEST )
  18.507 -    {
  18.508 -        __do_IRQ_guest(irq);
  18.509 -        spin_unlock(&desc->lock);
  18.510 -        return 1;
  18.511 -    }
  18.512 -
  18.513 -    /*
  18.514 -     * If the IRQ is disabled for whatever reason, we cannot use the action we 
  18.515 -     * have.
  18.516 -     */
  18.517 -    action = NULL;
  18.518 -    if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
  18.519 -        action = desc->action;
  18.520 -        status &= ~IRQ_PENDING; /* we commit to handling */
  18.521 -        status |= IRQ_INPROGRESS; /* we are handling it */
  18.522 -    }
  18.523 -    desc->status = status;
  18.524 -
  18.525 -    /*
  18.526 -     * If there is no IRQ handler or it was disabled, exit early. Since we set 
  18.527 -     * PENDING, if another processor is handling a different instance of this 
  18.528 -     * same irq, the other processor will take care of it.
  18.529 -     */
  18.530 -    if (!action)
  18.531 -        goto out;
  18.532 -
  18.533 -    /*
  18.534 -     * Edge triggered interrupts need to remember pending events. This applies 
  18.535 -     * to any hw interrupts that allow a second instance of the same irq to 
  18.536 -     * arrive while we are in do_IRQ or in the handler. But the code here only 
  18.537 -     * handles the _second_ instance of the irq, not the third or fourth. So 
  18.538 -     * it is mostly useful for irq hardware that does not mask cleanly in an
  18.539 -     * SMP environment.
  18.540 -     */
  18.541 -    for (;;) {
  18.542 -        spin_unlock(&desc->lock);
  18.543 -        handle_IRQ_event(irq, &regs, action);
  18.544 -        spin_lock(&desc->lock);
  18.545 -                
  18.546 -        if (!(desc->status & IRQ_PENDING))
  18.547 -            break;
  18.548 -        desc->status &= ~IRQ_PENDING;
  18.549 -    }
  18.550 -    desc->status &= ~IRQ_INPROGRESS;
  18.551 - out:
  18.552 -    /*
  18.553 -     * The ->end() handler has to deal with interrupts which got disabled 
  18.554 -     * while the handler was running.
  18.555 -     */
  18.556 -    desc->handler->end(irq);
  18.557 -    spin_unlock(&desc->lock);
  18.558 -
  18.559 -#ifdef PERF_COUNTERS
  18.560 -    rdtscl(cc_end);
  18.561 -
  18.562 -    if ( !action || (!(action->flags & SA_NOPROFILE)) )
  18.563 -    {
  18.564 -        perfc_adda(irq_time, cpu, cc_end - cc_start);
  18.565 -#ifndef NDEBUG
  18.566 -        if ( (cc_end - cc_start) > (cpu_khz * 100) )
  18.567 -            printk("Long interrupt %08x -> %08x\n", cc_start, cc_end);
  18.568 -#endif
  18.569 -    }
  18.570 -#endif
  18.571 -
  18.572 -    return 1;
  18.573 -}
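To make the edge-triggered comment above concrete, a hypothetical two-CPU interleaving on the same line n looks like this:

    CPU0: do_IRQ(n)  acks, clears IRQ_PENDING, sets IRQ_INPROGRESS,
                     drops desc->lock and runs the handler chain
    CPU1: do_IRQ(n)  acks, sees IRQ_INPROGRESS, so it merely leaves
                     IRQ_PENDING set, calls ->end() and returns
    CPU0:            retakes desc->lock, finds IRQ_PENDING, clears it
                     and runs the handler chain once more
    CPU0:            IRQ_PENDING now clear: clears IRQ_INPROGRESS and
                     calls desc->handler->end(n)

A third arrival while CPU0 is still looping is folded into the single IRQ_PENDING bit again, which is why only the second instance is tracked.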
  18.574 -
  18.575 -/**
  18.576 - *      request_irq - allocate an interrupt line
  18.577 - *      @irq: Interrupt line to allocate
  18.578 - *      @handler: Function to be called when the IRQ occurs
  18.579 - *      @irqflags: Interrupt type flags
  18.580 - *      @devname: An ascii name for the claiming device
  18.581 - *      @dev_id: A cookie passed back to the handler function
  18.582 - *
  18.583 - *      This call allocates interrupt resources and enables the
  18.584 - *      interrupt line and IRQ handling. From the point this
  18.585 - *      call is made your handler function may be invoked. Since
  18.586 - *      your handler function must clear any interrupt the board 
  18.587 - *      raises, you must take care both to initialise your hardware
  18.588 - *      and to set up the interrupt handler in the right order.
  18.589 - *
  18.590 - *      Dev_id must be globally unique. Normally the address of the
  18.591 - *      device data structure is used as the cookie. Since the handler
  18.592 - *      receives this value it makes sense to use it.
  18.593 - *
  18.594 - *      If your interrupt is shared you must pass a non NULL dev_id
  18.595 - *      as this is required when freeing the interrupt.
  18.596 - *
  18.597 - *      Flags:
  18.598 - *
  18.599 - *      SA_SHIRQ                Interrupt is shared
  18.600 - *
  18.601 - *      SA_INTERRUPT            Disable local interrupts while processing
  18.602 - */
  18.603 - 
  18.604 -int request_irq(unsigned int irq, 
  18.605 -                void (*handler)(int, void *, struct pt_regs *),
  18.606 -                unsigned long irqflags, 
  18.607 -                const char * devname,
  18.608 -                void *dev_id)
  18.609 -{
  18.610 -    int retval;
  18.611 -    struct irqaction * action;
  18.612 -
  18.613 -    if (irq >= NR_IRQS)
  18.614 -        return -EINVAL;
  18.615 -    if (!handler)
  18.616 -        return -EINVAL;
  18.617 -
  18.618 -    action = (struct irqaction *)
  18.619 -        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  18.620 -    if (!action)
  18.621 -        return -ENOMEM;
  18.622 -
  18.623 -    action->handler = handler;
  18.624 -    action->flags = irqflags;
  18.625 -    action->mask = 0;
  18.626 -    action->name = devname;
  18.627 -    action->next = NULL;
  18.628 -    action->dev_id = dev_id;
  18.629 -
  18.630 -    retval = setup_irq(irq, action);
  18.631 -    if (retval)
  18.632 -        kfree(action);
  18.633 -
  18.634 -    return retval;
  18.635 -}
  18.636 -
  18.637 -/**
  18.638 - *      free_irq - free an interrupt
  18.639 - *      @irq: Interrupt line to free
  18.640 - *      @dev_id: Device identity to free
  18.641 - *
  18.642 - *      Remove an interrupt handler. The handler is removed and if the
  18.643 - *      interrupt line is no longer in use by any driver it is disabled.
  18.644 - *      On a shared IRQ the caller must ensure the interrupt is disabled
  18.645 - *      on the card it drives before calling this function. The function
  18.646 - *      does not return until any executing interrupts for this IRQ
  18.647 - *      have completed.
  18.648 - *
  18.649 - *      This function may be called from interrupt context. 
  18.650 - *
  18.651 - *      Bugs: Attempting to free an irq in a handler for the same irq hangs
  18.652 - *            the machine.
  18.653 - */
  18.654 - 
  18.655 -void free_irq(unsigned int irq, void *dev_id)
  18.656 -{
  18.657 -    irq_desc_t *desc;
  18.658 -    struct irqaction **p;
  18.659 -    unsigned long flags;
  18.660 -
  18.661 -    if (irq >= NR_IRQS)
  18.662 -        return;
  18.663 -
  18.664 -    desc = irq_desc + irq;
  18.665 -    spin_lock_irqsave(&desc->lock,flags);
  18.666 -    p = &desc->action;
  18.667 -    for (;;) {
  18.668 -        struct irqaction * action = *p;
  18.669 -        if (action) {
  18.670 -            struct irqaction **pp = p;
  18.671 -            p = &action->next;
  18.672 -            if (action->dev_id != dev_id)
  18.673 -                continue;
  18.674 -
  18.675 -            /* Found it - now remove it from the list of entries */
  18.676 -            *pp = action->next;
  18.677 -            if (!desc->action) {
  18.678 -                desc->status |= IRQ_DISABLED;
  18.679 -                desc->handler->shutdown(irq);
  18.680 -            }
  18.681 -            spin_unlock_irqrestore(&desc->lock,flags);
  18.682 -
  18.683 -#ifdef CONFIG_SMP
  18.684 -            /* Wait to make sure it's not being used on another CPU */
  18.685 -            while (desc->status & IRQ_INPROGRESS) {
  18.686 -                barrier();
  18.687 -                cpu_relax();
  18.688 -            }
  18.689 -#endif
  18.690 -            kfree(action);
  18.691 -            return;
  18.692 -        }
  18.693 -        printk("Trying to free free IRQ%d\n",irq);
  18.694 -        spin_unlock_irqrestore(&desc->lock,flags);
  18.695 -        return;
  18.696 -    }
  18.697 -}
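A minimal, hypothetical use of the pair above; 'mydev', its IRQ number and its handler exist only for illustration:

    static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
        struct mydev *dev = dev_id;   /* the cookie handed to request_irq() */
        /* acknowledge the board and do the minimum work needed here */
    }

    /* probe path */
    err = request_irq(dev->irq, mydev_interrupt, SA_SHIRQ, "mydev", dev);
    if ( err )
        return err;                   /* -EINVAL, -ENOMEM or -EBUSY */

    /* teardown path: pass the same dev_id so the matching action is removed */
    free_irq(dev->irq, dev);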
  18.698 -
  18.699 -/*
  18.700 - * IRQ autodetection code..
  18.701 - *
  18.702 - * This depends on the fact that any interrupt that
  18.703 - * comes in on to an unassigned handler will get stuck
  18.704 - * with "IRQ_WAITING" cleared and the interrupt
  18.705 - * disabled.
  18.706 - */
  18.707 -
  18.708 -static spinlock_t probe_sem = SPIN_LOCK_UNLOCKED;
  18.709 -
  18.710 -/**
  18.711 - *      probe_irq_on    - begin an interrupt autodetect
  18.712 - *
  18.713 - *      Commence probing for an interrupt. The interrupts are scanned
  18.714 - *      and a mask of potential interrupt lines is returned.
  18.715 - *
  18.716 - */
  18.717 - 
  18.718 -unsigned long probe_irq_on(void)
  18.719 -{
  18.720 -    unsigned int i;
  18.721 -    irq_desc_t *desc;
  18.722 -    unsigned long val;
  18.723 -    unsigned long s=0, e=0;
  18.724 -
  18.725 -    spin_lock(&probe_sem);
  18.726 -    /* 
  18.727 -     * something may have generated an irq long ago and we want to
  18.728 -     * flush such a longstanding irq before considering it as spurious. 
  18.729 -     */
  18.730 -    for (i = NR_IRQS-1; i > 0; i--)  {
  18.731 -        desc = irq_desc + i;
  18.732 -
  18.733 -        spin_lock_irq(&desc->lock);
  18.734 -        if (!irq_desc[i].action) 
  18.735 -            irq_desc[i].handler->startup(i);
  18.736 -        spin_unlock_irq(&desc->lock);
  18.737 -    }
  18.738 -
  18.739 -    /* Wait for longstanding interrupts to trigger (20ms delay). */
  18.740 -    rdtscl(s);
  18.741 -    do {
  18.742 -        synchronize_irq();
  18.743 -        rdtscl(e);
  18.744 -    } while ( ((e-s)/ticks_per_usec) < 20000 );
  18.745 -
  18.746 -    /*
  18.747 -     * enable any unassigned irqs
  18.748 -     * (we must startup again here because if a longstanding irq
  18.749 -     * happened in the previous stage, it may have masked itself)
  18.750 -     */
  18.751 -    for (i = NR_IRQS-1; i > 0; i--) {
  18.752 -        desc = irq_desc + i;
  18.753 -
  18.754 -        spin_lock_irq(&desc->lock);
  18.755 -        if (!desc->action) {
  18.756 -            desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
  18.757 -            if (desc->handler->startup(i))
  18.758 -                desc->status |= IRQ_PENDING;
  18.759 -        }
  18.760 -        spin_unlock_irq(&desc->lock);
  18.761 -    }
  18.762 -
  18.763 -    /*
  18.764 -     * Wait for spurious interrupts to trigger (100ms delay). 
  18.765 -     */
  18.766 -    rdtscl(s);
  18.767 -    do {
  18.768 -        synchronize_irq();
  18.769 -        rdtscl(e);
  18.770 -    } while ( ((e-s)/ticks_per_usec) < 100000 );
  18.771 -
  18.772 -    /*
  18.773 -     * Now filter out any obviously spurious interrupts
  18.774 -     */
  18.775 -    val = 0;
  18.776 -    for (i = 0; i < NR_IRQS; i++) {
  18.777 -        irq_desc_t *desc = irq_desc + i;
  18.778 -        unsigned int status;
  18.779 -
  18.780 -        spin_lock_irq(&desc->lock);
  18.781 -        status = desc->status;
  18.782 -
  18.783 -        if (status & IRQ_AUTODETECT) {
  18.784 -            /* It triggered already - consider it spurious. */
  18.785 -            if (!(status & IRQ_WAITING)) {
  18.786 -                desc->status = status & ~IRQ_AUTODETECT;
  18.787 -                desc->handler->shutdown(i);
  18.788 -            } else
  18.789 -                if (i < 32)
  18.790 -                    val |= 1 << i;
  18.791 -        }
  18.792 -        spin_unlock_irq(&desc->lock);
  18.793 -    }
  18.794 -
  18.795 -    return val;
  18.796 -}
  18.797 -
  18.798 -/*
  18.799 - * Return a mask of triggered interrupts (this
  18.800 - * can handle only legacy ISA interrupts).
  18.801 - */
  18.802 - 
  18.803 -/**
  18.804 - *      probe_irq_mask - scan a bitmap of interrupt lines
  18.805 - *      @val:   mask of interrupts to consider
  18.806 - *
  18.807 - *      Scan the ISA bus interrupt lines and return a bitmap of
  18.808 - *      active interrupts. The interrupt probe logic state is then
  18.809 - *      returned to its previous value.
  18.810 - *
  18.811 - *      Note: we need to scan all the irq's even though we will
  18.812 - *      only return ISA irq numbers - just so that we reset them
  18.813 - *      all to a known state.
  18.814 - */
  18.815 -unsigned int probe_irq_mask(unsigned long val)
  18.816 -{
  18.817 -    int i;
  18.818 -    unsigned int mask;
  18.819 -
  18.820 -    mask = 0;
  18.821 -    for (i = 0; i < NR_IRQS; i++) {
  18.822 -        irq_desc_t *desc = irq_desc + i;
  18.823 -        unsigned int status;
  18.824 -
  18.825 -        spin_lock_irq(&desc->lock);
  18.826 -        status = desc->status;
  18.827 -
  18.828 -        if (status & IRQ_AUTODETECT) {
  18.829 -            if (i < 16 && !(status & IRQ_WAITING))
  18.830 -                mask |= 1 << i;
  18.831 -
  18.832 -            desc->status = status & ~IRQ_AUTODETECT;
  18.833 -            desc->handler->shutdown(i);
  18.834 -        }
  18.835 -        spin_unlock_irq(&desc->lock);
  18.836 -    }
  18.837 -    spin_unlock(&probe_sem);
  18.838 -
  18.839 -    return mask & val;
  18.840 -}
  18.841 -
  18.842 -/*
  18.843 - * Return the one interrupt that triggered (this can
  18.844 - * handle any interrupt source).
  18.845 - */
  18.846 -
  18.847 -/**
  18.848 - *      probe_irq_off   - end an interrupt autodetect
  18.849 - *      @val: mask of potential interrupts (unused)
  18.850 - *
  18.851 - *      Scans the unused interrupt lines and returns the line which
  18.852 - *      appears to have triggered the interrupt. If no interrupt was
  18.853 - *      found then zero is returned. If more than one interrupt is
   18.854 - *      found then the negative of the first candidate is returned to
   18.855 - *      indicate there is doubt.
  18.856 - *
  18.857 - *      The interrupt probe logic state is returned to its previous
  18.858 - *      value.
  18.859 - *
   18.860 - *      BUGS: When used in a module (which arguably shouldn't happen)
  18.861 - *      nothing prevents two IRQ probe callers from overlapping. The
  18.862 - *      results of this are non-optimal.
  18.863 - */
  18.864 - 
  18.865 -int probe_irq_off(unsigned long val)
  18.866 -{
  18.867 -    int i, irq_found, nr_irqs;
  18.868 -
  18.869 -    nr_irqs = 0;
  18.870 -    irq_found = 0;
  18.871 -    for (i = 0; i < NR_IRQS; i++) {
  18.872 -        irq_desc_t *desc = irq_desc + i;
  18.873 -        unsigned int status;
  18.874 -
  18.875 -        spin_lock_irq(&desc->lock);
  18.876 -        status = desc->status;
  18.877 -
  18.878 -        if (status & IRQ_AUTODETECT) {
  18.879 -            if (!(status & IRQ_WAITING)) {
  18.880 -                if (!nr_irqs)
  18.881 -                    irq_found = i;
  18.882 -                nr_irqs++;
  18.883 -            }
  18.884 -            desc->status = status & ~IRQ_AUTODETECT;
  18.885 -            desc->handler->shutdown(i);
  18.886 -        }
  18.887 -        spin_unlock_irq(&desc->lock);
  18.888 -    }
  18.889 -    spin_unlock(&probe_sem);
  18.890 -
  18.891 -    if (nr_irqs > 1)
  18.892 -        irq_found = -irq_found;
  18.893 -    return irq_found;
  18.894 -}
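A sketch of the classic autodetect sequence built from the three probe calls above, for a hypothetical card that can be told to pulse its interrupt line once:

    unsigned long mask;
    int irq;

    mask = probe_irq_on();             /* start up all unclaimed lines        */
    mydev_force_interrupt(dev);        /* hypothetical: make the card fire    */
    irq  = probe_irq_off(mask);        /* >0: the line; 0: none; <0: several  */
    if ( irq <= 0 )
        printk("mydev: IRQ autodetect failed (%d)\n", irq);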
  18.895 -
  18.896 -/* this was setup_x86_irq but it seems pretty generic */
  18.897 -int setup_irq(unsigned int irq, struct irqaction * new)
  18.898 -{
  18.899 -    int shared = 0;
  18.900 -    unsigned long flags;
  18.901 -    struct irqaction *old, **p;
  18.902 -    irq_desc_t *desc = irq_desc + irq;
  18.903 -
  18.904 -    /*
  18.905 -     * The following block of code has to be executed atomically
  18.906 -     */
  18.907 -    spin_lock_irqsave(&desc->lock,flags);
  18.908 -
  18.909 -    if ( desc->status & IRQ_GUEST )
  18.910 -    {
  18.911 -        spin_unlock_irqrestore(&desc->lock,flags);
  18.912 -        return -EBUSY;
  18.913 -    }
  18.914 -
  18.915 -    p = &desc->action;
  18.916 -    if ((old = *p) != NULL) {
  18.917 -        /* Can't share interrupts unless both agree to */
  18.918 -        if (!(old->flags & new->flags & SA_SHIRQ)) {
  18.919 -            spin_unlock_irqrestore(&desc->lock,flags);
  18.920 -            return -EBUSY;
  18.921 -        }
  18.922 -
  18.923 -        /* add new interrupt at end of irq queue */
  18.924 -        do {
  18.925 -            p = &old->next;
  18.926 -            old = *p;
  18.927 -        } while (old);
  18.928 -        shared = 1;
  18.929 -    }
  18.930 -
  18.931 -    *p = new;
  18.932 -
  18.933 -    if (!shared) {
  18.934 -        desc->depth = 0;
  18.935 -        desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
  18.936 -        desc->handler->startup(irq);
  18.937 -    }
  18.938 -
  18.939 -    spin_unlock_irqrestore(&desc->lock,flags);
  18.940 -
  18.941 -    return 0;
  18.942 -}
  18.943 -
  18.944 -
  18.945 -
  18.946 -/*
  18.947 - * HANDLING OF GUEST-BOUND PHYSICAL IRQS
  18.948 - */
  18.949 -
  18.950 -#define IRQ_MAX_GUESTS 7
  18.951 -typedef struct {
  18.952 -    u8 nr_guests;
  18.953 -    u8 in_flight;
  18.954 -    u8 shareable;
  18.955 -    struct task_struct *guest[IRQ_MAX_GUESTS];
  18.956 -} irq_guest_action_t;
  18.957 -
  18.958 -static void __do_IRQ_guest(int irq)
  18.959 -{
  18.960 -    irq_desc_t *desc = &irq_desc[irq];
  18.961 -    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
  18.962 -    struct task_struct *p;
  18.963 -    int i;
  18.964 -
  18.965 -    for ( i = 0; i < action->nr_guests; i++ )
  18.966 -    {
  18.967 -        p = action->guest[i];
  18.968 -        if ( !test_and_set_bit(irq, &p->pirq_mask) )
  18.969 -            action->in_flight++;
  18.970 -        send_guest_pirq(p, irq);
  18.971 -    }
  18.972 -}
  18.973 -
  18.974 -int pirq_guest_unmask(struct task_struct *p)
  18.975 -{
  18.976 -    irq_desc_t *desc;
  18.977 -    int i, j, pirq;
  18.978 -    u32 m;
  18.979 -    shared_info_t *s = p->shared_info;
  18.980 -
  18.981 -    for ( i = 0; i < 2; i++ )
  18.982 -    {
  18.983 -        m = p->pirq_mask[i];
  18.984 -        while ( (j = ffs(m)) != 0 )
  18.985 -        {
  18.986 -            m &= ~(1 << --j);
  18.987 -            pirq = (i << 5) + j;
  18.988 -            desc = &irq_desc[pirq];
  18.989 -            spin_lock_irq(&desc->lock);
  18.990 -            if ( !test_bit(p->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
  18.991 -                 test_and_clear_bit(pirq, &p->pirq_mask) &&
  18.992 -                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
  18.993 -                desc->handler->end(pirq);
  18.994 -            spin_unlock_irq(&desc->lock);
  18.995 -        }
  18.996 -    }
  18.997 -
  18.998 -    return 0;
  18.999 -}
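For reference, the loop above packs 64 pirqs into the two 32-bit words of pirq_mask; e.g. pirq 37 lives at word 1, bit 5:

    int pirq = 37;
    int word = pirq >> 5;    /* 37 / 32 = 1 */
    int bit  = pirq & 31;    /* 37 % 32 = 5 */
    /* test_and_clear_bit(37, &p->pirq_mask) therefore touches            */
    /* p->pirq_mask[1] bit 5, and the loop rebuilds pirq as (1 << 5) + 5. */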
 18.1000 -
 18.1001 -int pirq_guest_bind(struct task_struct *p, int irq, int will_share)
 18.1002 -{
 18.1003 -    unsigned long flags;
 18.1004 -    irq_desc_t *desc = &irq_desc[irq];
 18.1005 -    irq_guest_action_t *action;
 18.1006 -    int rc = 0;
 18.1007 -
 18.1008 -    if ( !IS_CAPABLE_PHYSDEV(p) )
 18.1009 -        return -EPERM;
 18.1010 -
 18.1011 -    spin_lock_irqsave(&desc->lock, flags);
 18.1012 -
 18.1013 -    action = (irq_guest_action_t *)desc->action;
 18.1014 -
 18.1015 -    if ( !(desc->status & IRQ_GUEST) )
 18.1016 -    {
 18.1017 -        if ( desc->action != NULL )
 18.1018 -        {
 18.1019 -            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
 18.1020 -                    irq, desc->action->name);
 18.1021 -            rc = -EBUSY;
 18.1022 -            goto out;
 18.1023 -        }
 18.1024 -
 18.1025 -        action = kmalloc(sizeof(irq_guest_action_t), GFP_KERNEL);
 18.1026 -        if ( (desc->action = (struct irqaction *)action) == NULL )
 18.1027 -        {
 18.1028 -            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
 18.1029 -            rc = -ENOMEM;
 18.1030 -            goto out;
 18.1031 -        }
 18.1032 -
 18.1033 -        action->nr_guests = 0;
 18.1034 -        action->in_flight = 0;
 18.1035 -        action->shareable = will_share;
 18.1036 -        
 18.1037 -        desc->depth = 0;
 18.1038 -        desc->status |= IRQ_GUEST;
 18.1039 -        desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
 18.1040 -        desc->handler->startup(irq);
 18.1041 -
 18.1042 -        /* Attempt to bind the interrupt target to the correct CPU. */
 18.1043 -        if ( desc->handler->set_affinity != NULL )
 18.1044 -            desc->handler->set_affinity(
 18.1045 -                irq, apicid_to_phys_cpu_present(p->processor));
 18.1046 -    }
 18.1047 -    else if ( !will_share || !action->shareable )
 18.1048 -    {
 18.1049 -        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
 18.1050 -                irq);
 18.1051 -        rc = -EBUSY;
 18.1052 -        goto out;
 18.1053 -    }
 18.1054 -
 18.1055 -    if ( action->nr_guests == IRQ_MAX_GUESTS )
 18.1056 -    {
 18.1057 -        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
 18.1058 -        rc = -EBUSY;
 18.1059 -        goto out;
 18.1060 -    }
 18.1061 -
 18.1062 -    action->guest[action->nr_guests++] = p;
 18.1063 -
 18.1064 - out:
 18.1065 -    spin_unlock_irqrestore(&desc->lock, flags);
 18.1066 -    return rc;
 18.1067 -}
 18.1068 -
 18.1069 -int pirq_guest_unbind(struct task_struct *p, int irq)
 18.1070 -{
 18.1071 -    unsigned long flags;
 18.1072 -    irq_desc_t *desc = &irq_desc[irq];
 18.1073 -    irq_guest_action_t *action;
 18.1074 -    int i;
 18.1075 -
 18.1076 -    spin_lock_irqsave(&desc->lock, flags);
 18.1077 -
 18.1078 -    action = (irq_guest_action_t *)desc->action;
 18.1079 -
 18.1080 -    if ( test_and_clear_bit(irq, &p->pirq_mask) &&
 18.1081 -         (--action->in_flight == 0) )
 18.1082 -        desc->handler->end(irq);
 18.1083 -
 18.1084 -    if ( action->nr_guests == 1 )
 18.1085 -    {
 18.1086 -        desc->action = NULL;
 18.1087 -        kfree(action);
 18.1088 -        desc->status |= IRQ_DISABLED;
 18.1089 -        desc->status &= ~IRQ_GUEST;
 18.1090 -        desc->handler->shutdown(irq);
 18.1091 -    }
 18.1092 -    else
 18.1093 -    {
 18.1094 -        i = 0;
 18.1095 -        while ( action->guest[i] != p )
 18.1096 -            i++;
  18.1097 -        memmove(&action->guest[i], &action->guest[i+1], (action->nr_guests-i-1) * sizeof(action->guest[0]));
 18.1098 -        action->nr_guests--;
 18.1099 -    }
 18.1100 -
 18.1101 -    spin_unlock_irqrestore(&desc->lock, flags);    
 18.1102 -    return 0;
 18.1103 -}
    19.1 --- a/xen/arch/i386/mm.c	Thu Jun 10 14:24:30 2004 +0000
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,412 +0,0 @@
    19.4 -/******************************************************************************
    19.5 - * arch/i386/mm.c
    19.6 - * 
    19.7 - * Modifications to Linux original are copyright (c) 2002-2003, K A Fraser
    19.8 - * 
    19.9 - * This program is free software; you can redistribute it and/or modify
   19.10 - * it under the terms of the GNU General Public License as published by
   19.11 - * the Free Software Foundation; either version 2 of the License, or
   19.12 - * (at your option) any later version.
   19.13 - * 
   19.14 - * This program is distributed in the hope that it will be useful,
   19.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
   19.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   19.17 - * GNU General Public License for more details.
   19.18 - * 
   19.19 - * You should have received a copy of the GNU General Public License
   19.20 - * along with this program; if not, write to the Free Software
   19.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   19.22 - */
   19.23 -
   19.24 -#include <xen/config.h>
   19.25 -#include <xen/lib.h>
   19.26 -#include <xen/init.h>
   19.27 -#include <xen/mm.h>
   19.28 -#include <asm/page.h>
   19.29 -#include <asm/pgalloc.h>
   19.30 -#include <asm/fixmap.h>
   19.31 -#include <asm/domain_page.h>
   19.32 -
   19.33 -static inline void set_pte_phys(unsigned long vaddr,
   19.34 -                                l1_pgentry_t entry)
   19.35 -{
   19.36 -    l2_pgentry_t *l2ent;
   19.37 -    l1_pgentry_t *l1ent;
   19.38 -
   19.39 -    l2ent = &idle_pg_table[l2_table_offset(vaddr)];
   19.40 -    l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
   19.41 -    *l1ent = entry;
   19.42 -
   19.43 -    /* It's enough to flush this one mapping. */
   19.44 -    __flush_tlb_one(vaddr);
   19.45 -}
   19.46 -
   19.47 -
   19.48 -void __set_fixmap(enum fixed_addresses idx, 
   19.49 -                  l1_pgentry_t entry)
   19.50 -{
   19.51 -    unsigned long address = __fix_to_virt(idx);
   19.52 -
   19.53 -    if ( likely(idx < __end_of_fixed_addresses) )
   19.54 -        set_pte_phys(address, entry);
   19.55 -    else
   19.56 -        printk("Invalid __set_fixmap\n");
   19.57 -}
   19.58 -
   19.59 -
   19.60 -static void __init fixrange_init(unsigned long start, 
   19.61 -                                 unsigned long end, 
   19.62 -                                 l2_pgentry_t *pg_base)
   19.63 -{
   19.64 -    l2_pgentry_t *l2e;
   19.65 -    int i;
   19.66 -    unsigned long vaddr, page;
   19.67 -
   19.68 -    vaddr = start;
   19.69 -    i = l2_table_offset(vaddr);
   19.70 -    l2e = pg_base + i;
   19.71 -
   19.72 -    for ( ; (i < ENTRIES_PER_L2_PAGETABLE) && (vaddr != end); l2e++, i++ ) 
   19.73 -    {
   19.74 -        if ( !l2_pgentry_empty(*l2e) )
   19.75 -            continue;
   19.76 -        page = (unsigned long)get_free_page(GFP_KERNEL);
   19.77 -        clear_page(page);
   19.78 -        *l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
   19.79 -        vaddr += 1 << L2_PAGETABLE_SHIFT;
   19.80 -    }
   19.81 -}
   19.82 -
   19.83 -void __init paging_init(void)
   19.84 -{
   19.85 -    unsigned long addr;
   19.86 -    void *ioremap_pt;
   19.87 -    int i;
   19.88 -
   19.89 -    /* Idle page table 1:1 maps the first part of physical memory. */
   19.90 -    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   19.91 -        idle_pg_table[i] = 
   19.92 -            mk_l2_pgentry((i << L2_PAGETABLE_SHIFT) | 
   19.93 -                          __PAGE_HYPERVISOR | _PAGE_PSE);
   19.94 -
   19.95 -    /*
   19.96 -     * Fixed mappings, only the page table structure has to be
   19.97 -     * created - mappings will be set by set_fixmap():
   19.98 -     */
   19.99 -    addr = FIXADDR_START & ~((1<<L2_PAGETABLE_SHIFT)-1);
  19.100 -    fixrange_init(addr, 0, idle_pg_table);
  19.101 -
  19.102 -    /* Create page table for ioremap(). */
  19.103 -    ioremap_pt = (void *)get_free_page(GFP_KERNEL);
  19.104 -    clear_page(ioremap_pt);
  19.105 -    idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = 
  19.106 -        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
  19.107 -
  19.108 -    /* Create read-only mapping of MPT for guest-OS use. */
  19.109 -    idle_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
  19.110 -        idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT];
  19.111 -    mk_l2_readonly(idle_pg_table + 
  19.112 -                   (READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
  19.113 -
  19.114 -    /* Set up mapping cache for domain pages. */
  19.115 -    mapcache = (unsigned long *)get_free_page(GFP_KERNEL);
  19.116 -    clear_page(mapcache);
  19.117 -    idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
  19.118 -        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
  19.119 -
  19.120 -    /* Set up linear page table mapping. */
  19.121 -    idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
  19.122 -        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
  19.123 -
  19.124 -}
  19.125 -
  19.126 -void __init zap_low_mappings(void)
  19.127 -{
  19.128 -    int i;
  19.129 -    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
  19.130 -        idle_pg_table[i] = mk_l2_pgentry(0);
  19.131 -    flush_tlb_all_pge();
  19.132 -}
  19.133 -
  19.134 -
  19.135 -long do_stack_switch(unsigned long ss, unsigned long esp)
  19.136 -{
  19.137 -    int nr = smp_processor_id();
  19.138 -    struct tss_struct *t = &init_tss[nr];
  19.139 -
   19.140 -    /* We need to do this check as we load and use SS on the guest's behalf. */
  19.141 -    if ( (ss & 3) == 0 )
  19.142 -        return -EPERM;
  19.143 -
  19.144 -    current->thread.guestos_ss = ss;
  19.145 -    current->thread.guestos_sp = esp;
  19.146 -    t->ss1  = ss;
  19.147 -    t->esp1 = esp;
  19.148 -
  19.149 -    return 0;
  19.150 -}
  19.151 -
  19.152 -
  19.153 -/* Returns TRUE if given descriptor is valid for GDT or LDT. */
  19.154 -int check_descriptor(unsigned long a, unsigned long b)
  19.155 -{
  19.156 -    unsigned long base, limit;
  19.157 -
  19.158 -    /* A not-present descriptor will always fault, so is safe. */
  19.159 -    if ( !(b & _SEGMENT_P) ) 
  19.160 -        goto good;
  19.161 -
  19.162 -    /*
  19.163 -     * We don't allow a DPL of zero. There is no legitimate reason for 
  19.164 -     * specifying DPL==0, and it gets rather dangerous if we also accept call 
  19.165 -     * gates (consider a call gate pointing at another guestos descriptor with 
   19.166 - * DPL 0 -- this would give the guest OS ring-0 privileges).
  19.167 -     */
  19.168 -    if ( (b & _SEGMENT_DPL) == 0 )
  19.169 -        goto bad;
  19.170 -
  19.171 -    if ( !(b & _SEGMENT_S) )
  19.172 -    {
  19.173 -        /*
  19.174 -         * System segment:
  19.175 -         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
  19.176 -         *  2. Don't allow TSS descriptors or task gates as we don't
  19.177 -         *     virtualise x86 tasks.
  19.178 -         *  3. Don't allow LDT descriptors because they're unnecessary and
  19.179 -         *     I'm uneasy about allowing an LDT page to contain LDT
  19.180 -         *     descriptors. In any case, Xen automatically creates the
  19.181 -         *     required descriptor when reloading the LDT register.
  19.182 -         *  4. We allow call gates but they must not jump to a private segment.
  19.183 -         */
  19.184 -
  19.185 -        /* Disallow everything but call gates. */
  19.186 -        if ( (b & _SEGMENT_TYPE) != 0xc00 )
  19.187 -            goto bad;
  19.188 -
  19.189 -        /* Can't allow far jump to a Xen-private segment. */
  19.190 -        if ( !VALID_CODESEL(a>>16) )
  19.191 -            goto bad;
  19.192 -
  19.193 -        /* Reserved bits must be zero. */
  19.194 -        if ( (b & 0xe0) != 0 )
  19.195 -            goto bad;
  19.196 -        
  19.197 -        /* No base/limit check is needed for a call gate. */
  19.198 -        goto good;
  19.199 -    }
  19.200 -    
  19.201 -    /* Check that base/limit do not overlap Xen-private space. */
  19.202 -    base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
  19.203 -    limit = (b&0xf0000) | (a&0xffff);
  19.204 -    limit++; /* We add one because limit is inclusive. */
  19.205 -    if ( (b & _SEGMENT_G) )
  19.206 -        limit <<= 12;
  19.207 -    if ( ((base + limit) <= base) || 
  19.208 -         ((base + limit) > PAGE_OFFSET) )
  19.209 -        goto bad;
  19.210 -
  19.211 - good:
  19.212 -    return 1;
  19.213 - bad:
  19.214 -    return 0;
  19.215 -}
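A worked example of the base/limit arithmetic above, using a hypothetical descriptor word pair (a present, DPL-1, read/write data segment with 4K granularity):

    unsigned long a = 0x0000ffff, b = 0x00c0b300;
    unsigned long base, limit;

    base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);   /* = 0x00000000         */
    limit = (b&0xf0000) | (a&0xffff);                    /* = 0x0000ffff         */
    limit++;                                             /* inclusive -> 0x10000 */
    if ( b & _SEGMENT_G )                                /* granularity bit set, */
        limit <<= 12;                                    /* so 0x10000000 (256MB)*/
    /* accepted only if base+limit does not wrap and does not exceed PAGE_OFFSET */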
  19.216 -
  19.217 -
  19.218 -long set_gdt(struct task_struct *p, 
  19.219 -             unsigned long *frames,
  19.220 -             unsigned int entries)
  19.221 -{
  19.222 -    /* NB. There are 512 8-byte entries per GDT page. */
  19.223 -    int i, nr_pages = (entries + 511) / 512;
  19.224 -    unsigned long pfn;
  19.225 -    struct desc_struct *vgdt;
  19.226 -
  19.227 -    /* Check the new GDT. */
  19.228 -    for ( i = 0; i < nr_pages; i++ )
  19.229 -    {
  19.230 -        if ( unlikely(frames[i] >= max_page) ||
  19.231 -             unlikely(!get_page_and_type(&frame_table[frames[i]], 
  19.232 -                                         p, PGT_gdt_page)) )
  19.233 -            goto fail;
  19.234 -    }
  19.235 -
  19.236 -    /* Copy reserved GDT entries to the new GDT. */
  19.237 -    vgdt = map_domain_mem(frames[0] << PAGE_SHIFT);
  19.238 -    memcpy(vgdt + FIRST_RESERVED_GDT_ENTRY, 
  19.239 -           gdt_table + FIRST_RESERVED_GDT_ENTRY, 
  19.240 -           NR_RESERVED_GDT_ENTRIES*8);
  19.241 -    unmap_domain_mem(vgdt);
  19.242 -
  19.243 -    /* Tear down the old GDT. */
  19.244 -    for ( i = 0; i < 16; i++ )
  19.245 -    {
  19.246 -        if ( (pfn = l1_pgentry_to_pagenr(p->mm.perdomain_pt[i])) != 0 )
  19.247 -            put_page_and_type(&frame_table[pfn]);
  19.248 -        p->mm.perdomain_pt[i] = mk_l1_pgentry(0);
  19.249 -    }
  19.250 -
  19.251 -    /* Install the new GDT. */
  19.252 -    for ( i = 0; i < nr_pages; i++ )
  19.253 -        p->mm.perdomain_pt[i] =
  19.254 -            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  19.255 -
  19.256 -    SET_GDT_ADDRESS(p, GDT_VIRT_START);
  19.257 -    SET_GDT_ENTRIES(p, (entries*8)-1);
  19.258 -
  19.259 -    return 0;
  19.260 -
  19.261 - fail:
  19.262 -    while ( i-- > 0 )
  19.263 -        put_page_and_type(&frame_table[frames[i]]);
  19.264 -    return -EINVAL;
  19.265 -}
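The nr_pages computation above is just a ceiling division: each 4096-byte page holds 512 eight-byte descriptors, so

    /* nr_pages = (entries + 511) / 512                                       */
    /*   512 entries -> 1 page,  513 -> 2 pages,  8192 (the max) -> 16 pages  */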
  19.266 -
  19.267 -
  19.268 -long do_set_gdt(unsigned long *frame_list, unsigned int entries)
  19.269 -{
  19.270 -    int nr_pages = (entries + 511) / 512;
  19.271 -    unsigned long frames[16];
  19.272 -    long ret;
  19.273 -
  19.274 -    if ( (entries <= LAST_RESERVED_GDT_ENTRY) || (entries > 8192) ) 
  19.275 -        return -EINVAL;
  19.276 -    
  19.277 -    if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
  19.278 -        return -EFAULT;
  19.279 -
  19.280 -    if ( (ret = set_gdt(current, frames, entries)) == 0 )
  19.281 -    {
  19.282 -        local_flush_tlb();
  19.283 -        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
  19.284 -    }
  19.285 -
  19.286 -    return ret;
  19.287 -}
  19.288 -
  19.289 -
  19.290 -long do_update_descriptor(
  19.291 -    unsigned long pa, unsigned long word1, unsigned long word2)
  19.292 -{
  19.293 -    unsigned long *gdt_pent, pfn = pa >> PAGE_SHIFT;
  19.294 -    struct pfn_info *page;
  19.295 -    long ret = -EINVAL;
  19.296 -
  19.297 -    if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(word1, word2) )
  19.298 -        return -EINVAL;
  19.299 -
  19.300 -    page = &frame_table[pfn];
  19.301 -    if ( unlikely(!get_page(page, current)) )
  19.302 -        goto out;
  19.303 -
  19.304 -    /* Check if the given frame is in use in an unsafe context. */
  19.305 -    switch ( page->type_and_flags & PGT_type_mask )
  19.306 -    {
  19.307 -    case PGT_gdt_page:
  19.308 -        /* Disallow updates of Xen-reserved descriptors in the current GDT. */
  19.309 -        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_pt[0]) == pfn) &&
  19.310 -             (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
  19.311 -             (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
  19.312 -            goto out;
  19.313 -        if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
  19.314 -            goto out;
  19.315 -        break;
  19.316 -    case PGT_ldt_page:
  19.317 -        if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
  19.318 -            goto out;
  19.319 -        break;
  19.320 -    default:
  19.321 -        if ( unlikely(!get_page_type(page, PGT_writeable_page)) )
  19.322 -            goto out;
  19.323 -        break;
  19.324 -    }
  19.325 -
  19.326 -    /* All is good so make the update. */
  19.327 -    gdt_pent = map_domain_mem(pa);
  19.328 -    gdt_pent[0] = word1;
  19.329 -    gdt_pent[1] = word2;
  19.330 -    unmap_domain_mem(gdt_pent);
  19.331 -
  19.332 -    put_page_type(page);
  19.333 -
  19.334 -    ret = 0; /* success */
  19.335 -
  19.336 - out:
  19.337 -    put_page(page);
  19.338 -    return ret;
  19.339 -}
  19.340 -
  19.341 -#ifdef MEMORY_GUARD
  19.342 -
  19.343 -void *memguard_init(void *heap_start)
  19.344 -{
  19.345 -    l1_pgentry_t *l1;
  19.346 -    int i, j;
  19.347 -
  19.348 -    /* Round the allocation pointer up to a page boundary. */
  19.349 -    heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) & 
  19.350 -                          PAGE_MASK);
  19.351 -
  19.352 -    /* Memory guarding is incompatible with super pages. */
  19.353 -    for ( i = 0; i < (MAX_MONITOR_ADDRESS >> L2_PAGETABLE_SHIFT); i++ )
  19.354 -    {
  19.355 -        l1 = (l1_pgentry_t *)heap_start;
  19.356 -        heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
  19.357 -        for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
  19.358 -            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
  19.359 -                                   (j << L1_PAGETABLE_SHIFT) | 
  19.360 -                                  __PAGE_HYPERVISOR);
  19.361 -        idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
  19.362 -            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
  19.363 -    }
  19.364 -
  19.365 -    return heap_start;
  19.366 -}
  19.367 -
  19.368 -static void __memguard_change_range(void *p, unsigned long l, int guard)
  19.369 -{
  19.370 -    l1_pgentry_t *l1;
  19.371 -    l2_pgentry_t *l2;
  19.372 -    unsigned long _p = (unsigned long)p;
  19.373 -    unsigned long _l = (unsigned long)l;
  19.374 -
  19.375 -    /* Ensure we are dealing with a page-aligned whole number of pages. */
  19.376 -    ASSERT((_p&PAGE_MASK) != 0);
  19.377 -    ASSERT((_l&PAGE_MASK) != 0);
  19.378 -    ASSERT((_p&~PAGE_MASK) == 0);
  19.379 -    ASSERT((_l&~PAGE_MASK) == 0);
  19.380 -
  19.381 -    while ( _l != 0 )
  19.382 -    {
  19.383 -        l2  = &idle_pg_table[l2_table_offset(_p)];
  19.384 -        l1  = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
  19.385 -        if ( guard )
  19.386 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
  19.387 -        else
  19.388 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
  19.389 -        _p += PAGE_SIZE;
  19.390 -        _l -= PAGE_SIZE;
  19.391 -    }
  19.392 -}
  19.393 -
  19.394 -void memguard_guard_range(void *p, unsigned long l)
  19.395 -{
  19.396 -    __memguard_change_range(p, l, 1);
  19.397 -    local_flush_tlb();
  19.398 -}
  19.399 -
  19.400 -void memguard_unguard_range(void *p, unsigned long l)
  19.401 -{
  19.402 -    __memguard_change_range(p, l, 0);
  19.403 -}
  19.404 -
  19.405 -int memguard_is_guarded(void *p)
  19.406 -{
  19.407 -    l1_pgentry_t *l1;
  19.408 -    l2_pgentry_t *l2;
  19.409 -    unsigned long _p = (unsigned long)p;
  19.410 -    l2  = &idle_pg_table[l2_table_offset(_p)];
  19.411 -    l1  = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
  19.412 -    return !(l1_pgentry_val(*l1) & _PAGE_PRESENT);
  19.413 -}
  19.414 -
  19.415 -#endif
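A small usage sketch of the guard/unguard pair above (MEMORY_GUARD builds only; the page being fenced off is hypothetical):

    void *buf = (void *)get_free_page(GFP_KERNEL);

    memguard_guard_range(buf, PAGE_SIZE);      /* clear _PAGE_PRESENT: any    */
                                               /* stray access faults at once */
    ASSERT(memguard_is_guarded(buf));
    memguard_unguard_range(buf, PAGE_SIZE);    /* restore the mapping         */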
    20.1 --- a/xen/arch/i386/mpparse.c	Thu Jun 10 14:24:30 2004 +0000
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,1381 +0,0 @@
    20.4 -/*
     20.5 - *	Intel Multiprocessor Specification 1.1 and 1.4
    20.6 - *	compliant MP-table parsing routines.
    20.7 - *
    20.8 - *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
    20.9 - *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
   20.10 - *
   20.11 - *	Fixes
   20.12 - *		Erich Boleyn	:	MP v1.4 and additional changes.
   20.13 - *		Alan Cox	:	Added EBDA scanning
   20.14 - *		Ingo Molnar	:	various cleanups and rewrites
   20.15 - *		Maciej W. Rozycki:	Bits for default MP configurations
   20.16 - *		Paul Diefenbaugh:	Added full ACPI support
   20.17 - */
   20.18 -
   20.19 -#include <xen/config.h>
   20.20 -#include <xen/init.h>
   20.21 -#include <xen/lib.h>
   20.22 -#include <xen/kernel.h>
   20.23 -#include <xen/irq.h>
   20.24 -#include <xen/smp.h>
   20.25 -#include <xen/mm.h>
   20.26 -#include <xen/acpi.h>
   20.27 -#include <asm/acpi.h>
   20.28 -#include <asm/io.h>
   20.29 -#include <asm/apic.h>
   20.30 -#include <asm/mpspec.h>
   20.31 -#include <asm/pgalloc.h>
   20.32 -#include <asm/smpboot.h>
   20.33 -
   20.34 -int numnodes = 1; /* XXX Xen */
   20.35 -
   20.36 -/* Have we found an MP table */
   20.37 -int smp_found_config;
   20.38 -
   20.39 -/*
   20.40 - * Various Linux-internal data structures created from the
   20.41 - * MP-table.
   20.42 - */
   20.43 -int apic_version [MAX_APICS];
   20.44 -int quad_local_to_mp_bus_id [NR_CPUS/4][4];
   20.45 -int mp_current_pci_id;
   20.46 -int *mp_bus_id_to_type;
   20.47 -int *mp_bus_id_to_node;
   20.48 -int *mp_bus_id_to_local;
   20.49 -int *mp_bus_id_to_pci_bus;
   20.50 -int max_mp_busses;
   20.51 -int max_irq_sources;
   20.52 -
   20.53 -/* I/O APIC entries */
   20.54 -struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
   20.55 -
   20.56 -/* # of MP IRQ source entries */
   20.57 -struct mpc_config_intsrc *mp_irqs;
   20.58 -
   20.59 -/* MP IRQ source entries */
   20.60 -int mp_irq_entries;
   20.61 -
   20.62 -int nr_ioapics;
   20.63 -
   20.64 -int pic_mode;
   20.65 -unsigned long mp_lapic_addr;
   20.66 -
   20.67 -/* Processor that is doing the boot up */
   20.68 -unsigned int boot_cpu_physical_apicid = -1U;
   20.69 -unsigned int boot_cpu_logical_apicid = -1U;
   20.70 -/* Internal processor count */
   20.71 -static unsigned int num_processors;
   20.72 -
   20.73 -/* Bitmask of physically existing CPUs */
   20.74 -unsigned long phys_cpu_present_map;
   20.75 -unsigned long logical_cpu_present_map;
   20.76 -
   20.77 -#ifdef CONFIG_X86_CLUSTERED_APIC
   20.78 -unsigned char esr_disable = 0;
   20.79 -unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE;
   20.80 -unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC;
   20.81 -#endif
   20.82 -unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
   20.83 -
   20.84 -/*
   20.85 - * Intel MP BIOS table parsing routines:
   20.86 - */
   20.87 -
   20.88 -#ifndef CONFIG_X86_VISWS_APIC
   20.89 -/*
   20.90 - * Checksum an MP configuration block.
   20.91 - */
   20.92 -
   20.93 -static int __init mpf_checksum(unsigned char *mp, int len)
   20.94 -{
   20.95 -	int sum = 0;
   20.96 -
   20.97 -	while (len--)
   20.98 -		sum += *mp++;
   20.99 -
  20.100 -	return sum & 0xFF;
  20.101 -}
  20.102 -
  20.103 -/*
  20.104 - * Processor encoding in an MP configuration block
  20.105 - */
  20.106 -
  20.107 -static char __init *mpc_family(int family,int model)
  20.108 -{
  20.109 -	static char n[32];
  20.110 -	static char *model_defs[]=
  20.111 -	{
  20.112 -		"80486DX","80486DX",
  20.113 -		"80486SX","80486DX/2 or 80487",
  20.114 -		"80486SL","80486SX/2",
  20.115 -		"Unknown","80486DX/2-WB",
  20.116 -		"80486DX/4","80486DX/4-WB"
  20.117 -	};
  20.118 -
  20.119 -	switch (family) {
  20.120 -		case 0x04:
  20.121 -			if (model < 10)
  20.122 -				return model_defs[model];
  20.123 -			break;
  20.124 -
  20.125 -		case 0x05:
  20.126 -			return("Pentium(tm)");
  20.127 -
  20.128 -		case 0x06:
  20.129 -			return("Pentium(tm) Pro");
  20.130 -
  20.131 -		case 0x0F:
  20.132 -			if (model == 0x00)
  20.133 -				return("Pentium 4(tm)");
  20.134 -			if (model == 0x01)
  20.135 -				return("Pentium 4(tm)");
  20.136 -			if (model == 0x02)
  20.137 -				return("Pentium 4(tm) XEON(tm)");
  20.138 -			if (model == 0x0F)
  20.139 -				return("Special controller");
  20.140 -	}
  20.141 -	sprintf(n,"Unknown CPU [%d:%d]",family, model);
  20.142 -	return n;
  20.143 -}
  20.144 -
  20.145 -/* 
  20.146 - * Have to match translation table entries to main table entries by counter
  20.147 - * hence the mpc_record variable .... can't see a less disgusting way of
  20.148 - * doing this ....
  20.149 - */
  20.150 -
  20.151 -static int mpc_record; 
  20.152 -static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
  20.153 -
  20.154 -void __init MP_processor_info (struct mpc_config_processor *m)
  20.155 -{
  20.156 - 	int ver, quad, logical_apicid;
  20.157 - 	
  20.158 -	if (!(m->mpc_cpuflag & CPU_ENABLED))
  20.159 -		return;
  20.160 -
  20.161 -	logical_apicid = m->mpc_apicid;
  20.162 -	if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
  20.163 -		quad = translation_table[mpc_record]->trans_quad;
  20.164 -		logical_apicid = (quad << 4) + 
  20.165 -			(m->mpc_apicid ? m->mpc_apicid << 1 : 1);
  20.166 -		printk("Processor #%d %s APIC version %d (quad %d, apic %d)\n",
  20.167 -			m->mpc_apicid,
  20.168 -			mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 ,
  20.169 -				   (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
  20.170 -			m->mpc_apicver, quad, logical_apicid);
  20.171 -	} else {
  20.172 -		printk("Processor #%d %s APIC version %d\n",
  20.173 -			m->mpc_apicid,
  20.174 -			mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 ,
  20.175 -				   (m->mpc_cpufeature & CPU_MODEL_MASK)>>4),
  20.176 -			m->mpc_apicver);
  20.177 -	}
  20.178 -
  20.179 -	if (m->mpc_featureflag&(1<<0))
  20.180 -		Dprintk("    Floating point unit present.\n");
  20.181 -	if (m->mpc_featureflag&(1<<7))
  20.182 -		Dprintk("    Machine Exception supported.\n");
  20.183 -	if (m->mpc_featureflag&(1<<8))
  20.184 -		Dprintk("    64 bit compare & exchange supported.\n");
  20.185 -	if (m->mpc_featureflag&(1<<9))
  20.186 -		Dprintk("    Internal APIC present.\n");
  20.187 -	if (m->mpc_featureflag&(1<<11))
  20.188 -		Dprintk("    SEP present.\n");
  20.189 -	if (m->mpc_featureflag&(1<<12))
  20.190 -		Dprintk("    MTRR  present.\n");
  20.191 -	if (m->mpc_featureflag&(1<<13))
  20.192 -		Dprintk("    PGE  present.\n");
  20.193 -	if (m->mpc_featureflag&(1<<14))
  20.194 -		Dprintk("    MCA  present.\n");
  20.195 -	if (m->mpc_featureflag&(1<<15))
  20.196 -		Dprintk("    CMOV  present.\n");
  20.197 -	if (m->mpc_featureflag&(1<<16))
  20.198 -		Dprintk("    PAT  present.\n");
  20.199 -	if (m->mpc_featureflag&(1<<17))
  20.200 -		Dprintk("    PSE  present.\n");
  20.201 -	if (m->mpc_featureflag&(1<<18))
  20.202 -		Dprintk("    PSN  present.\n");
  20.203 -	if (m->mpc_featureflag&(1<<19))
  20.204 -		Dprintk("    Cache Line Flush Instruction present.\n");
  20.205 -	/* 20 Reserved */
  20.206 -	if (m->mpc_featureflag&(1<<21))
  20.207 -		Dprintk("    Debug Trace and EMON Store present.\n");
  20.208 -	if (m->mpc_featureflag&(1<<22))
  20.209 -		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
  20.210 -	if (m->mpc_featureflag&(1<<23))
  20.211 -		Dprintk("    MMX  present.\n");
  20.212 -	if (m->mpc_featureflag&(1<<24))
  20.213 -		Dprintk("    FXSR  present.\n");
  20.214 -	if (m->mpc_featureflag&(1<<25))
  20.215 -		Dprintk("    XMM  present.\n");
  20.216 -	if (m->mpc_featureflag&(1<<26))
  20.217 -		Dprintk("    Willamette New Instructions  present.\n");
  20.218 -	if (m->mpc_featureflag&(1<<27))
  20.219 -		Dprintk("    Self Snoop  present.\n");
  20.220 -	if (m->mpc_featureflag&(1<<28))
  20.221 -		Dprintk("    HT  present.\n");
  20.222 -	if (m->mpc_featureflag&(1<<29))
  20.223 -		Dprintk("    Thermal Monitor present.\n");
  20.224 -	/* 30, 31 Reserved */
  20.225 -
  20.226 -
  20.227 -	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
  20.228 -		Dprintk("    Bootup CPU\n");
  20.229 -		boot_cpu_physical_apicid = m->mpc_apicid;
  20.230 -		boot_cpu_logical_apicid = logical_apicid;
  20.231 -	}
  20.232 -
  20.233 -	if (num_processors >= NR_CPUS){
  20.234 -		printk(KERN_WARNING "NR_CPUS limit of %i reached. Cannot "
  20.235 -			"boot CPU(apicid 0x%x).\n", NR_CPUS, m->mpc_apicid);
  20.236 -		return;
  20.237 -	}
  20.238 -	num_processors++;
  20.239 -
  20.240 -	if (m->mpc_apicid > MAX_APICS) {
  20.241 -		printk("Processor #%d INVALID. (Max ID: %d).\n",
  20.242 -			m->mpc_apicid, MAX_APICS);
  20.243 -		--num_processors;
  20.244 -		return;
  20.245 -	}
  20.246 -	ver = m->mpc_apicver;
  20.247 -
  20.248 -	logical_cpu_present_map |= 1 << (num_processors-1);
  20.249 - 	phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid);
  20.250 - 
  20.251 -	/*
  20.252 -	 * Validate version
  20.253 -	 */
  20.254 -	if (ver == 0x0) {
  20.255 -		printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
  20.256 -		ver = 0x10;
  20.257 -	}
  20.258 -	apic_version[m->mpc_apicid] = ver;
  20.259 -	raw_phys_apicid[num_processors - 1] = m->mpc_apicid;
  20.260 -}
  20.261 -
  20.262 -static void __init MP_bus_info (struct mpc_config_bus *m)
  20.263 -{
  20.264 -	char str[7];
  20.265 -	int quad;
  20.266 -
  20.267 -	memcpy(str, m->mpc_bustype, 6);
  20.268 -	str[6] = 0;
  20.269 -	
  20.270 -	if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
  20.271 -		quad = translation_table[mpc_record]->trans_quad;
  20.272 -		mp_bus_id_to_node[m->mpc_busid] = quad;
  20.273 -		mp_bus_id_to_local[m->mpc_busid] = translation_table[mpc_record]->trans_local;
  20.274 -		quad_local_to_mp_bus_id[quad][translation_table[mpc_record]->trans_local] = m->mpc_busid;
  20.275 -		printk("Bus #%d is %s (node %d)\n", m->mpc_busid, str, quad);
  20.276 -	} else {
  20.277 -		Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
  20.278 -	}
  20.279 -
  20.280 -	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
  20.281 -		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
  20.282 -	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
  20.283 -		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
  20.284 -	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
  20.285 -		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
  20.286 -		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
  20.287 -		mp_current_pci_id++;
  20.288 -	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
  20.289 -		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
  20.290 -	} else {
  20.291 -		printk("Unknown bustype %s - ignoring\n", str);
  20.292 -	}
  20.293 -}
  20.294 -
  20.295 -static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
  20.296 -{
  20.297 -	if (!(m->mpc_flags & MPC_APIC_USABLE))
  20.298 -		return;
  20.299 -
  20.300 -	printk("I/O APIC #%d Version %d at 0x%lX.\n",
  20.301 -		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
  20.302 -	if (nr_ioapics >= MAX_IO_APICS) {
  20.303 -		printk("Max # of I/O APICs (%d) exceeded (found %d).\n",
  20.304 -			MAX_IO_APICS, nr_ioapics);
   20.305 -		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
  20.306 -	}
  20.307 -	if (!m->mpc_apicaddr) {
  20.308 -		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
  20.309 -			" found in MP table, skipping!\n");
  20.310 -		return;
  20.311 -	}
  20.312 -	mp_ioapics[nr_ioapics] = *m;
  20.313 -	nr_ioapics++;
  20.314 -}
  20.315 -
  20.316 -static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
  20.317 -{
  20.318 -	mp_irqs [mp_irq_entries] = *m;
  20.319 -	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
  20.320 -		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
  20.321 -			m->mpc_irqtype, m->mpc_irqflag & 3,
  20.322 -			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
  20.323 -			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
  20.324 -	if (++mp_irq_entries == max_irq_sources)
  20.325 -		panic("Max # of irq sources exceeded!!\n");
  20.326 -}
  20.327 -
  20.328 -static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
  20.329 -{
  20.330 -	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
  20.331 -		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
  20.332 -			m->mpc_irqtype, m->mpc_irqflag & 3,
  20.333 -			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
  20.334 -			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
  20.335 -	/*
  20.336 -	 * Well it seems all SMP boards in existence
  20.337 -	 * use ExtINT/LVT1 == LINT0 and
  20.338 -	 * NMI/LVT2 == LINT1 - the following check
   20.339 - * will show us if this assumption is false.
  20.340 -	 * Until then we do not have to add baggage.
  20.341 -	 */
  20.342 -	if ((m->mpc_irqtype == mp_ExtINT) &&
  20.343 -		(m->mpc_destapiclint != 0))
  20.344 -			BUG();
  20.345 -	if ((m->mpc_irqtype == mp_NMI) &&
  20.346 -		(m->mpc_destapiclint != 1))
  20.347 -			BUG();
  20.348 -}
  20.349 -
  20.350 -static void __init MP_translation_info (struct mpc_config_translation *m)
  20.351 -{
  20.352 -	printk("Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
  20.353 -
  20.354 -	if (mpc_record >= MAX_MPC_ENTRY) 
  20.355 -		printk("MAX_MPC_ENTRY exceeded!\n");
  20.356 -	else
  20.357 -		translation_table[mpc_record] = m; /* stash this for later */
  20.358 -	if (m->trans_quad+1 > numnodes)
  20.359 -		numnodes = m->trans_quad+1;
  20.360 -}
  20.361 -
  20.362 -/*
  20.363 - * Read/parse the MPC oem tables
  20.364 - */
  20.365 -
  20.366 -static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
  20.367 -	unsigned short oemsize)
  20.368 -{
  20.369 -	int count = sizeof (*oemtable); /* the header size */
  20.370 -	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
  20.371 -	
  20.372 -	printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
  20.373 -	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
  20.374 -	{
  20.375 -		printk("SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
  20.376 -			oemtable->oem_signature[0],
  20.377 -			oemtable->oem_signature[1],
  20.378 -			oemtable->oem_signature[2],
  20.379 -			oemtable->oem_signature[3]);
  20.380 -		return;
  20.381 -	}
  20.382 -	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
  20.383 -	{
  20.384 -		printk("SMP oem mptable: checksum error!\n");
  20.385 -		return;
  20.386 -	}
  20.387 -	while (count < oemtable->oem_length) {
  20.388 -		switch (*oemptr) {
  20.389 -			case MP_TRANSLATION:
  20.390 -			{
  20.391 -				struct mpc_config_translation *m=
  20.392 -					(struct mpc_config_translation *)oemptr;
  20.393 -				MP_translation_info(m);
  20.394 -				oemptr += sizeof(*m);
  20.395 -				count += sizeof(*m);
  20.396 -				++mpc_record;
  20.397 -				break;
  20.398 -			}
  20.399 -			default:
  20.400 -			{
  20.401 -				printk("Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
  20.402 -				return;
  20.403 -			}
  20.404 -		}
  20.405 -       }
  20.406 -}
  20.407 -
  20.408 -/*
  20.409 - * Read/parse the MPC
  20.410 - */
  20.411 -
  20.412 -static int __init smp_read_mpc(struct mp_config_table *mpc)
  20.413 -{
  20.414 -	char oem[16], prod[14];
  20.415 -	int count=sizeof(*mpc);
  20.416 -	unsigned char *mpt=((unsigned char *)mpc)+count;
  20.417 -	int num_bus = 0;
  20.418 -	int num_irq = 0;
  20.419 -	unsigned char *bus_data;
  20.420 -
  20.421 -	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
  20.422 -		panic("SMP mptable: bad signature [%c%c%c%c]!\n",
  20.423 -			mpc->mpc_signature[0],
  20.424 -			mpc->mpc_signature[1],
  20.425 -			mpc->mpc_signature[2],
  20.426 -			mpc->mpc_signature[3]);
  20.427 -		return 0;
  20.428 -	}
  20.429 -	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
  20.430 -		panic("SMP mptable: checksum error!\n");
  20.431 -		return 0;
  20.432 -	}
  20.433 -	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
  20.434 -		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
  20.435 -			mpc->mpc_spec);
  20.436 -		return 0;
  20.437 -	}
  20.438 -	if (!mpc->mpc_lapic) {
  20.439 -		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
  20.440 -		return 0;
  20.441 -	}
  20.442 -	memcpy(oem,mpc->mpc_oem,8);
  20.443 -	oem[8]=0;
  20.444 -	printk("OEM ID: %s ",oem);
  20.445 -
  20.446 -	memcpy(prod,mpc->mpc_productid,12);
  20.447 -	prod[12]=0;
  20.448 -	printk("Product ID: %s ",prod);
  20.449 -
  20.450 -	detect_clustered_apic(oem, prod);
  20.451 -	
  20.452 -	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
  20.453 -
  20.454 -	/* 
  20.455 -	 * Save the local APIC address (it might be non-default) -- but only
  20.456 -	 * if we're not using ACPI.
  20.457 -	 */
  20.458 -	if (!acpi_lapic)
  20.459 -		mp_lapic_addr = mpc->mpc_lapic;
  20.460 -
  20.461 -	if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) {
  20.462 -		/* We need to process the oem mpc tables to tell us which quad things are in ... */
  20.463 -		mpc_record = 0;
  20.464 -		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, mpc->mpc_oemsize);
  20.465 -		mpc_record = 0;
  20.466 -	}
  20.467 -
  20.468 -	/* Pre-scan to determine the number of bus and 
   20.469 -	 * interrupt records we have
  20.470 -	 */
  20.471 -	while (count < mpc->mpc_length) {
  20.472 -		switch (*mpt) {
  20.473 -			case MP_PROCESSOR:
  20.474 -				mpt += sizeof(struct mpc_config_processor);
  20.475 -				count += sizeof(struct mpc_config_processor);
  20.476 -				break;
  20.477 -			case MP_BUS:
  20.478 -				++num_bus;
  20.479 -				mpt += sizeof(struct mpc_config_bus);
  20.480 -				count += sizeof(struct mpc_config_bus);
  20.481 -				break;
  20.482 -			case MP_INTSRC:
  20.483 -				++num_irq;
  20.484 -				mpt += sizeof(struct mpc_config_intsrc);
  20.485 -				count += sizeof(struct mpc_config_intsrc);
  20.486 -				break;
  20.487 -			case MP_IOAPIC:
  20.488 -				mpt += sizeof(struct mpc_config_ioapic);
  20.489 -				count += sizeof(struct mpc_config_ioapic);
  20.490 -				break;
  20.491 -			case MP_LINTSRC:
  20.492 -				mpt += sizeof(struct mpc_config_lintsrc);
  20.493 -				count += sizeof(struct mpc_config_lintsrc);
  20.494 -				break;
  20.495 -			default:
  20.496 -				count = mpc->mpc_length;
  20.497 -				break;
  20.498 -		}
  20.499 -	}
  20.500 -	/* 
  20.501 -	 * Paranoia: Allocate one extra of both the number of busses and number
  20.502 -	 * of irqs, and make sure that we have at least 4 interrupts per PCI
  20.503 -	 * slot.  But some machines do not report very many busses, so we need
  20.504 -	 * to fall back on the older defaults.
  20.505 -	 */
  20.506 -	++num_bus;
  20.507 -	max_mp_busses = max(num_bus, MAX_MP_BUSSES);
  20.508 -	if (num_irq < (4 * max_mp_busses))
  20.509 -		num_irq = 4 * num_bus;	/* 4 intr/PCI slot */
  20.510 -	++num_irq;
  20.511 -	max_irq_sources = max(num_irq, MAX_IRQ_SOURCES);
  20.512 -	
  20.513 -	count = (max_mp_busses * sizeof(int)) * 4;
  20.514 -	count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
  20.515 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
  20.516 -	if (!bus_data) {
  20.517 -		printk(KERN_ERR "SMP mptable: out of memory!\n");
  20.518 -		return 0;
  20.519 -	}
  20.520 -	mp_bus_id_to_type = (int *)&bus_data[0];
  20.521 -	mp_bus_id_to_node = (int *)&bus_data[(max_mp_busses * sizeof(int))];
  20.522 -	mp_bus_id_to_local = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 2];
  20.523 -	mp_bus_id_to_pci_bus = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 3];
  20.524 -	mp_irqs = (struct mpc_config_intsrc *)&bus_data[(max_mp_busses * sizeof(int)) * 4];
  20.525 -	memset(mp_bus_id_to_pci_bus, -1, max_mp_busses * sizeof(int));
  20.526 -
  20.527 -	/*
  20.528 -	 *	Now process the configuration blocks.
  20.529 -	 */
  20.530 -	count = sizeof(*mpc);
  20.531 -	mpt = ((unsigned char *)mpc)+count;
  20.532 -	while (count < mpc->mpc_length) {
  20.533 -		switch(*mpt) {
  20.534 -			case MP_PROCESSOR:
  20.535 -			{
  20.536 -				struct mpc_config_processor *m=
  20.537 -					(struct mpc_config_processor *)mpt;
  20.538 -				/* ACPI may have already provided this data */
  20.539 -				if (!acpi_lapic)
  20.540 -					MP_processor_info(m);
  20.541 -				mpt += sizeof(*m);
  20.542 -				count += sizeof(*m);
  20.543 -				break;
  20.544 -			}
  20.545 -			case MP_BUS:
  20.546 -			{
  20.547 -				struct mpc_config_bus *m=
  20.548 -					(struct mpc_config_bus *)mpt;
  20.549 -				MP_bus_info(m);
  20.550 -				mpt += sizeof(*m);
  20.551 -				count += sizeof(*m);
  20.552 -				break;
  20.553 -			}
  20.554 -			case MP_IOAPIC:
  20.555 -			{
  20.556 -				struct mpc_config_ioapic *m=
  20.557 -					(struct mpc_config_ioapic *)mpt;
  20.558 -				MP_ioapic_info(m);
  20.559 -				mpt+=sizeof(*m);
  20.560 -				count+=sizeof(*m);
  20.561 -				break;
  20.562 -			}
  20.563 -			case MP_INTSRC:
  20.564 -			{
  20.565 -				struct mpc_config_intsrc *m=
  20.566 -					(struct mpc_config_intsrc *)mpt;
  20.567 -
  20.568 -				MP_intsrc_info(m);
  20.569 -				mpt+=sizeof(*m);
  20.570 -				count+=sizeof(*m);
  20.571 -				break;
  20.572 -			}
  20.573 -			case MP_LINTSRC:
  20.574 -			{
  20.575 -				struct mpc_config_lintsrc *m=
  20.576 -					(struct mpc_config_lintsrc *)mpt;
  20.577 -				MP_lintsrc_info(m);
  20.578 -				mpt+=sizeof(*m);
  20.579 -				count+=sizeof(*m);
  20.580 -				break;
  20.581 -			}
  20.582 -			default:
  20.583 -			{
  20.584 -				count = mpc->mpc_length;
  20.585 -				break;
  20.586 -			}
  20.587 -		}
  20.588 -		++mpc_record;
  20.589 -	}
  20.590 -
  20.591 -	if (clustered_apic_mode){
  20.592 -		phys_cpu_present_map = logical_cpu_present_map;
  20.593 -	}
  20.594 -
  20.595 -
  20.596 -	printk("Enabling APIC mode: ");
  20.597 -	if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
  20.598 -		printk("Clustered Logical.	");
  20.599 -	else if(clustered_apic_mode == CLUSTERED_APIC_XAPIC)
  20.600 -		printk("Physical.	");
  20.601 -	else
  20.602 -		printk("Flat.	");
  20.603 -	printk("Using %d I/O APICs\n",nr_ioapics);
  20.604 -
  20.605 -	if (!num_processors)
  20.606 -		printk(KERN_ERR "SMP mptable: no processors registered!\n");
  20.607 -	return num_processors;
  20.608 -}
  20.609 -
  20.610 -static int __init ELCR_trigger(unsigned int irq)
  20.611 -{
  20.612 -	unsigned int port;
  20.613 -
  20.614 -	port = 0x4d0 + (irq >> 3);
  20.615 -	return (inb(port) >> (irq & 7)) & 1;
  20.616 -}
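
The ELCR spreads the 16 ISA IRQs across two I/O ports: 0x4d0 carries one bit each for IRQ0-7 and 0x4d1 for IRQ8-15, with a set bit meaning level-triggered. A worked example of the same arithmetic as ELCR_trigger() above, for IRQ 9 (illustrative only):

	/* IRQ 9: port = 0x4d0 + (9 >> 3) = 0x4d1, bit = 9 & 7 = 1 */
	unsigned int is_level = (inb(0x4d1) >> 1) & 1;	/* 1 = level, 0 = edge */
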
  20.617 -
  20.618 -static void __init construct_default_ioirq_mptable(int mpc_default_type)
  20.619 -{
  20.620 -	struct mpc_config_intsrc intsrc;
  20.621 -	int i;
  20.622 -	int ELCR_fallback = 0;
  20.623 -
  20.624 -	intsrc.mpc_type = MP_INTSRC;
  20.625 -	intsrc.mpc_irqflag = 0;			/* conforming */
  20.626 -	intsrc.mpc_srcbus = 0;
  20.627 -	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
  20.628 -
  20.629 -	intsrc.mpc_irqtype = mp_INT;
  20.630 -
  20.631 -	/*
  20.632 -	 *  If true, we have an ISA/PCI system with no IRQ entries
  20.633 -	 *  in the MP table. To prevent the PCI interrupts from being set up
  20.634 -	 *  incorrectly, we try to use the ELCR. The sanity check to see if
  20.635 -	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
  20.636 -	 *  never be level sensitive, so we simply see if the ELCR agrees.
  20.637 -	 *  If it does, we assume it's valid.
  20.638 -	 */
  20.639 -	if (mpc_default_type == 5) {
  20.640 -		printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
  20.641 -
  20.642 -		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
  20.643 -			printk("ELCR contains invalid data... not using ELCR\n");
  20.644 -		else {
  20.645 -			printk("Using ELCR to identify PCI interrupts\n");
  20.646 -			ELCR_fallback = 1;
  20.647 -		}
  20.648 -	}
  20.649 -
  20.650 -	for (i = 0; i < 16; i++) {
  20.651 -		switch (mpc_default_type) {
  20.652 -		case 2:
  20.653 -			if (i == 0 || i == 13)
  20.654 -				continue;	/* IRQ0 & IRQ13 not connected */
  20.655 -			/* fall through */
  20.656 -		default:
  20.657 -			if (i == 2)
  20.658 -				continue;	/* IRQ2 is never connected */
  20.659 -		}
  20.660 -
  20.661 -		if (ELCR_fallback) {
  20.662 -			/*
  20.663 -			 *  If the ELCR indicates a level-sensitive interrupt, we
  20.664 -			 *  copy that information over to the MP table in the
  20.665 -			 *  irqflag field (level sensitive, active high polarity).
  20.666 -			 */
  20.667 -			if (ELCR_trigger(i))
  20.668 -				intsrc.mpc_irqflag = 13;
  20.669 -			else
  20.670 -				intsrc.mpc_irqflag = 0;
  20.671 -		}
  20.672 -
  20.673 -		intsrc.mpc_srcbusirq = i;
  20.674 -		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
  20.675 -		MP_intsrc_info(&intsrc);
  20.676 -	}
  20.677 -
  20.678 -	intsrc.mpc_irqtype = mp_ExtINT;
  20.679 -	intsrc.mpc_srcbusirq = 0;
  20.680 -	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
  20.681 -	MP_intsrc_info(&intsrc);
  20.682 -}
  20.683 -
  20.684 -static inline void __init construct_default_ISA_mptable(int mpc_default_type)
  20.685 -{
  20.686 -	struct mpc_config_processor processor;
  20.687 -	struct mpc_config_bus bus;
  20.688 -	struct mpc_config_ioapic ioapic;
  20.689 -	struct mpc_config_lintsrc lintsrc;
  20.690 -	int linttypes[2] = { mp_ExtINT, mp_NMI };
  20.691 -	int i;
  20.692 -	struct {
  20.693 -		int mp_bus_id_to_type[MAX_MP_BUSSES];
  20.694 -		int mp_bus_id_to_node[MAX_MP_BUSSES];
  20.695 -		int mp_bus_id_to_local[MAX_MP_BUSSES];
  20.696 -		int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
  20.697 -		struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
  20.698 -	} *bus_data;
  20.699 -
  20.700 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(sizeof(*bus_data)));
  20.701 -	if (!bus_data)
  20.702 -		panic("SMP mptable: out of memory!\n");
  20.703 -	mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
  20.704 -	mp_bus_id_to_node = bus_data->mp_bus_id_to_node;
  20.705 -	mp_bus_id_to_local = bus_data->mp_bus_id_to_local;
  20.706 -	mp_bus_id_to_pci_bus = bus_data->mp_bus_id_to_pci_bus;
  20.707 -	mp_irqs = bus_data->mp_irqs;
  20.708 -	for (i = 0; i < MAX_MP_BUSSES; ++i)
  20.709 -		mp_bus_id_to_pci_bus[i] = -1;
  20.710 -
  20.711 -	/*
  20.712 -	 * local APIC has default address
  20.713 -	 */
  20.714 -	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
  20.715 -
  20.716 -	/*
  20.717 -	 * 2 CPUs, numbered 0 & 1.
  20.718 -	 */
  20.719 -	processor.mpc_type = MP_PROCESSOR;
  20.720 -	/* Either an integrated APIC or a discrete 82489DX. */
  20.721 -	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
  20.722 -	processor.mpc_cpuflag = CPU_ENABLED;
  20.723 -	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
  20.724 -				   (boot_cpu_data.x86_model << 4) |
  20.725 -				   boot_cpu_data.x86_mask;
  20.726 -	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
  20.727 -	processor.mpc_reserved[0] = 0;
  20.728 -	processor.mpc_reserved[1] = 0;
  20.729 -	for (i = 0; i < 2; i++) {
  20.730 -		processor.mpc_apicid = i;
  20.731 -		MP_processor_info(&processor);
  20.732 -	}
  20.733 -
  20.734 -	bus.mpc_type = MP_BUS;
  20.735 -	bus.mpc_busid = 0;
  20.736 -	switch (mpc_default_type) {
  20.737 -		default:
  20.738 -			printk("???\nUnknown standard configuration %d\n",
  20.739 -				mpc_default_type);
  20.740 -			/* fall through */
  20.741 -		case 1:
  20.742 -		case 5:
  20.743 -			memcpy(bus.mpc_bustype, "ISA   ", 6);
  20.744 -			break;
  20.745 -		case 2:
  20.746 -		case 6:
  20.747 -		case 3:
  20.748 -			memcpy(bus.mpc_bustype, "EISA  ", 6);
  20.749 -			break;
  20.750 -		case 4:
  20.751 -		case 7:
  20.752 -			memcpy(bus.mpc_bustype, "MCA   ", 6);
  20.753 -	}
  20.754 -	MP_bus_info(&bus);
  20.755 -	if (mpc_default_type > 4) {
  20.756 -		bus.mpc_busid = 1;
  20.757 -		memcpy(bus.mpc_bustype, "PCI   ", 6);
  20.758 -		MP_bus_info(&bus);
  20.759 -	}
  20.760 -
  20.761 -	ioapic.mpc_type = MP_IOAPIC;
  20.762 -	ioapic.mpc_apicid = 2;
  20.763 -	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
  20.764 -	ioapic.mpc_flags = MPC_APIC_USABLE;
  20.765 -	ioapic.mpc_apicaddr = 0xFEC00000;
  20.766 -	MP_ioapic_info(&ioapic);
  20.767 -
  20.768 -	/*
  20.769 -	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
  20.770 -	 */
  20.771 -	construct_default_ioirq_mptable(mpc_default_type);
  20.772 -
  20.773 -	lintsrc.mpc_type = MP_LINTSRC;
  20.774 -	lintsrc.mpc_irqflag = 0;		/* conforming */
  20.775 -	lintsrc.mpc_srcbusid = 0;
  20.776 -	lintsrc.mpc_srcbusirq = 0;
  20.777 -	lintsrc.mpc_destapic = MP_APIC_ALL;
  20.778 -	for (i = 0; i < 2; i++) {
  20.779 -		lintsrc.mpc_irqtype = linttypes[i];
  20.780 -		lintsrc.mpc_destapiclint = i;
  20.781 -		MP_lintsrc_info(&lintsrc);
  20.782 -	}
  20.783 -}
  20.784 -
  20.785 -static struct intel_mp_floating *mpf_found;
  20.786 -
  20.787 -/*
  20.788 - * Scan the memory blocks for an SMP configuration block.
  20.789 - */
  20.790 -void __init get_smp_config (void)
  20.791 -{
  20.792 -	struct intel_mp_floating *mpf = mpf_found;
  20.793 -
  20.794 -	/*
  20.795 -	 * ACPI may be used to obtain the entire SMP configuration or just to 
  20.796 -	 * enumerate/configure processors (CONFIG_ACPI_HT_ONLY).  Note that 
  20.797 -	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
  20.798 -	 * processors, where MPS only supports physical.
  20.799 -	 */
  20.800 -	if (acpi_lapic && acpi_ioapic) {
  20.801 -		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
  20.802 -		return;
  20.803 -	}
  20.804 -	else if (acpi_lapic)
  20.805 -		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
  20.806 -
  20.807 -	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
  20.808 -	if (mpf->mpf_feature2 & (1<<7)) {
  20.809 -		printk("    IMCR and PIC compatibility mode.\n");
  20.810 -		pic_mode = 1;
  20.811 -	} else {
  20.812 -		printk("    Virtual Wire compatibility mode.\n");
  20.813 -		pic_mode = 0;
  20.814 -	}
  20.815 -
  20.816 -	/*
  20.817 -	 * Now see if we need to read further.
  20.818 -	 */
  20.819 -	if (mpf->mpf_feature1 != 0) {
  20.820 -
  20.821 -		printk("Default MP configuration #%d\n", mpf->mpf_feature1);
  20.822 -		construct_default_ISA_mptable(mpf->mpf_feature1);
  20.823 -
  20.824 -	} else if (mpf->mpf_physptr) {
  20.825 -
  20.826 -		/*
  20.827 -		 * Read the physical hardware table.  Anything here will
  20.828 -		 * override the defaults.
  20.829 -		 */
  20.830 -		if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
  20.831 -			smp_found_config = 0;
  20.832 -			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
  20.833 -			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
  20.834 -			return;
  20.835 -		}
  20.836 -		/*
  20.837 -		 * If there are no explicit MP IRQ entries, then we are
  20.838 -		 * broken.  We set up most of the low 16 IO-APIC pins to
  20.839 -		 * ISA defaults and hope it will work.
  20.840 -		 */
  20.841 -		if (!mp_irq_entries) {
  20.842 -			struct mpc_config_bus bus;
  20.843 -
  20.844 -			printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
  20.845 -
  20.846 -			bus.mpc_type = MP_BUS;
  20.847 -			bus.mpc_busid = 0;
  20.848 -			memcpy(bus.mpc_bustype, "ISA   ", 6);
  20.849 -			MP_bus_info(&bus);
  20.850 -
  20.851 -			construct_default_ioirq_mptable(0);
  20.852 -		}
  20.853 -
  20.854 -	} else
  20.855 -		BUG();
  20.856 -
  20.857 -	printk("Processors: %d\n", num_processors);
  20.858 -	/*
  20.859 -	 * Only use the first configuration found.
  20.860 -	 */
  20.861 -}
  20.862 -
  20.863 -static int __init smp_scan_config (unsigned long base, unsigned long length)
  20.864 -{
  20.865 -	unsigned long *bp = phys_to_virt(base);
  20.866 -	struct intel_mp_floating *mpf;
  20.867 -
  20.868 -	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
  20.869 -	if (sizeof(*mpf) != 16)
  20.870 -		printk("Error: MPF size\n");
  20.871 -
  20.872 -	while (length > 0) {
  20.873 -		mpf = (struct intel_mp_floating *)bp;
  20.874 -		if ((*bp == SMP_MAGIC_IDENT) &&
  20.875 -			(mpf->mpf_length == 1) &&
  20.876 -			!mpf_checksum((unsigned char *)bp, 16) &&
  20.877 -			((mpf->mpf_specification == 1)
  20.878 -				|| (mpf->mpf_specification == 4)) ) {
  20.879 -
  20.880 -			smp_found_config = 1;
  20.881 -			printk("found SMP MP-table at %08lx\n",
  20.882 -						virt_to_phys(mpf));
  20.883 -			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
  20.884 -			if (mpf->mpf_physptr)
  20.885 -				reserve_bootmem(mpf->mpf_physptr, PAGE_SIZE);
  20.886 -			mpf_found = mpf;
  20.887 -			return 1;
  20.888 -		}
  20.889 -		bp += 4;
  20.890 -		length -= 16;
  20.891 -	}
  20.892 -	return 0;
  20.893 -}
  20.894 -
  20.895 -void __init find_intel_smp (void)
  20.896 -{
  20.897 -	unsigned int address;
  20.898 -
  20.899 -	/*
  20.900 -	 * FIXME: Linux assumes you have 640K of base ram..
  20.901 -	 * this continues the error...
  20.902 -	 *
  20.903 -	 * 1) Scan the bottom 1K for a signature
  20.904 -	 * 2) Scan the top 1K of base RAM
  20.905 -	 * 3) Scan the 64K of bios
  20.906 -	 */
  20.907 -	if (smp_scan_config(0x0,0x400) ||
  20.908 -		smp_scan_config(639*0x400,0x400) ||
  20.909 -			smp_scan_config(0xF0000,0x10000))
  20.910 -		return;
  20.911 -	/*
  20.912 -	 * If it is an SMP machine we should know now, unless the
  20.913 -	 * configuration is in an EISA/MCA bus machine with an
  20.914 -	 * extended bios data area.
  20.915 -	 *
  20.916 -	 * there is a real-mode segmented pointer pointing to the
  20.917 -	 * 4K EBDA area at 0x40E, calculate and scan it here.
  20.918 -	 *
  20.919 -	 * NOTE! There were Linux loaders that will corrupt the EBDA
  20.920 -	 * area, and as such this kind of SMP config may be less
  20.921 -	 * trustworthy, simply because the SMP table may have been
  20.922 -	 * stomped on during early boot.  Thankfully the bootloaders
  20.923 -	 * now honour the EBDA.
  20.924 -	 */
  20.925 -
  20.926 -	address = *(unsigned short *)phys_to_virt(0x40E);
  20.927 -	address <<= 4;
  20.928 -	smp_scan_config(address, 0x1000);
  20.929 -}
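
The EBDA scan above relies on the BIOS data area: the 16-bit word at physical address 0x40E holds the real-mode segment of the EBDA, so shifting it left by 4 gives its physical base. A worked example with a hypothetical value:

	unsigned int seg = *(unsigned short *)phys_to_virt(0x40E);	/* e.g. 0x9FC0 */
	unsigned long ebda = (unsigned long)seg << 4;			/* 0x9FC00 */
	smp_scan_config(ebda, 0x1000);		/* scan the 4K EBDA window */
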
  20.930 -
  20.931 -#else
  20.932 -
  20.933 -/*
  20.934 - * The Visual Workstation is Intel MP compliant in the hardware
  20.935 - * sense, but it doesn't have a BIOS(-configuration table).
  20.936 - * No problem for Linux.
  20.937 - */
  20.938 -void __init find_visws_smp(void)
  20.939 -{
  20.940 -	smp_found_config = 1;
  20.941 -
  20.942 -	phys_cpu_present_map |= 2; /* or in id 1 */
  20.943 -	apic_version[1] |= 0x10; /* integrated APIC */
  20.944 -	apic_version[0] |= 0x10;
  20.945 -
  20.946 -	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
  20.947 -}
  20.948 -
  20.949 -#endif
  20.950 -
  20.951 -/*
  20.952 - * - Intel MP Configuration Table
  20.953 - * - or SGI Visual Workstation configuration
  20.954 - */
  20.955 -void __init find_smp_config (void)
  20.956 -{
  20.957 -#ifdef CONFIG_X86_LOCAL_APIC
  20.958 -	find_intel_smp();
  20.959 -#endif
  20.960 -#ifdef CONFIG_VISWS
  20.961 -	find_visws_smp();
  20.962 -#endif
  20.963 -}
  20.964 -
  20.965 -
  20.966 -/* --------------------------------------------------------------------------
  20.967 -                            ACPI-based MP Configuration
  20.968 -   -------------------------------------------------------------------------- */
  20.969 -
  20.970 -#ifdef CONFIG_ACPI_BOOT
  20.971 -
  20.972 -void __init mp_register_lapic_address (
  20.973 -	u64			address)
  20.974 -{
  20.975 -	mp_lapic_addr = (unsigned long) address;
  20.976 -
  20.977 -	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
  20.978 -
  20.979 -	if (boot_cpu_physical_apicid == -1U)
  20.980 -		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
  20.981 -
  20.982 -	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
  20.983 -}
  20.984 -
  20.985 -
  20.986 -void __init mp_register_lapic (
  20.987 -	u8			id, 
  20.988 -	u8			enabled)
  20.989 -{
  20.990 -	struct mpc_config_processor processor;
  20.991 -	int			boot_cpu = 0;
  20.992 -	
  20.993 -	if (id >= MAX_APICS) {
  20.994 -		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
  20.995 -			id, MAX_APICS);
  20.996 -		return;
  20.997 -	}
  20.998 -
  20.999 -	if (id == boot_cpu_physical_apicid)
 20.1000 -		boot_cpu = 1;
 20.1001 -
 20.1002 -	processor.mpc_type = MP_PROCESSOR;
 20.1003 -	processor.mpc_apicid = id;
 20.1004 -
 20.1005 -	/*
  20.1006 -	 * mp_register_lapic_address(), which is called before the
  20.1007 -	 * current function, sets up the fixmap of FIX_APIC_BASE.
  20.1008 -	 * Read the correct APIC version from there.
 20.1009 -	 */
 20.1010 -	processor.mpc_apicver = apic_read(APIC_LVR);
 20.1011 -
 20.1012 -	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
 20.1013 -	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
 20.1014 -	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
 20.1015 -		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
 20.1016 -	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
 20.1017 -	processor.mpc_reserved[0] = 0;
 20.1018 -	processor.mpc_reserved[1] = 0;
 20.1019 -
 20.1020 -	MP_processor_info(&processor);
 20.1021 -}
 20.1022 -
 20.1023 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
 20.1024 -
 20.1025 -#define MP_ISA_BUS		0
 20.1026 -#define MP_MAX_IOAPIC_PIN	127
 20.1027 -
 20.1028 -struct mp_ioapic_routing {
 20.1029 -	int			apic_id;
 20.1030 -	int			irq_start;
 20.1031 -	int			irq_end;
 20.1032 -	u32			pin_programmed[4];
 20.1033 -} mp_ioapic_routing[MAX_IO_APICS];
 20.1034 -
 20.1035 -
 20.1036 -static int __init mp_find_ioapic (
 20.1037 -	int			irq)
 20.1038 -{
 20.1039 -	int			i = 0;
 20.1040 -
 20.1041 -	/* Find the IOAPIC that manages this IRQ. */
 20.1042 -	for (i = 0; i < nr_ioapics; i++) {
 20.1043 -		if ((irq >= mp_ioapic_routing[i].irq_start)
 20.1044 -			&& (irq <= mp_ioapic_routing[i].irq_end))
 20.1045 -			return i;
 20.1046 -	}
 20.1047 -
 20.1048 -	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d\n", irq);
 20.1049 -
 20.1050 -	return -1;
 20.1051 -}
 20.1052 -	
 20.1053 -
 20.1054 -void __init mp_register_ioapic (
 20.1055 -	u8			id, 
 20.1056 -	u32			address,
 20.1057 -	u32			irq_base)
 20.1058 -{
 20.1059 -	int			idx = 0;
 20.1060 -
 20.1061 -	if (nr_ioapics >= MAX_IO_APICS) {
 20.1062 -		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
 20.1063 -			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
 20.1064 -		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
 20.1065 -	}
 20.1066 -	if (!address) {
 20.1067 -		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
 20.1068 -			" found in MADT table, skipping!\n");
 20.1069 -		return;
 20.1070 -	}
 20.1071 -
 20.1072 -	idx = nr_ioapics++;
 20.1073 -
 20.1074 -	mp_ioapics[idx].mpc_type = MP_IOAPIC;
 20.1075 -	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
 20.1076 -	mp_ioapics[idx].mpc_apicaddr = address;
 20.1077 -
 20.1078 -	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
 20.1079 -	mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
 20.1080 -	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
 20.1081 -	
 20.1082 -	/* 
 20.1083 -	 * Build basic IRQ lookup table to facilitate irq->io_apic lookups
 20.1084 -	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
 20.1085 -	 */
 20.1086 -	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
 20.1087 -	mp_ioapic_routing[idx].irq_start = irq_base;
 20.1088 -	mp_ioapic_routing[idx].irq_end = irq_base + 
 20.1089 -		io_apic_get_redir_entries(idx);
 20.1090 -
 20.1091 -	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
 20.1092 -		"IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
 20.1093 -		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
 20.1094 -		mp_ioapic_routing[idx].irq_start,
 20.1095 -		mp_ioapic_routing[idx].irq_end);
 20.1096 -
 20.1097 -	return;
 20.1098 -}
 20.1099 -
 20.1100 -
 20.1101 -void __init mp_override_legacy_irq (
 20.1102 -	u8			bus_irq,
 20.1103 -	u8			polarity, 
 20.1104 -	u8			trigger, 
 20.1105 -	u32			global_irq)
 20.1106 -{
 20.1107 -	struct mpc_config_intsrc intsrc;
 20.1108 -	int			i = 0;
 20.1109 -	int			found = 0;
 20.1110 -	int			ioapic = -1;
 20.1111 -	int			pin = -1;
 20.1112 -
 20.1113 -	/* 
 20.1114 -	 * Convert 'global_irq' to 'ioapic.pin'.
 20.1115 -	 */
 20.1116 -	ioapic = mp_find_ioapic(global_irq);
 20.1117 -	if (ioapic < 0)
 20.1118 -		return;
 20.1119 -	pin = global_irq - mp_ioapic_routing[ioapic].irq_start;
 20.1120 -
 20.1121 -	/*
 20.1122 -	 * TBD: This check is for faulty timer entries, where the override
 20.1123 -	 *      erroneously sets the trigger to level, resulting in a HUGE 
 20.1124 -	 *      increase of timer interrupts!
 20.1125 -	 */
 20.1126 -	if ((bus_irq == 0) && (global_irq == 2) && (trigger == 3))
 20.1127 -		trigger = 1;
 20.1128 -
 20.1129 -	intsrc.mpc_type = MP_INTSRC;
 20.1130 -	intsrc.mpc_irqtype = mp_INT;
 20.1131 -	intsrc.mpc_irqflag = (trigger << 2) | polarity;
 20.1132 -	intsrc.mpc_srcbus = MP_ISA_BUS;
 20.1133 -	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
 20.1134 -	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
 20.1135 -	intsrc.mpc_dstirq = pin;				    /* INTIN# */
 20.1136 -
 20.1137 -	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
 20.1138 -		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
 20.1139 -		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
 20.1140 -		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
 20.1141 -
 20.1142 -	/* 
  20.1143 -	 * If an [IOAPIC.PIN -> IRQ] routing entry already exists, we override it.
 20.1144 -	 * Otherwise create a new entry (e.g. global_irq == 2).
 20.1145 -	 */
 20.1146 -	for (i = 0; i < mp_irq_entries; i++) {
 20.1147 -		if ((mp_irqs[i].mpc_dstapic == intsrc.mpc_dstapic) 
 20.1148 -			&& (mp_irqs[i].mpc_srcbusirq == intsrc.mpc_srcbusirq)) {
 20.1149 -			mp_irqs[i] = intsrc;
 20.1150 -			found = 1;
 20.1151 -			break;
 20.1152 -		}
 20.1153 -	}
 20.1154 -	if (!found) {
 20.1155 -		mp_irqs[mp_irq_entries] = intsrc;
 20.1156 -		if (++mp_irq_entries == MAX_IRQ_SOURCES)
 20.1157 -			panic("Max # of irq sources exceeded!\n");
 20.1158 -	}
 20.1159 -
 20.1160 -	return;
 20.1161 -}
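
mpc_irqflag packs the MPS INTI flags as (trigger << 2) | polarity, where 0 means bus default, 1 means edge/active-high and 3 means level/active-low. Two illustrative encodings (not taken from this changeset):

	intsrc.mpc_irqflag = (1 << 2) | 1;	/* edge-triggered, active-high: 0x05 */
	intsrc.mpc_irqflag = (3 << 2) | 3;	/* level-triggered, active-low: 0x0f */
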
 20.1162 -
 20.1163 -
 20.1164 -void __init mp_config_acpi_legacy_irqs (void)
 20.1165 -{
 20.1166 -	int			i = 0;
 20.1167 -	int			ioapic = -1;
 20.1168 -
 20.1169 -	/*
 20.1170 -	 * Initialize mp_irqs for IRQ configuration.
 20.1171 -	 */
 20.1172 -	unsigned char *bus_data;
 20.1173 -	int count;
 20.1174 -
 20.1175 -	count = (MAX_MP_BUSSES * sizeof(int)) * 4;
 20.1176 -	count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
 20.1177 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
 20.1178 -	if (!bus_data) {
 20.1179 -		panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
 20.1180 -	}
 20.1181 -	mp_bus_id_to_type = (int *)&bus_data[0];
 20.1182 -	mp_bus_id_to_node = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int))];
 20.1183 -	mp_bus_id_to_local = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 2];
 20.1184 -	mp_bus_id_to_pci_bus = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 3];
 20.1185 -	mp_irqs = (struct mpc_config_intsrc *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 4];
 20.1186 -	for (i = 0; i < MAX_MP_BUSSES; ++i)
 20.1187 -	  mp_bus_id_to_pci_bus[i] = -1;
 20.1188 -
 20.1189 -	/* 
  20.1190 -	 * Fabricate the legacy ISA bus (MP_ISA_BUS, i.e. bus #0).
 20.1191 -	 */
 20.1192 -	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
 20.1193 -	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
 20.1194 -
 20.1195 -	/* 
 20.1196 -	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
 20.1197 -	 */
 20.1198 -	ioapic = mp_find_ioapic(0);
 20.1199 -	if (ioapic < 0)
 20.1200 -		return;
 20.1201 -
 20.1202 -	/* 
 20.1203 -	 * Use the default configuration for the IRQs 0-15.  These may be
  20.1204 -	 * overridden by (MADT) interrupt source override entries.
 20.1205 -	 */
 20.1206 -	for (i = 0; i < 16; i++) {
 20.1207 -
 20.1208 -		if (i == 2) continue;			/* Don't connect IRQ2 */
 20.1209 -
 20.1210 -		mp_irqs[mp_irq_entries].mpc_type = MP_INTSRC;
 20.1211 -		mp_irqs[mp_irq_entries].mpc_irqflag = 0;	/* Conforming */
 20.1212 -		mp_irqs[mp_irq_entries].mpc_srcbus = MP_ISA_BUS;
 20.1213 -		mp_irqs[mp_irq_entries].mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
 20.1214 -		mp_irqs[mp_irq_entries].mpc_irqtype = i ? mp_INT : mp_ExtINT;   /* 8259A to #0 */
 20.1215 -		mp_irqs[mp_irq_entries].mpc_srcbusirq = i;	   /* Identity mapped */
 20.1216 -		mp_irqs[mp_irq_entries].mpc_dstirq = i;
 20.1217 -
 20.1218 -		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
 20.1219 -			"%d-%d\n", 
 20.1220 -			mp_irqs[mp_irq_entries].mpc_irqtype, 
 20.1221 -			mp_irqs[mp_irq_entries].mpc_irqflag & 3, 
 20.1222 -			(mp_irqs[mp_irq_entries].mpc_irqflag >> 2) & 3, 
 20.1223 -			mp_irqs[mp_irq_entries].mpc_srcbus, 
 20.1224 -			mp_irqs[mp_irq_entries].mpc_srcbusirq, 
 20.1225 -			mp_irqs[mp_irq_entries].mpc_dstapic, 
 20.1226 -			mp_irqs[mp_irq_entries].mpc_dstirq);
 20.1227 -
 20.1228 -		if (++mp_irq_entries == MAX_IRQ_SOURCES)
 20.1229 -			panic("Max # of irq sources exceeded!\n");
 20.1230 -	}
 20.1231 -}
 20.1232 -
 20.1233 -/*extern FADT_DESCRIPTOR acpi_fadt;*/
 20.1234 -
 20.1235 -void __init mp_config_ioapic_for_sci(int irq)
 20.1236 -{
 20.1237 -	int ioapic;
 20.1238 -	int ioapic_pin;
 20.1239 -	struct acpi_table_madt* madt;
 20.1240 -	struct acpi_table_int_src_ovr *entry = NULL;
 20.1241 -	acpi_interrupt_flags flags;
 20.1242 -	void *madt_end;
 20.1243 -	acpi_status status;
 20.1244 -
 20.1245 -	/*
 20.1246 -	 * Ensure that if there is an interrupt source override entry
 20.1247 -	 * for the ACPI SCI, we leave it as is. Unfortunately this involves
 20.1248 -	 * walking the MADT again.
 20.1249 -	 */
 20.1250 -	status = acpi_get_firmware_table("APIC", 1, ACPI_LOGICAL_ADDRESSING,
 20.1251 -		(struct acpi_table_header **) &madt);
 20.1252 -	if (ACPI_SUCCESS(status)) {
 20.1253 -		madt_end = (void *) (unsigned long)madt + madt->header.length;
 20.1254 -
 20.1255 -		entry = (struct acpi_table_int_src_ovr *)
 20.1256 -                ((unsigned long) madt + sizeof(struct acpi_table_madt));
 20.1257 -
 20.1258 -		while ((void *) entry < madt_end) {
 20.1259 -                	if (entry->header.type == ACPI_MADT_INT_SRC_OVR &&
 20.1260 -			    acpi_fadt.sci_int == entry->bus_irq)
 20.1261 -				goto found;
 20.1262 -			
 20.1263 -                	entry = (struct acpi_table_int_src_ovr *)
 20.1264 -                	        ((unsigned long) entry + entry->header.length);
 20.1265 -        	}
 20.1266 -	}
 20.1267 -	/*
  20.1268 -	 * Although the ACPI spec says that the SCI should be level/low,
 20.1269 -	 * don't reprogram it unless there is an explicit MADT OVR entry
 20.1270 -	 * instructing us to do so -- otherwise we break Tyan boards which
 20.1271 -	 * have the SCI wired edge/high but no MADT OVR.
 20.1272 -	 */
 20.1273 -	return;
 20.1274 -
 20.1275 -found:
 20.1276 -	/*
 20.1277 -	 * See the note at the end of ACPI 2.0b section
 20.1278 -	 * 5.2.10.8 for what this is about.
 20.1279 -	 */
 20.1280 -	flags = entry->flags;
 20.1281 -	acpi_fadt.sci_int = entry->global_irq;
 20.1282 -	irq = entry->global_irq;
 20.1283 -	
 20.1284 -	ioapic = mp_find_ioapic(irq);
 20.1285 -
 20.1286 -	ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start;
 20.1287 -
 20.1288 -	/*
 20.1289 -	 * MPS INTI flags:
 20.1290 -	 *  trigger: 0=default, 1=edge, 3=level
 20.1291 -	 *  polarity: 0=default, 1=high, 3=low
 20.1292 -	 * Per ACPI spec, default for SCI means level/low.
 20.1293 -	 */
 20.1294 -	io_apic_set_pci_routing(ioapic, ioapic_pin, irq, 
 20.1295 -		(flags.trigger == 1 ? 0 : 1), (flags.polarity == 1 ? 0 : 1));
 20.1296 -}
 20.1297 -
 20.1298 -
 20.1299 -#ifdef CONFIG_ACPI_PCI
 20.1300 -
 20.1301 -void __init mp_parse_prt (void)
 20.1302 -{
 20.1303 -	struct list_head	*node = NULL;
 20.1304 -	struct acpi_prt_entry	*entry = NULL;
 20.1305 -	int			ioapic = -1;
 20.1306 -	int			ioapic_pin = 0;
 20.1307 -	int			irq = 0;
 20.1308 -	int			idx, bit = 0;
 20.1309 -	int			edge_level = 0;
 20.1310 -	int			active_high_low = 0;
 20.1311 -
 20.1312 -	/*
  20.1313 -	 * Parse the PCI Interrupt Routing Table (PRT) and program the
  20.1314 -	 * routing for all entries.
 20.1315 -	 */
 20.1316 -	list_for_each(node, &acpi_prt.entries) {
 20.1317 -		entry = list_entry(node, struct acpi_prt_entry, node);
 20.1318 -
 20.1319 -		/* Need to get irq for dynamic entry */
 20.1320 -		if (entry->link.handle) {
 20.1321 -			irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
 20.1322 -			if (!irq)
 20.1323 -				continue;
 20.1324 -		}
 20.1325 -		else {
 20.1326 -			/* Hardwired IRQ. Assume PCI standard settings */
 20.1327 -			irq = entry->link.index;
 20.1328 -			edge_level = 1;
 20.1329 -			active_high_low = 1;
 20.1330 -		}
 20.1331 -
 20.1332 -		/* Don't set up the ACPI SCI because it's already set up */
 20.1333 -                if (acpi_fadt.sci_int == irq) {
 20.1334 -                        entry->irq = irq; /*we still need to set entry's irq*/
 20.1335 -			continue;
 20.1336 -                }
 20.1337 -	
 20.1338 -		ioapic = mp_find_ioapic(irq);
 20.1339 -		if (ioapic < 0)
 20.1340 -			continue;
 20.1341 -		ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start;
 20.1342 -
 20.1343 -		/* 
 20.1344 -		 * Avoid pin reprogramming.  PRTs typically include entries  
 20.1345 -		 * with redundant pin->irq mappings (but unique PCI devices);
  20.1346 -		 * we only program the IOAPIC on the first.
 20.1347 -		 */
 20.1348 -		bit = ioapic_pin % 32;
 20.1349 -		idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
 20.1350 -		if (idx > 3) {
 20.1351 -			printk(KERN_ERR "Invalid reference to IOAPIC pin "
 20.1352 -				"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
 20.1353 -				ioapic_pin);
 20.1354 -			continue;
 20.1355 -		}
 20.1356 -		if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
 20.1357 -			printk(KERN_DEBUG "Pin %d-%d already programmed\n",
 20.1358 -				mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
 20.1359 -			entry->irq = irq;
 20.1360 -			continue;
 20.1361 -		}
 20.1362 -
 20.1363 -		mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 20.1364 -
 20.1365 -		if (!io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low))
 20.1366 -			entry->irq = irq;
 20.1367 -
 20.1368 -		printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> IRQ %d\n",
 20.1369 -			entry->id.segment, entry->id.bus, 
 20.1370 -			entry->id.device, ('A' + entry->pin), 
 20.1371 -			mp_ioapic_routing[ioapic].apic_id, ioapic_pin, 
 20.1372 -			entry->irq);
 20.1373 -	}
 20.1374 -	
 20.1375 -	print_IO_APIC();
 20.1376 -
 20.1377 -	return;
 20.1378 -}
 20.1379 -
 20.1380 -#endif /*CONFIG_ACPI_PCI*/
 20.1381 -
 20.1382 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
 20.1383 -
 20.1384 -#endif /*CONFIG_ACPI*/
    21.1 --- a/xen/arch/i386/nmi.c	Thu Jun 10 14:24:30 2004 +0000
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,324 +0,0 @@
    21.4 -/*
    21.5 - *  linux/arch/i386/nmi.c
    21.6 - *
    21.7 - *  NMI watchdog support on APIC systems
    21.8 - *
    21.9 - *  Started by Ingo Molnar <mingo@redhat.com>
   21.10 - *
   21.11 - *  Fixes:
   21.12 - *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
   21.13 - *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
   21.14 - *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
   21.15 - *  Keir Fraser         : Pentium 4 Hyperthreading support
   21.16 - */
   21.17 -
   21.18 -#include <xen/config.h>
   21.19 -#include <xen/init.h>
   21.20 -#include <xen/lib.h>
   21.21 -#include <xen/mm.h>
   21.22 -#include <xen/irq.h>
   21.23 -#include <xen/delay.h>
   21.24 -#include <xen/interrupt.h>
   21.25 -#include <xen/time.h>
   21.26 -#include <xen/timex.h>
   21.27 -#include <xen/sched.h>
   21.28 -
   21.29 -#include <asm/mc146818rtc.h>
   21.30 -#include <asm/smp.h>
   21.31 -#include <asm/msr.h>
   21.32 -#include <asm/mpspec.h>
   21.33 -
   21.34 -unsigned int nmi_watchdog = NMI_NONE;
   21.35 -unsigned int watchdog_on = 0;
   21.36 -static unsigned int nmi_hz = HZ;
   21.37 -unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
   21.38 -extern void show_registers(struct pt_regs *regs);
   21.39 -
   21.40 -extern int logical_proc_id[];
   21.41 -
   21.42 -#define K7_EVNTSEL_ENABLE	(1 << 22)
   21.43 -#define K7_EVNTSEL_INT		(1 << 20)
   21.44 -#define K7_EVNTSEL_OS		(1 << 17)
   21.45 -#define K7_EVNTSEL_USR		(1 << 16)
   21.46 -#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
   21.47 -#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
   21.48 -
   21.49 -#define P6_EVNTSEL0_ENABLE	(1 << 22)
   21.50 -#define P6_EVNTSEL_INT		(1 << 20)
   21.51 -#define P6_EVNTSEL_OS		(1 << 17)
   21.52 -#define P6_EVNTSEL_USR		(1 << 16)
   21.53 -#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
   21.54 -#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
   21.55 -
   21.56 -#define MSR_P4_MISC_ENABLE	0x1A0
   21.57 -#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
   21.58 -#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
   21.59 -#define MSR_P4_PERFCTR0		0x300
   21.60 -#define MSR_P4_CCCR0		0x360
   21.61 -#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
   21.62 -#define P4_ESCR_OS0		(1<<3)
   21.63 -#define P4_ESCR_USR0		(1<<2)
   21.64 -#define P4_ESCR_OS1		(1<<1)
   21.65 -#define P4_ESCR_USR1		(1<<0)
   21.66 -#define P4_CCCR_OVF_PMI0	(1<<26)
   21.67 -#define P4_CCCR_OVF_PMI1	(1<<27)
   21.68 -#define P4_CCCR_THRESHOLD(N)	((N)<<20)
   21.69 -#define P4_CCCR_COMPLEMENT	(1<<19)
   21.70 -#define P4_CCCR_COMPARE		(1<<18)
   21.71 -#define P4_CCCR_REQUIRED	(3<<16)
   21.72 -#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
   21.73 -#define P4_CCCR_ENABLE		(1<<12)
   21.74 -/* 
   21.75 - * Set up IQ_COUNTER{0,1} to behave like a clock, by having IQ_CCCR{0,1} filter
   21.76 - * CRU_ESCR0 (with any non-null event selector) through a complemented
   21.77 - * max threshold. [IA32-Vol3, Section 14.9.9] 
   21.78 - */
   21.79 -#define MSR_P4_IQ_COUNTER0	0x30C
   21.80 -#define MSR_P4_IQ_COUNTER1	0x30D
   21.81 -#define MSR_P4_IQ_CCCR0		0x36C
   21.82 -#define MSR_P4_IQ_CCCR1		0x36D
   21.83 -#define MSR_P4_CRU_ESCR0	0x3B8 /* ESCR no. 4 */
   21.84 -#define P4_NMI_CRU_ESCR0 \
   21.85 -    (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS0|P4_ESCR_USR0| \
   21.86 -     P4_ESCR_OS1|P4_ESCR_USR1)
   21.87 -#define P4_NMI_IQ_CCCR0	\
   21.88 -    (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
   21.89 -     P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
   21.90 -#define P4_NMI_IQ_CCCR1	\
   21.91 -    (P4_CCCR_OVF_PMI1|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
   21.92 -     P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
   21.93 -
   21.94 -int __init check_nmi_watchdog (void)
   21.95 -{
   21.96 -    unsigned int prev_nmi_count[NR_CPUS];
   21.97 -    int j, cpu;
   21.98 -    
   21.99 -    if ( !nmi_watchdog )
  21.100 -        return 0;
  21.101 -
  21.102 -    printk("Testing NMI watchdog --- ");
  21.103 -
  21.104 -    for ( j = 0; j < smp_num_cpus; j++ ) 
  21.105 -    {
  21.106 -        cpu = cpu_logical_map(j);
  21.107 -        prev_nmi_count[cpu] = irq_stat[cpu].__nmi_count;
  21.108 -    }
  21.109 -    sti();
  21.110 -    mdelay((10*1000)/nmi_hz); /* wait 10 ticks */
  21.111 -
  21.112 -    for ( j = 0; j < smp_num_cpus; j++ ) 
  21.113 -    {
  21.114 -        cpu = cpu_logical_map(j);
  21.115 -        if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 )
  21.116 -            printk("CPU#%d stuck. ", cpu);
  21.117 -        else
  21.118 -            printk("CPU#%d okay. ", cpu);
  21.119 -    }
  21.120 -
  21.121 -    printk("\n");
  21.122 -
  21.123 -    /* now that we know it works we can reduce NMI frequency to
  21.124 -       something more reasonable; makes a difference in some configs */
  21.125 -    if ( nmi_watchdog == NMI_LOCAL_APIC )
  21.126 -        nmi_hz = 1;
  21.127 -
  21.128 -    return 0;
  21.129 -}
  21.130 -
  21.131 -static inline void nmi_pm_init(void) { }
  21.132 -#define __pminit	__init
  21.133 -
  21.134 -/*
  21.135 - * Activate the NMI watchdog via the local APIC.
  21.136 - * Original code written by Keith Owens.
  21.137 - */
  21.138 -
  21.139 -static void __pminit clear_msr_range(unsigned int base, unsigned int n)
  21.140 -{
  21.141 -    unsigned int i;
  21.142 -    for ( i = 0; i < n; i++ )
  21.143 -        wrmsr(base+i, 0, 0);
  21.144 -}
  21.145 -
  21.146 -static void __pminit setup_k7_watchdog(void)
  21.147 -{
  21.148 -    unsigned int evntsel;
  21.149 -
  21.150 -    nmi_perfctr_msr = MSR_K7_PERFCTR0;
  21.151 -
  21.152 -    clear_msr_range(MSR_K7_EVNTSEL0, 4);
  21.153 -    clear_msr_range(MSR_K7_PERFCTR0, 4);
  21.154 -
  21.155 -    evntsel = K7_EVNTSEL_INT
  21.156 -        | K7_EVNTSEL_OS
  21.157 -        | K7_EVNTSEL_USR
  21.158 -        | K7_NMI_EVENT;
  21.159 -
  21.160 -    wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
  21.161 -    Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
  21.162 -    wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
  21.163 -    apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.164 -    evntsel |= K7_EVNTSEL_ENABLE;
  21.165 -    wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
  21.166 -}
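
Each watchdog setup path presets its performance counter to -(cpu_khz/nmi_hz*1000), i.e. minus the number of CPU cycles per watchdog period, so the counter overflows and raises an NMI through APIC_LVTPC roughly nmi_hz times per second. A worked example with hypothetical numbers:

	/* cpu_khz = 2000000 (2 GHz), nmi_hz = 100:
	 * preset = -(2000000/100*1000) = -20,000,000 cycles,
	 * so the counter overflows about every 10 ms, i.e. ~100 NMIs/sec. */
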
  21.167 -
  21.168 -static void __pminit setup_p6_watchdog(void)
  21.169 -{
  21.170 -    unsigned int evntsel;
  21.171 -
  21.172 -    nmi_perfctr_msr = MSR_P6_PERFCTR0;
  21.173 -
  21.174 -    clear_msr_range(MSR_P6_EVNTSEL0, 2);
  21.175 -    clear_msr_range(MSR_P6_PERFCTR0, 2);
  21.176 -
  21.177 -    evntsel = P6_EVNTSEL_INT
  21.178 -        | P6_EVNTSEL_OS
  21.179 -        | P6_EVNTSEL_USR
  21.180 -        | P6_NMI_EVENT;
  21.181 -
  21.182 -    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
  21.183 -    Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
  21.184 -    wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
  21.185 -    apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.186 -    evntsel |= P6_EVNTSEL0_ENABLE;
  21.187 -    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
  21.188 -}
  21.189 -
  21.190 -static int __pminit setup_p4_watchdog(void)
  21.191 -{
  21.192 -    unsigned int misc_enable, dummy;
  21.193 -
  21.194 -    rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
  21.195 -    if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
  21.196 -        return 0;
  21.197 -
  21.198 -    nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
  21.199 -
  21.200 -    if ( logical_proc_id[smp_processor_id()] == 0 )
  21.201 -    {
  21.202 -        if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
  21.203 -            clear_msr_range(0x3F1, 2);
  21.204 -        /* MSR 0x3F0 seems to have a default value of 0xFC00, but current
   21.205 -           docs don't fully define it, so leave it alone for now. */
  21.206 -        clear_msr_range(0x3A0, 31);
  21.207 -        clear_msr_range(0x3C0, 6);
  21.208 -        clear_msr_range(0x3C8, 6);
  21.209 -        clear_msr_range(0x3E0, 2);
  21.210 -        clear_msr_range(MSR_P4_CCCR0, 18);
  21.211 -        clear_msr_range(MSR_P4_PERFCTR0, 18);
  21.212 -        
  21.213 -        wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
  21.214 -        wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
  21.215 -        Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
  21.216 -        wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
  21.217 -        apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.218 -        wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
  21.219 -    }
  21.220 -    else if ( logical_proc_id[smp_processor_id()] == 1 )
  21.221 -    {
  21.222 -        wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1 & ~P4_CCCR_ENABLE, 0);
   21.223 -        Dprintk("setting P4_IQ_COUNTER1 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
  21.224 -        wrmsr(MSR_P4_IQ_COUNTER1, -(cpu_khz/nmi_hz*1000), -1);
  21.225 -        apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.226 -        wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1, 0);        
  21.227 -    }
  21.228 -    else
  21.229 -    {
  21.230 -        return 0;
  21.231 -    }
  21.232 -
  21.233 -    return 1;
  21.234 -}
  21.235 -
  21.236 -void __pminit setup_apic_nmi_watchdog(void)
  21.237 -{
  21.238 -    if (!nmi_watchdog)
  21.239 -        return;
  21.240 -
  21.241 -    switch (boot_cpu_data.x86_vendor) {
  21.242 -    case X86_VENDOR_AMD:
  21.243 -        if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
  21.244 -            return;
  21.245 -        setup_k7_watchdog();
  21.246 -        break;
  21.247 -    case X86_VENDOR_INTEL:
  21.248 -        switch (boot_cpu_data.x86) {
  21.249 -        case 6:
  21.250 -            setup_p6_watchdog();
  21.251 -            break;
  21.252 -        case 15:
  21.253 -            if (!setup_p4_watchdog())
  21.254 -                return;
  21.255 -            break;
  21.256 -        default:
  21.257 -            return;
  21.258 -        }
  21.259 -        break;
  21.260 -    default:
  21.261 -        return;
  21.262 -    }
  21.263 -    nmi_pm_init();
  21.264 -}
  21.265 -
  21.266 -
  21.267 -static unsigned int
  21.268 -last_irq_sums [NR_CPUS],
  21.269 -    alert_counter [NR_CPUS];
  21.270 -
  21.271 -void touch_nmi_watchdog (void)
  21.272 -{
  21.273 -    int i;
  21.274 -    for (i = 0; i < smp_num_cpus; i++)
  21.275 -        alert_counter[i] = 0;
  21.276 -}
  21.277 -
  21.278 -void nmi_watchdog_tick (struct pt_regs * regs)
  21.279 -{
  21.280 -    extern spinlock_t console_lock;
  21.281 -    extern void die(const char * str, struct pt_regs * regs, long err);
  21.282 -
  21.283 -    int sum, cpu = smp_processor_id();
  21.284 -
  21.285 -    sum = apic_timer_irqs[cpu];
  21.286 -
  21.287 -    if ( (last_irq_sums[cpu] == sum) && watchdog_on )
  21.288 -    {
  21.289 -        /*
  21.290 -         * Ayiee, looks like this CPU is stuck ... wait a few IRQs (5 seconds) 
  21.291 -         * before doing the oops ...
  21.292 -         */
  21.293 -        alert_counter[cpu]++;
  21.294 -        if (alert_counter[cpu] == 5*nmi_hz) {
  21.295 -            console_lock = SPIN_LOCK_UNLOCKED;
  21.296 -            die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
  21.297 -        }
  21.298 -    } 
  21.299 -    else 
  21.300 -    {
  21.301 -        last_irq_sums[cpu] = sum;
  21.302 -        alert_counter[cpu] = 0;
  21.303 -    }
  21.304 -
  21.305 -    if ( nmi_perfctr_msr )
  21.306 -    {
  21.307 -        if ( nmi_perfctr_msr == MSR_P4_IQ_COUNTER0 )
  21.308 -        {
  21.309 -            if ( logical_proc_id[cpu] == 0 )
  21.310 -            {
  21.311 -                wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
  21.312 -                apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.313 -                wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
  21.314 -            }
  21.315 -            else
  21.316 -            {
  21.317 -                wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1, 0);
  21.318 -                apic_write(APIC_LVTPC, APIC_DM_NMI);
  21.319 -                wrmsr(MSR_P4_IQ_COUNTER1, -(cpu_khz/nmi_hz*1000), -1);
  21.320 -            }
  21.321 -        }
  21.322 -        else
  21.323 -        {
  21.324 -            wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
  21.325 -        }
  21.326 -    }
  21.327 -}
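
[Editor's illustration] The watchdog code above arms each performance counter with -(cpu_khz/nmi_hz*1000), i.e. the negative of the number of CPU cycles in one watchdog period, so the counter overflows and delivers an NMI through APIC_LVTPC roughly nmi_hz times per second. A minimal standalone sketch of that arithmetic, using made-up cpu_khz/nmi_hz values in place of the kernel globals:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_khz = 2400000;   /* assumed 2.4 GHz processor     */
        unsigned long nmi_hz  = 1;         /* one watchdog NMI per second   */
        unsigned long period  = cpu_khz / nmi_hz * 1000;  /* cycles/period  */

        /* The counter MSR (low 32 bits) is loaded with the negated period so
         * that counting up from it wraps, and raises the overflow NMI, after
         * 'period' cycles.                                                  */
        printf("reload value = 0x%08lx (-%lu)\n",
               (unsigned long)-period, period);
        return 0;
    }
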
    22.1 --- a/xen/arch/i386/pci-dma.c	Thu Jun 10 14:24:30 2004 +0000
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,37 +0,0 @@
    22.4 -/*
    22.5 - * Dynamic DMA mapping support.
    22.6 - *
    22.7 - * On i386 there is no hardware dynamic DMA address translation,
    22.8 - * so consistent alloc/free are merely page allocation/freeing.
    22.9 - * The rest of the dynamic DMA mapping interface is implemented
   22.10 - * in asm/pci.h.
   22.11 - */
   22.12 -
   22.13 -#include <xen/types.h>
   22.14 -#include <xen/mm.h>
   22.15 -#include <xen/lib.h>
   22.16 -#include <xen/pci.h>
   22.17 -#include <asm/io.h>
   22.18 -
   22.19 -void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
   22.20 -			   dma_addr_t *dma_handle)
   22.21 -{
   22.22 -	void *ret;
   22.23 -	int gfp = GFP_ATOMIC;
   22.24 -
   22.25 -	if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
   22.26 -		gfp |= GFP_DMA;
   22.27 -	ret = (void *)__get_free_pages(gfp, get_order(size));
   22.28 -
   22.29 -	if (ret != NULL) {
   22.30 -		memset(ret, 0, size);
   22.31 -		*dma_handle = virt_to_bus(ret);
   22.32 -	}
   22.33 -	return ret;
   22.34 -}
   22.35 -
   22.36 -void pci_free_consistent(struct pci_dev *hwdev, size_t size,
   22.37 -			 void *vaddr, dma_addr_t dma_handle)
   22.38 -{
   22.39 -	free_pages((unsigned long)vaddr, get_order(size));
   22.40 -}
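
[Editor's illustration] Since i386 has no hardware DMA address translation, the two functions above are the whole consistent-DMA story: page allocation plus virt_to_bus(). A hedged sketch of how a driver might call them; example_alloc_ring/example_free_ring and the 4 KB size are placeholders, not part of this changeset:

    /* Hypothetical driver fragment, assuming a valid 'struct pci_dev *pdev'. */
    static void      *ring;
    static dma_addr_t ring_dma;

    static int example_alloc_ring(struct pci_dev *pdev)
    {
        ring = pci_alloc_consistent(pdev, 4096, &ring_dma);
        if (ring == NULL)
            return -ENOMEM;
        /* 'ring' is the CPU virtual address; 'ring_dma' is the bus address
         * handed to the device -- on i386 simply virt_to_bus(ring).        */
        return 0;
    }

    static void example_free_ring(struct pci_dev *pdev)
    {
        pci_free_consistent(pdev, 4096, ring, ring_dma);
    }
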
    23.1 --- a/xen/arch/i386/pci-i386.c	Thu Jun 10 14:24:30 2004 +0000
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,402 +0,0 @@
    23.4 -/*
    23.5 - *	Low-Level PCI Access for i386 machines
    23.6 - *
    23.7 - * Copyright 1993, 1994 Drew Eckhardt
    23.8 - *      Visionary Computing
    23.9 - *      (Unix and Linux consulting and custom programming)
   23.10 - *      Drew@Colorado.EDU
   23.11 - *      +1 (303) 786-7975
   23.12 - *
   23.13 - * Drew's work was sponsored by:
   23.14 - *	iX Multiuser Multitasking Magazine
   23.15 - *	Hannover, Germany
   23.16 - *	hm@ix.de
   23.17 - *
   23.18 - * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
   23.19 - *
   23.20 - * For more information, please consult the following manuals (look at
   23.21 - * http://www.pcisig.com/ for how to get them):
   23.22 - *
   23.23 - * PCI BIOS Specification
   23.24 - * PCI Local Bus Specification
   23.25 - * PCI to PCI Bridge Specification
   23.26 - * PCI System Design Guide
   23.27 - *
   23.28 - *
   23.29 - * CHANGELOG :
   23.30 - * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
   23.31 - *	Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
   23.32 - *
   23.33 - * Jan 5,  1995 : Modified to probe PCI hardware at boot time by Frederic
   23.34 - *     Potter, potter@cao-vlsi.ibp.fr
   23.35 - *
   23.36 - * Jan 10, 1995 : Modified to store the information about configured pci
   23.37 - *      devices into a list, which can be accessed via /proc/pci by
   23.38 - *      Curtis Varner, cvarner@cs.ucr.edu
   23.39 - *
   23.40 - * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
   23.41 - *	Alpha version. Intel & UMC chipset support only.
   23.42 - *
   23.43 - * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
   23.44 - *	moved to drivers/pci/pci.c.
   23.45 - *
   23.46 - * Dec 7, 1996  : Added support for direct configuration access of boards
   23.47 - *      with Intel compatible access schemes (tsbogend@alpha.franken.de)
   23.48 - *
   23.49 - * Feb 3, 1997  : Set internal functions to static, save/restore flags
   23.50 - *	avoid dead locks reading broken PCI BIOS, werner@suse.de 
   23.51 - *
   23.52 - * Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
   23.53 - *	(mj@atrey.karlin.mff.cuni.cz)
   23.54 - *
   23.55 - * May 7,  1997 : Added some missing cli()'s. [mj]
   23.56 - * 
   23.57 - * Jun 20, 1997 : Corrected problems in "conf1" type accesses.
   23.58 - *      (paubert@iram.es)
   23.59 - *
   23.60 - * Aug 2,  1997 : Split to PCI BIOS handling and direct PCI access parts
   23.61 - *	and cleaned it up...     Martin Mares <mj@atrey.karlin.mff.cuni.cz>
   23.62 - *
   23.63 - * Feb 6,  1998 : No longer using BIOS to find devices and device classes. [mj]
   23.64 - *
   23.65 - * May 1,  1998 : Support for peer host bridges. [mj]
   23.66 - *
   23.67 - * Jun 19, 1998 : Changed to use spinlocks, so that PCI configuration space
   23.68 - *	can be accessed from interrupts even on SMP systems. [mj]
   23.69 - *
   23.70 - * August  1998 : Better support for peer host bridges and more paranoid
   23.71 - *	checks for direct hardware access. Ugh, this file starts to look as
   23.72 - *	checks for direct hardware access. Ugh, this file starts to look like
   23.73 - *	-- the PCI specs themselves are sane, but most implementors should be
   23.74 - *	hit hard with \hammer scaled \magstep5. [mj]
   23.75 - *
   23.76 - * Jan 23, 1999 : More improvements to peer host bridge logic. i450NX fixup. [mj]
   23.77 - *
   23.78 - * Feb 8,  1999 : Added UM8886BF I/O address fixup. [mj]
   23.79 - *
   23.80 - * August  1999 : New resource management and configuration access stuff. [mj]
   23.81 - *
   23.82 - * Sep 19, 1999 : Use PCI IRQ routing tables for detection of peer host bridges.
   23.83 - *		  Based on ideas by Chris Frantz and David Hinds. [mj]
   23.84 - *
   23.85 - * Sep 28, 1999 : Handle unreported/unassigned IRQs. Thanks to Shuu Yamaguchi
   23.86 - *		  for a lot of patience during testing. [mj]
   23.87 - *
   23.88 - * Oct  8, 1999 : Split to pci-i386.c, pci-pc.c and pci-visws.c. [mj]
   23.89 - */
   23.90 -
   23.91 -#include <xen/types.h>
   23.92 -#include <xen/lib.h>
   23.93 -#include <xen/pci.h>
   23.94 -#include <xen/init.h>
   23.95 -#include <xen/ioport.h>
   23.96 -#include <xen/errno.h>
   23.97 -
   23.98 -#include "pci-i386.h"
   23.99 -
  23.100 -void
  23.101 -pcibios_update_resource(struct pci_dev *dev, struct resource *root,
  23.102 -			struct resource *res, int resource)
  23.103 -{
  23.104 -	u32 new, check;
  23.105 -	int reg;
  23.106 -
  23.107 -	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
  23.108 -	if (resource < 6) {
  23.109 -		reg = PCI_BASE_ADDRESS_0 + 4*resource;
  23.110 -	} else if (resource == PCI_ROM_RESOURCE) {
  23.111 -		res->flags |= PCI_ROM_ADDRESS_ENABLE;
  23.112 -		new |= PCI_ROM_ADDRESS_ENABLE;
  23.113 -		reg = dev->rom_base_reg;
  23.114 -	} else {
  23.115 -		/* Somebody might have asked allocation of a non-standard resource */
  23.116 -		return;
  23.117 -	}
  23.118 -	
  23.119 -	pci_write_config_dword(dev, reg, new);
  23.120 -	pci_read_config_dword(dev, reg, &check);
  23.121 -	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
  23.122 -		printk(KERN_ERR "PCI: Error while updating region "
  23.123 -		       "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
  23.124 -		       new, check);
  23.125 -	}
  23.126 -}
  23.127 -
  23.128 -/*
  23.129 - * We need to avoid collisions with `mirrored' VGA ports
  23.130 - * and other strange ISA hardware, so we always want the
  23.131 - * addresses to be allocated in the 0x000-0x0ff region
  23.132 - * modulo 0x400.
  23.133 - *
  23.134 - * Why? Because some silly external IO cards only decode
  23.135 - * the low 10 bits of the IO address. The 0x00-0xff region
  23.136 - * is reserved for motherboard devices that decode all 16
  23.137 - * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
  23.138 - * but we want to try to avoid allocating at 0x2900-0x2bff
  23.139 - * which might be mirrored at 0x0100-0x03ff..
  23.140 - */
  23.141 -void
  23.142 -pcibios_align_resource(void *data, struct resource *res,
  23.143 -		       unsigned long size, unsigned long align)
  23.144 -{
  23.145 -	if (res->flags & IORESOURCE_IO) {
  23.146 -		unsigned long start = res->start;
  23.147 -
  23.148 -		if (start & 0x300) {
  23.149 -			start = (start + 0x3ff) & ~0x3ff;
  23.150 -			res->start = start;
  23.151 -		}
  23.152 -	}
  23.153 -}
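
[Editor's worked example] With a card that decodes only the low 10 address bits, an I/O BAR placed at 0x2900 would alias the motherboard range 0x0100-0x03ff. The hook above catches this because 0x2900 & 0x300 is non-zero and rounds the start up to the next 0x400 boundary:

    start = (0x2900 + 0x3ff) & ~0x3ff;   /* = 0x2c00, i.e. 0x000 modulo 0x400 */

0x2c00 lies in the safe 0x000-0x0ff window modulo 0x400, so it cannot collide with 10-bit decoders.
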
  23.154 -
  23.155 -
  23.156 -/*
  23.157 - *  Handle resources of PCI devices.  If the world were perfect, we could
  23.158 - *  just allocate all the resource regions and do nothing more.  It isn't.
  23.159 - *  On the other hand, we cannot just re-allocate all devices, as it would
  23.160 - *  require us to know lots of host bridge internals.  So we attempt to
  23.161 - *  keep as much of the original configuration as possible, but tweak it
  23.162 - *  when it's found to be wrong.
  23.163 - *
  23.164 - *  Known BIOS problems we have to work around:
  23.165 - *	- I/O or memory regions not configured
  23.166 - *	- regions configured, but not enabled in the command register
  23.167 - *	- bogus I/O addresses above 64K used
  23.168 - *	- expansion ROMs left enabled (this may sound harmless, but given
  23.169 - *	  the fact the PCI specs explicitly allow address decoders to be
  23.170 - *	  shared between expansion ROMs and other resource regions, it's
  23.171 - *	  at least dangerous)
  23.172 - *
  23.173 - *  Our solution:
  23.174 - *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
  23.175 - *	    This gives us fixed barriers on where we can allocate.
  23.176 - *	(2) Allocate resources for all enabled devices.  If there is
  23.177 - *	    a collision, just mark the resource as unallocated. Also
  23.178 - *	    disable expansion ROMs during this step.
  23.179 - *	(3) Try to allocate resources for disabled devices.  If the
  23.180 - *	    resources were assigned correctly, everything goes well,
  23.181 - *	    if they weren't, they won't disturb allocation of other
  23.182 - *	    resources.
  23.183 - *	(4) Assign new addresses to resources which were either
  23.184 - *	    not configured at all or misconfigured.  If explicitly
  23.185 - *	    requested by the user, configure expansion ROM address
  23.186 - *	    as well.
  23.187 - */
  23.188 -
  23.189 -static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
  23.190 -{
  23.191 -	struct list_head *ln;
  23.192 -	struct pci_bus *bus;
  23.193 -	struct pci_dev *dev;
  23.194 -	int idx;
  23.195 -	struct resource *r, *pr;
  23.196 -
  23.197 -	/* Depth-First Search on bus tree */
  23.198 -	for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
  23.199 -		bus = pci_bus_b(ln);
  23.200 -		if ((dev = bus->self)) {
  23.201 -			for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
  23.202 -				r = &dev->resource[idx];
  23.203 -				if (!r->start)
  23.204 -					continue;
  23.205 -				pr = pci_find_parent_resource(dev, r);
  23.206 -				if (!pr || request_resource(pr, r) < 0)
  23.207 -					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, dev->slot_name);
  23.208 -			}
  23.209 -		}
  23.210 -		pcibios_allocate_bus_resources(&bus->children);
  23.211 -	}
  23.212 -}
  23.213 -
  23.214 -static void __init pcibios_allocate_resources(int pass)
  23.215 -{
  23.216 -	struct pci_dev *dev;
  23.217 -	int idx, disabled;
  23.218 -	u16 command;
  23.219 -	struct resource *r, *pr;
  23.220 -
  23.221 -	pci_for_each_dev(dev) {
  23.222 -		pci_read_config_word(dev, PCI_COMMAND, &command);
  23.223 -		for(idx = 0; idx < 6; idx++) {
  23.224 -			r = &dev->resource[idx];
  23.225 -			if (r->parent)		/* Already allocated */
  23.226 -				continue;
  23.227 -			if (!r->start)		/* Address not assigned at all */
  23.228 -				continue;
  23.229 -			if (r->flags & IORESOURCE_IO)
  23.230 -				disabled = !(command & PCI_COMMAND_IO);
  23.231 -			else
  23.232 -				disabled = !(command & PCI_COMMAND_MEMORY);
  23.233 -			if (pass == disabled) {
  23.234 -				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
  23.235 -				    r->start, r->end, r->flags, disabled, pass);
  23.236 -				pr = pci_find_parent_resource(dev, r);
  23.237 -				if (!pr || request_resource(pr, r) < 0) {
  23.238 -					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, dev->slot_name);
  23.239 -					/* We'll assign a new address later */
  23.240 -					r->end -= r->start;
  23.241 -					r->start = 0;
  23.242 -				}
  23.243 -			}
  23.244 -		}
  23.245 -		if (!pass) {
  23.246 -			r = &dev->resource[PCI_ROM_RESOURCE];
  23.247 -			if (r->flags & PCI_ROM_ADDRESS_ENABLE) {
  23.248 -				/* Turn the ROM off, leave the resource region, but keep it unregistered. */
  23.249 -				u32 reg;
  23.250 -				DBG("PCI: Switching off ROM of %s\n", dev->slot_name);
  23.251 -				r->flags &= ~PCI_ROM_ADDRESS_ENABLE;
  23.252 -				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
  23.253 -				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
  23.254 -			}
  23.255 -		}
  23.256 -	}
  23.257 -}
  23.258 -
  23.259 -static void __init pcibios_assign_resources(void)
  23.260 -{
  23.261 -	struct pci_dev *dev;
  23.262 -	int idx;
  23.263 -	struct resource *r;
  23.264 -
  23.265 -	pci_for_each_dev(dev) {
  23.266 -		int class = dev->class >> 8;
  23.267 -
  23.268 -		/* Don't touch classless devices and host bridges */
  23.269 -		if (!class || class == PCI_CLASS_BRIDGE_HOST)
  23.270 -			continue;
  23.271 -
  23.272 -		for(idx=0; idx<6; idx++) {
  23.273 -			r = &dev->resource[idx];
  23.274 -
  23.275 -			/*
  23.276 -			 *  Don't touch IDE controllers and I/O ports of video cards!
  23.277 -			 */
  23.278 -			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
  23.279 -			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
  23.280 -				continue;
  23.281 -
  23.282 -			/*
  23.283 -			 *  We shall assign a new address to this resource, either because
  23.284 -			 *  the BIOS forgot to do so or because we have decided the old
  23.285 -			 *  address was unusable for some reason.
  23.286 -			 */
  23.287 -			if (!r->start && r->end)
  23.288 -				pci_assign_resource(dev, idx);
  23.289 -		}
  23.290 -
  23.291 -		if (pci_probe & PCI_ASSIGN_ROMS) {
  23.292 -			r = &dev->resource[PCI_ROM_RESOURCE];
  23.293 -			r->end -= r->start;
  23.294 -			r->start = 0;
  23.295 -			if (r->end)
  23.296 -				pci_assign_resource(dev, PCI_ROM_RESOURCE);
  23.297 -		}
  23.298 -	}
  23.299 -}
  23.300 -
  23.301 -void __init pcibios_set_cacheline_size(void)
  23.302 -{
  23.303 -	struct cpuinfo_x86 *c = &boot_cpu_data;
  23.304 -
  23.305 -	pci_cache_line_size = 32 >> 2;
  23.306 -	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
  23.307 -		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
  23.308 -	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
  23.309 -		pci_cache_line_size = 128 >> 2;	/* P4 */
  23.310 -}
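
[Editor's illustration] The values above are expressed in 32-bit dwords because that is the unit of the standard PCI Cache Line Size register: 32 >> 2 = 8 dwords for a 32-byte line, 64 >> 2 = 16 for K7/K8, and 128 >> 2 = 32 for the P4's 128-byte line. A hedged sketch of how generic code would push the global into one device (the helper name is hypothetical; PCI_CACHE_LINE_SIZE is the standard config-space offset):

    /* Sketch only: program a device's cache line size from the global above. */
    static void example_set_cacheline(struct pci_dev *dev)
    {
        /* pci_cache_line_size is in dwords, e.g. 16 == 64 bytes. */
        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
    }
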
  23.311 -
  23.312 -void __init pcibios_resource_survey(void)
  23.313 -{
  23.314 -	DBG("PCI: Allocating resources\n");
  23.315 -	pcibios_allocate_bus_resources(&pci_root_buses);
  23.316 -	pcibios_allocate_resources(0);
  23.317 -	pcibios_allocate_resources(1);
  23.318 -	pcibios_assign_resources();
  23.319 -}
  23.320 -
  23.321 -int pcibios_enable_resources(struct pci_dev *dev, int mask)
  23.322 -{
  23.323 -	u16 cmd, old_cmd;
  23.324 -	int idx;
  23.325 -	struct resource *r;
  23.326 -
  23.327 -	pci_read_config_word(dev, PCI_COMMAND, &cmd);
  23.328 -	old_cmd = cmd;
  23.329 -	for(idx=0; idx<6; idx++) {
  23.330 -		/* Only set up the requested stuff */
  23.331 -		if (!(mask & (1<<idx)))
  23.332 -			continue;
  23.333 -			
  23.334 -		r = &dev->resource[idx];
  23.335 -		if (!r->start && r->end) {
  23.336 -			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
  23.337 -			return -EINVAL;
  23.338 -		}
  23.339 -		if (r->flags & IORESOURCE_IO)
  23.340 -			cmd |= PCI_COMMAND_IO;
  23.341 -		if (r->flags & IORESOURCE_MEM)
  23.342 -			cmd |= PCI_COMMAND_MEMORY;
  23.343 -	}
  23.344 -	if (dev->resource[PCI_ROM_RESOURCE].start)
  23.345 -		cmd |= PCI_COMMAND_MEMORY;
  23.346 -	if (cmd != old_cmd) {
  23.347 -		printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
  23.348 -		pci_write_config_word(dev, PCI_COMMAND, cmd);
  23.349 -	}
  23.350 -	return 0;
  23.351 -}
  23.352 -
  23.353 -/*
  23.354 - *  If we set up a device for bus mastering, we need to check the latency
  23.355 - *  timer as certain crappy BIOSes forget to set it properly.
  23.356 - */
  23.357 -unsigned int pcibios_max_latency = 255;
  23.358 -
  23.359 -void pcibios_set_master(struct pci_dev *dev)
  23.360 -{
  23.361 -	u8 lat;
  23.362 -	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
  23.363 -	if (lat < 16)
  23.364 -		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
  23.365 -	else if (lat > pcibios_max_latency)
  23.366 -		lat = pcibios_max_latency;
  23.367 -	else
  23.368 -		return;
  23.369 -	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", dev->slot_name, lat);
  23.370 -	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
  23.371 -}
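
[Editor's worked example] A device whose BIOS left PCI_LATENCY_TIMER at 0 (or anything under 16) is bumped to 64 here, or to pcibios_max_latency if that cap has been lowered below 64; a value above the cap is clamped down to it; anything already in the 16..cap range is left alone, so the register is rewritten (and the message printed) only when something actually changes.
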
  23.372 -
  23.373 -#if 0
  23.374 -int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
  23.375 -			enum pci_mmap_state mmap_state, int write_combine)
  23.376 -{
  23.377 -	unsigned long prot;
  23.378 -
  23.379 -	/* I/O space cannot be accessed via normal processor loads and
  23.380 -	 * stores on this platform.
  23.381 -	 */
  23.382 -	if (mmap_state == pci_mmap_io)
  23.383 -		return -EINVAL;
  23.384 -
  23.385 -	/* Leave vm_pgoff as-is, the PCI space address is the physical
  23.386 -	 * address on this platform.
  23.387 -	 */
  23.388 -	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
  23.389 -
  23.390 -	prot = pgprot_val(vma->vm_page_prot);
  23.391 -	if (boot_cpu_data.x86 > 3)
  23.392 -		prot |= _PAGE_PCD | _PAGE_PWT;
  23.393 -	vma->vm_page_prot = __pgprot(prot);
  23.394 -
  23.395 -	/* Write-combine setting is ignored, it is changed via the mtrr
  23.396 -	 * interfaces on this platform.
  23.397 -	 */
  23.398 -	if (remap_page_range(vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
  23.399 -			     vma->vm_end - vma->vm_start,
  23.400 -			     vma->vm_page_prot))
  23.401 -		return -EAGAIN;
  23.402 -
  23.403 -	return 0;
  23.404 -}
  23.405 -#endif
    24.1 --- a/xen/arch/i386/pci-i386.h	Thu Jun 10 14:24:30 2004 +0000
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,71 +0,0 @@
    24.4 -/*
    24.5 - *	Low-Level PCI Access for i386 machines.
    24.6 - *
    24.7 - *	(c) 1999 Martin Mares <mj@ucw.cz>
    24.8 - */
    24.9 -
   24.10 -#undef DEBUG
   24.11 -
   24.12 -#ifdef DEBUG
   24.13 -#define DBG(x...) printk(x)
   24.14 -#else
   24.15 -#define DBG(x...)
   24.16 -#endif
   24.17 -
   24.18 -#define PCI_PROBE_BIOS		0x0001
   24.19 -#define PCI_PROBE_CONF1		0x0002
   24.20 -#define PCI_PROBE_CONF2		0x0004
   24.21 -#define PCI_NO_SORT		0x0100
   24.22 -#define PCI_BIOS_SORT		0x0200
   24.23 -#define PCI_NO_CHECKS		0x0400
   24.24 -#define PCI_ASSIGN_ROMS		0x1000
   24.25 -#define PCI_BIOS_IRQ_SCAN	0x2000
   24.26 -#define PCI_ASSIGN_ALL_BUSSES	0x4000
   24.27 -
   24.28 -extern unsigned int pci_probe;
   24.29 -
   24.30 -/* pci-i386.c */
   24.31 -
   24.32 -extern unsigned int pcibios_max_latency;
   24.33 -extern u8 pci_cache_line_size;
   24.34 -
   24.35 -void pcibios_resource_survey(void);
   24.36 -void pcibios_set_cacheline_size(void);
   24.37 -int pcibios_enable_resources(struct pci_dev *, int);
   24.38 -
   24.39 -/* pci-pc.c */
   24.40 -
   24.41 -extern int pcibios_last_bus;
   24.42 -extern struct pci_bus *pci_root_bus;
   24.43 -extern struct pci_ops *pci_root_ops;
   24.44 -
   24.45 -/* pci-irq.c */
   24.46 -
   24.47 -struct irq_info {
   24.48 -	u8 bus, devfn;			/* Bus, device and function */
   24.49 -	struct {
   24.50 -		u8 link;		/* IRQ line ID, chipset dependent, 0=not routed */
   24.51 -		u16 bitmap;		/* Available IRQs */
   24.52 -	} __attribute__((packed)) irq[4];
   24.53 -	u8 slot;			/* Slot number, 0=onboard */
   24.54 -	u8 rfu;
   24.55 -} __attribute__((packed));
   24.56 -
   24.57 -struct irq_routing_table {
   24.58 -	u32 signature;			/* PIRQ_SIGNATURE should be here */
   24.59 -	u16 version;			/* PIRQ_VERSION */
   24.60 -	u16 size;			/* Table size in bytes */
   24.61 -	u8 rtr_bus, rtr_devfn;		/* Where the interrupt router lies */
   24.62 -	u16 exclusive_irqs;		/* IRQs devoted exclusively to PCI usage */
   24.63 -	u16 rtr_vendor, rtr_device;	/* Vendor and device ID of interrupt router */
   24.64 -	u32 miniport_data;		/* Crap */
   24.65 -	u8 rfu[11];
   24.66 -	u8 checksum;			/* Modulo 256 checksum must give zero */
   24.67 -	struct irq_info slots[0];
   24.68 -} __attribute__((packed));
   24.69 -
   24.70 -extern unsigned int pcibios_irq_mask;
   24.71 -
   24.72 -void pcibios_irq_init(void);
   24.73 -void pcibios_fixup_irqs(void);
   24.74 -void pcibios_enable_irq(struct pci_dev *dev);
    25.1 --- a/xen/arch/i386/pci-irq.c	Thu Jun 10 14:24:30 2004 +0000
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,1092 +0,0 @@
    25.4 -/*
    25.5 - *	Low-Level PCI Support for PC -- Routing of Interrupts
    25.6 - *
    25.7 - *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
    25.8 - */
    25.9 -
   25.10 -#include <xen/config.h>
   25.11 -#include <xen/types.h>
   25.12 -#include <xen/kernel.h>
   25.13 -#include <xen/pci.h>
   25.14 -#include <xen/init.h>
   25.15 -#include <xen/slab.h>
   25.16 -#include <xen/interrupt.h>
   25.17 -#include <xen/irq.h>
   25.18 -
   25.19 -#include <asm/io.h>
   25.20 -#include <asm/smp.h>
   25.21 -#include <asm/io_apic.h>
   25.22 -
   25.23 -#include "pci-i386.h"
   25.24 -
   25.25 -#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
   25.26 -#define PIRQ_VERSION 0x0100
   25.27 -
   25.28 -int broken_hp_bios_irq9;
   25.29 -
   25.30 -static struct irq_routing_table *pirq_table;
   25.31 -
   25.32 -/*
   25.33 - * Never use: 0, 1, 2 (timer, keyboard, and cascade)
   25.34 - * Avoid using: 13, 14 and 15 (FP error and IDE).
   25.35 - * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
   25.36 - */
   25.37 -unsigned int pcibios_irq_mask = 0xfff8;
   25.38 -
   25.39 -static int pirq_penalty[16] = {
   25.40 -	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
   25.41 -	0, 0, 0, 0, 1000, 100000, 100000, 100000
   25.42 -};
   25.43 -
   25.44 -struct irq_router {
   25.45 -	char *name;
   25.46 -	u16 vendor, device;
   25.47 -	int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
   25.48 -	int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
   25.49 -};
   25.50 -
   25.51 -struct irq_router_handler {
   25.52 -	u16 vendor;
   25.53 -	int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
   25.54 -};
   25.55 -
   25.56 -/*
   25.57 - *  Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
   25.58 - */
   25.59 -
   25.60 -static struct irq_routing_table * __init pirq_find_routing_table(void)
   25.61 -{
   25.62 -	u8 *addr;
   25.63 -	struct irq_routing_table *rt;
   25.64 -	int i;
   25.65 -	u8 sum;
   25.66 -
   25.67 -	for(addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
   25.68 -		rt = (struct irq_routing_table *) addr;
   25.69 -		if (rt->signature != PIRQ_SIGNATURE ||
   25.70 -		    rt->version != PIRQ_VERSION ||
   25.71 -		    rt->size % 16 ||
   25.72 -		    rt->size < sizeof(struct irq_routing_table))
   25.73 -			continue;
   25.74 -		sum = 0;
   25.75 -		for(i=0; i<rt->size; i++)
   25.76 -			sum += addr[i];
   25.77 -		if (!sum) {
   25.78 -			DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
   25.79 -			return rt;
   25.80 -		}
   25.81 -	}
   25.82 -	return NULL;
   25.83 -}
   25.84 -
   25.85 -/*
   25.86 - *  If we have an IRQ routing table, use it to search for peer host
   25.87 - *  bridges.  It's a gross hack, but since there are no other known
   25.88 - *  ways to get a list of buses, we have to go this way.
   25.89 - */
   25.90 -
   25.91 -static void __init pirq_peer_trick(void)
   25.92 -{
   25.93 -	struct irq_routing_table *rt = pirq_table;
   25.94 -	u8 busmap[256];
   25.95 -	int i;
   25.96 -	struct irq_info *e;
   25.97 -
   25.98 -	memset(busmap, 0, sizeof(busmap));
   25.99 -	for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
  25.100 -		e = &rt->slots[i];
  25.101 -#ifdef DEBUG
  25.102 -		{
  25.103 -			int j;
  25.104 -			DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
  25.105 -			for(j=0; j<4; j++)
  25.106 -				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
  25.107 -			DBG("\n");
  25.108 -		}
  25.109 -#endif
  25.110 -		busmap[e->bus] = 1;
  25.111 -	}
  25.112 -	for(i=1; i<256; i++)
  25.113 -		/*
  25.114 -		 *  It might be a secondary bus, but in this case its parent is already
  25.115 -		 *  known (ascending bus order) and therefore pci_scan_bus returns immediately.
  25.116 -		 */
  25.117 -		if (busmap[i] && pci_scan_bus(i, pci_root_bus->ops, NULL))
  25.118 -			printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
  25.119 -	pcibios_last_bus = -1;
  25.120 -}
  25.121 -
  25.122 -/*
  25.123 - *  Code for querying and setting of IRQ routes on various interrupt routers.
  25.124 - */
  25.125 -
  25.126 -void eisa_set_level_irq(unsigned int irq)
  25.127 -{
  25.128 -	unsigned char mask = 1 << (irq & 7);
  25.129 -	unsigned int port = 0x4d0 + (irq >> 3);
  25.130 -	unsigned char val = inb(port);
  25.131 -
  25.132 -	if (!(val & mask)) {
  25.133 -		DBG(" -> edge");
  25.134 -		outb(val | mask, port);
  25.135 -	}
  25.136 -}
  25.137 -
  25.138 -/*
  25.139 - * Common IRQ routing practice: nybbles in config space,
  25.140 - * offset by some magic constant.
  25.141 - */
  25.142 -static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
  25.143 -{
  25.144 -	u8 x;
  25.145 -	unsigned reg = offset + (nr >> 1);
  25.146 -
  25.147 -	pci_read_config_byte(router, reg, &x);
  25.148 -	return (nr & 1) ? (x >> 4) : (x & 0xf);
  25.149 -}
  25.150 -
  25.151 -static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
  25.152 -{
  25.153 -	u8 x;
  25.154 -	unsigned reg = offset + (nr >> 1);
  25.155 -
  25.156 -	pci_read_config_byte(router, reg, &x);
  25.157 -	x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
  25.158 -	pci_write_config_byte(router, reg, x);
  25.159 -}
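
[Editor's worked example] For a router that keeps its PIRQ links at config offset 0x55 (the VIA case below), link 3 lands in register 0x55 + (3 >> 1) = 0x56, and since 3 is odd the IRQ number occupies the high nybble: read_config_nybble() returns x >> 4 for it, and write_config_nybble() rewrites only that half, preserving the low nybble that belongs to the neighbouring link.
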
  25.160 -
  25.161 -/*
  25.162 - * ALI pirq entries are damn ugly, and completely undocumented.
  25.163 - * This has been figured out from pirq tables, and it's not a pretty
  25.164 - * picture.
  25.165 - */
  25.166 -static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.167 -{
  25.168 -	static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
  25.169 -
  25.170 -	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
  25.171 -}
  25.172 -
  25.173 -static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.174 -{
  25.175 -	static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
  25.176 -	unsigned int val = irqmap[irq];
  25.177 -		
  25.178 -	if (val) {
  25.179 -		write_config_nybble(router, 0x48, pirq-1, val);
  25.180 -		return 1;
  25.181 -	}
  25.182 -	return 0;
  25.183 -}
  25.184 -
  25.185 -/*
  25.186 - * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
  25.187 - * just a pointer to the config space.
  25.188 - */
  25.189 -static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.190 -{
  25.191 -	u8 x;
  25.192 -
  25.193 -	pci_read_config_byte(router, pirq, &x);
  25.194 -	return (x < 16) ? x : 0;
  25.195 -}
  25.196 -
  25.197 -static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.198 -{
  25.199 -	pci_write_config_byte(router, pirq, irq);
  25.200 -	return 1;
  25.201 -}
  25.202 -
  25.203 -/*
  25.204 - * The VIA pirq rules are nibble-based, like ALI,
  25.205 - * but without the ugly irq number munging.
  25.206 - * However, PIRQD is in the upper instead of lower nibble.
  25.207 - */
  25.208 -static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.209 -{
  25.210 -	return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
  25.211 -}
  25.212 -
  25.213 -static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.214 -{
  25.215 -	write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
  25.216 -	return 1;
  25.217 -}
  25.218 -
  25.219 -/*
  25.220 - * ITE 8330G pirq rules are nibble-based
  25.221 - * FIXME: pirqmap may be { 1, 0, 3, 2 },
  25.222 - * 	  2+3 are both mapped to irq 9 on my system
  25.223 - */
  25.224 -static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.225 -{
  25.226 -	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
  25.227 -	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
  25.228 -}
  25.229 -
  25.230 -static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.231 -{
  25.232 -	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
  25.233 -	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
  25.234 -	return 1;
  25.235 -}
  25.236 -
  25.237 -/*
  25.238 - * OPTI: high four bits are nibble pointer..
  25.239 - * I wonder what the low bits do?
  25.240 - */
  25.241 -static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.242 -{
  25.243 -	return read_config_nybble(router, 0xb8, pirq >> 4);
  25.244 -}
  25.245 -
  25.246 -static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.247 -{
  25.248 -	write_config_nybble(router, 0xb8, pirq >> 4, irq);
  25.249 -	return 1;
  25.250 -}
  25.251 -
  25.252 -/*
  25.253 - * Cyrix: nibble offset 0x5C
  25.254 - */
  25.255 -static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.256 -{
  25.257 -	return read_config_nybble(router, 0x5C, (pirq-1)^1);
  25.258 -}
  25.259 -
  25.260 -static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
  25.261 -{
  25.262 -	write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
  25.263 -	return 1;
  25.264 -}
  25.265 -
  25.266 -/*
  25.267 - *	PIRQ routing for SiS 85C503 router used in several SiS chipsets.
  25.268 - *	We have to deal with the following issues here:
  25.269 - *	- vendors have different ideas about the meaning of link values
  25.270 - *	- some onboard devices (integrated in the chipset) have special
  25.271 - *	  links and are thus routed differently (i.e. not via PCI INTA-INTD)
  25.272 - *	- different revision of the router have a different layout for
  25.273 - *	  the routing registers, particularly for the onchip devices
  25.274 - *
  25.275 - *	For all routing registers the common thing is we have one byte
  25.276 - *	per routeable link which is defined as:
  25.277 - *		 bit 7      IRQ mapping enabled (0) or disabled (1)
  25.278 - *		 bits [6:4] reserved (sometimes used for onchip devices)
  25.279 - *		 bits [3:0] IRQ to map to
  25.280 - *		     allowed: 3-7, 9-12, 14-15
  25.281 - *		     reserved: 0, 1, 2, 8, 13
  25.282 - *
  25.283 - *	The config-space registers located at 0x41/0x42/0x43/0x44 are
  25.284 - *	always used to route the normal PCI INT A/B/C/D respectively.
  25.285 - *	Apparently there are systems implementing PCI routing table using
  25.286 - *	link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
  25.287 - *	We try our best to handle both link mappings.
  25.288 - *	
  25.289 - *	Currently (2003-05-21) it appears most SiS chipsets follow the
  25.290 - *	definition of routing registers from the SiS-5595 southbridge.
  25.291 - *	According to the SiS 5595 datasheets the revision id's of the
  25.292 - *	router (ISA-bridge) should be 0x01 or 0xb0.
  25.293 - *
  25.294 - *	Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
  25.295 - *	Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
  25.296 - *	They seem to work with the current routing code. However there is
  25.297 - *	some concern because of the two USB-OHCI HCs (original SiS 5595
  25.298 - *	had only one). YMMV.
  25.299 - *
  25.300 - *	Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
  25.301 - *
  25.302 - *	0x61:	IDEIRQ:
  25.303 - *		bits [6:5] must be written 01
  25.304 - *		bit 4 channel-select primary (0), secondary (1)
  25.305 - *
  25.306 - *	0x62:	USBIRQ:
  25.307 - *		bit 6 OHCI function disabled (0), enabled (1)
  25.308 - *	
  25.309 - *	0x6a:	ACPI/SCI IRQ: bits 4-6 reserved
  25.310 - *
  25.311 - *	0x7e:	Data Acq. Module IRQ - bits 4-6 reserved
  25.312 - *
  25.313 - *	We support USBIRQ (in addition to INTA-INTD) and keep the
  25.314 - *	IDE, ACPI and DAQ routing untouched as set by the BIOS.
  25.315 - *
  25.316 - *	Currently the only reported exception is the new SiS 65x chipset
  25.317 - *	which includes the SiS 69x southbridge. Here we have the 85C503
  25.318 - *	router revision 0x04 and there are changes in the register layout
  25.319 - *	mostly related to the different USB HCs with USB 2.0 support.
  25.320 - *
  25.321 - *	Onchip routing for router rev-id 0x04 (try-and-error observation)
  25.322 - *
  25.323 - *	0x60/0x61/0x62/0x63:	1xEHCI and 3xOHCI (companion) USB-HCs
  25.324 - *				bit 6-4 are probably unused, not like 5595
  25.325 - */
  25.326 -
  25.327 -#define PIRQ_SIS_IRQ_MASK	0x0f
  25.328 -#define PIRQ_SIS_IRQ_DISABLE	0x80
  25.329 -#define PIRQ_SIS_USB_ENABLE	0x40
  25.330 -#define PIRQ_SIS_DETECT_REGISTER 0x40
  25.331 -
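
[Editor's illustration] The per-link byte layout described in the comment above maps directly onto these masks. A minimal standalone decode sketch (the helper name is hypothetical), assuming 'x' was read from a link's routing register:

    /* Return the IRQ a SiS link byte routes to, or 0 if bit 7 disables it. */
    static int sis_decode_link(unsigned char x)
    {
        if (x & PIRQ_SIS_IRQ_DISABLE)      /* bit 7: mapping disabled    */
            return 0;
        return x & PIRQ_SIS_IRQ_MASK;      /* bits 3:0: IRQ to map to    */
    }
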
  25.332 -/* return value:
  25.333 - * -1 on error
  25.334 - * 0 for PCI INTA-INTD
  25.335 - * 0 or enable bit mask to check or set for onchip functions
  25.336 - */
  25.337 -static inline int pirq_sis5595_onchip(int pirq, int *reg)
  25.338 -{
  25.339 -	int ret = -1;
  25.340 -
  25.341 -	*reg = pirq;
  25.342 -	switch(pirq) {
  25.343 -	case 0x01:
  25.344 -	case 0x02:
  25.345 -	case 0x03:
  25.346 -	case 0x04:
  25.347 -		*reg += 0x40;
  25.348 -	case 0x41:
  25.349 -	case 0x42:
  25.350 -	case 0x43:
  25.351 -	case 0x44:
  25.352 -		ret = 0;
  25.353 -		break;
  25.354 -
  25.355 -	case 0x62:
  25.356 -		ret = PIRQ_SIS_USB_ENABLE;	/* documented for 5595 */
  25.357 -		break;
  25.358 -
  25.359 -	case 0x61:
  25.360 -	case 0x6a:
  25.361 -	case 0x7e:
  25.362 -		printk(KERN_INFO "SiS pirq: IDE/ACPI/DAQ mapping not implemented: (%u)\n",
  25.363 -		       (unsigned) pirq);
  25.364 -		/* fall thru */
  25.365 -	default:
  25.366 -		printk(KERN_INFO "SiS router unknown request: (%u)\n",
  25.367 -		       (unsigned) pirq);
  25.368 -		break;
  25.369 -	}
  25.370 -	return ret;
  25.371 -}		
  25.372 -
  25.373 -/* return value:
  25.374 - * -1 on error
  25.375 - * 0 for PCI INTA-INTD
  25.376 - * 0 or enable bit mask to check or set for onchip functions
  25.377 - */
  25.378 -static inline int pirq_sis96x_onchip(int pirq, int *reg)
  25.379 -{
  25.380 -	int ret = -1;
  25.381 -
  25.382 -	*reg = pirq;
  25.383 -	switch(pirq) {
  25.384 -	case 0x01:
  25.385 -	case 0x02:
  25.386 -	case 0x03:
  25.387 -	case 0x04:
  25.388 -		*reg += 0x40;
  25.389 -	case 0x41:
  25.390 -	case 0x42:
  25.391 -	case 0x43:
  25.392 -	case 0x44:
  25.393 -	case 0x60:
  25.394 -	case 0x61:
  25.395 -	case 0x62:
  25.396 -	case 0x63:
  25.397 -		ret = 0;
  25.398 -		break;
  25.399 -
  25.400 -	default:
  25.401 -		printk(KERN_INFO "SiS router unknown request: (%u)\n",
  25.402 -		       (unsigned) pirq);
  25.403 -		break;
  25.404 -	}
  25.405 -	return ret;
  25.406 -}		
  25.407 -
  25.408 -
  25.409 -static int pirq_sis5595_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
  25.410 -{
  25.411 -	u8 x;
  25.412 -	int reg, check;
  25.413 -
  25.414 -	check = pirq_sis5595_onchip(pirq, &reg);
  25.415 -	if (check < 0)
  25.416 -		return 0;