direct-io.hg
changeset 4804:3404966959f2
bitkeeper revision 1.1389.10.1 (427fa2d3ZV92f_ErvLuIzWbV1f67QA)
Phase 1 of upgrading platform code to be derived from Linux 2.6.11
rather than 2.4.x.
Signed-off-by: Keir Fraser <keir@xensource.com>
--- a/.rootkeys	Mon May 09 14:34:59 2005 +0000
+++ b/.rootkeys	Mon May 09 17:50:11 2005 +0000
@@ -1185,7 +1185,7 @@ 421098b6lY2JzrV1oFDbrt7XQhtElg xen/arch/
 427664f5eygrc3nEhI3RKf0Y37PzyA xen/arch/ia64/xentime.c
 3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
 3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
-3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
+3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi/boot.c
 3ddb79bcsjinG9k1KcvbVBuas1R2dA xen/arch/x86/apic.c
 42360b3244-Q6BpEKhR_A1YtG1wPNQ xen/arch/x86/audit.c
 3ddb79c4yGZ7_22QAFFwPzqP4NSHwA xen/arch/x86/boot/mkelf32.c
@@ -1247,6 +1247,7 @@ 40e96d3akN3Hu_J5Bk-WXD8OGscrYQ xen/arch/
 422f27c8J9DQfCpegccMid59XhSmGA xen/arch/x86/x86_emulate.c
 3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen/common/Makefile
 3e397e66AyyD5fYraAySWuwi9uqSXg xen/common/ac_timer.c
+427fa2d0J0LU2s5oKbsM0nTZ2iyd2Q xen/common/bitmap.c
 3ddb79bdLX_P6iB7ILiblRLWvebapg xen/common/dom0_ops.c
 3e6377e4i0c9GtKN65e99OtRbw3AZw xen/common/dom_mem_ops.c
 3ddb79bdYO5D8Av12NHqPeSviav7cg xen/common/domain.c
@@ -1341,13 +1342,33 @@ 3ddb79c3TMDjkxVndKFKnGiwY0HzDg xen/inclu
 4204e7acwXDo-5iAAiO2eQbtDeYZXA xen/include/asm-x86/init.h
 3ddb79c3fQ_O3o5NHK2N8AJdk0Ea4Q xen/include/asm-x86/io.h
 3ddb79c2TKeScYHQZreTdHqYNLbehQ xen/include/asm-x86/io_apic.h
-42605109qxLSrHVE2SRhGXmgk907iw xen/include/asm-x86/io_ports.h
 3ddb79c2L7rTlFzazOLW1XuSZefpFw xen/include/asm-x86/irq.h
 404f1b93OjLO4bFfBXYNaJdIqlNz-Q xen/include/asm-x86/ldt.h
-4260510aYPj2kr6rMbBfMxcvvmXndQ xen/include/asm-x86/mach_apic.h
+427fa2d0m8MOSSXT13zgb-q0fGA_Dw xen/include/asm-x86/mach-default/apm.h
+427fa2d0suK9Av7vsAXhsQxZjqpc_Q xen/include/asm-x86/mach-default/bios_ebda.h
+427fa2d0yC3KzLozoeK3Xa3uGVfIdw xen/include/asm-x86/mach-default/do_timer.h
+427fa2d0bWQkR1mW5OBYxn07AN-bDw xen/include/asm-x86/mach-default/entry_arch.h
+427fa2d0-SWcuwbdSypo4953bc2JdQ xen/include/asm-x86/mach-default/io_ports.h
+427fa2d0eyAl7LAeO-SVV4IW7lZPGQ xen/include/asm-x86/mach-default/irq_vectors.h
+427fa2d0df7VWG4KKpnKbKR2Cbd1_w xen/include/asm-x86/mach-default/irq_vectors_limits.h
+4260510aYPj2kr6rMbBfMxcvvmXndQ xen/include/asm-x86/mach-default/mach_apic.h
+427fa2d0I3FWjE2tWdOhlEOJn7stcg xen/include/asm-x86/mach-default/mach_apicdef.h
+427fa2d093fDS2gOBLcl7Yndzl7HmA xen/include/asm-x86/mach-default/mach_ipi.h
+427fa2d0Y7bD35d-FvDAeiJDIdRw2A xen/include/asm-x86/mach-default/mach_mpparse.h
+427fa2d0aLQgE9e1GY9ZP5jrMOC8pQ xen/include/asm-x86/mach-default/mach_mpspec.h
+427fa2d0fJ5nNn5ydJuOaZIL6F2fjQ xen/include/asm-x86/mach-default/mach_reboot.h
+427fa2d0VlN555TE68TjKMsrOoFXNA xen/include/asm-x86/mach-default/mach_time.h
+427fa2d0C0jWTKYjy7WJjGKeujSpSg xen/include/asm-x86/mach-default/mach_timer.h
+427fa2d0UXLiS1scpNrK26ZT6Oes3g xen/include/asm-x86/mach-default/mach_traps.h
+427fa2d0OfglYyfpDTD5DII4M0uZRw xen/include/asm-x86/mach-default/mach_wakecpu.h
+427fa2d0_OBPxdi5Qo04JWgZhz7BFA xen/include/asm-x86/mach-default/pci-functions.h
+427fa2d0mrTtXrliqDfLuJc5LLVXaA xen/include/asm-x86/mach-default/setup_arch_post.h
+427fa2d0Uoo7gC61Kep6Yy7Os367Hg xen/include/asm-x86/mach-default/setup_arch_pre.h
+427fa2d1EKnA8zCq2QLHiGOUqOgszg xen/include/asm-x86/mach-default/smpboot_hooks.h
 3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen/include/asm-x86/mc146818rtc.h
 40ec25fd7cSvbP7Biw91zaU_g0xsEQ xen/include/asm-x86/mm.h
 3ddb79c3n_UbPuxlkNxvvLycClIkxA xen/include/asm-x86/mpspec.h
+427fa2d1eJRenftJJnRyLsHKl1ghtA xen/include/asm-x86/mpspec_def.h
 3ddb79c2wa0dA_LGigxOelSGbJ284Q xen/include/asm-x86/msr.h
 41aaf567Mi3OishhvrCtET1y-mxQBg xen/include/asm-x86/mtrr.h
 41a61536MFhNalgbVmYGXAhQsPTZNw xen/include/asm-x86/multicall.h
@@ -1409,11 +1430,14 @@ 4266bd01Ul-pC01ZVvBkhBnv5eqzvw xen/inclu
 3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/public/xen.h
 3e397e66m2tO3s-J8Jnr7Ws_tGoPTg xen/include/xen/ac_timer.h
 40715b2epYl2jBbxzz9CI2rgIca7Zg xen/include/xen/acpi.h
+427fa2d1wyoVbvCyZRLposYjA_D_4g xen/include/xen/bitmap.h
+427fa2d1ItcC_yWuBUkhc7adedP5ow xen/include/xen/bitops.h
 3ddb79c0c0cX_DZE209-Bb-Rx1v-Aw xen/include/xen/cache.h
 41f2cea7Yna7xc0X9fyavIjoSFFeVg xen/include/xen/compile.h.in
 3f840f12CkbYSlwMrY2S11Mpyxg7Nw xen/include/xen/compiler.h
 3ddb79c259jh8hE7vre_8NuE7nwNSA xen/include/xen/config.h
 3eb165e0eawr3R-p2ZQtSdLWtLRN_A xen/include/xen/console.h
+427fa2d1bQCWgEQqTTh5MjG4MPEH9g xen/include/xen/cpumask.h
 3ddb79c1V44RD26YqCUm-kqIupM37A xen/include/xen/ctype.h
 3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xen/delay.h
 40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/xen/domain.h
--- a/xen/arch/x86/Makefile	Mon May 09 14:34:59 2005 +0000
+++ b/xen/arch/x86/Makefile	Mon May 09 17:50:11 2005 +0000
@@ -3,6 +3,7 @@ include $(BASEDIR)/Rules.mk
 
 OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_SUBARCH)/*.S))
 OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
+OBJS += $(patsubst %.c,%.o,$(wildcard acpi/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
 
 OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
--- a/xen/arch/x86/Rules.mk	Mon May 09 14:34:59 2005 +0000
+++ b/xen/arch/x86/Rules.mk	Mon May 09 17:50:11 2005 +0000
@@ -2,8 +2,8 @@
 # x86-specific definitions
 
 CFLAGS += -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
-CFLAGS += -iwithprefix include -Wall -Werror -pipe
-CFLAGS += -I$(BASEDIR)/include -Wno-pointer-arith -Wredundant-decls
+CFLAGS += -iwithprefix include -Wall -Werror -Wno-pointer-arith -pipe
+CFLAGS += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-x86/mach-default
 
 ifeq ($(optimize),y)
 CFLAGS += -O3 -fomit-frame-pointer
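The extra -I$(BASEDIR)/include/asm-x86/mach-default search path is what lets the 2.6-derived sources include the subarchitecture headers by bare name, as the apic.c and io_apic.c hunks further below do. A minimal illustration of the include style this enables (the snippet is not part of the changeset itself):

    /* With -I$(BASEDIR)/include/asm-x86/mach-default on the compiler command
     * line, subarch headers no longer need the asm/ prefix: */
    #include <mach_apic.h>   /* resolves to xen/include/asm-x86/mach-default/mach_apic.h */
    #include <io_ports.h>    /* resolves to xen/include/asm-x86/mach-default/io_ports.h  */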
4.1 --- a/xen/arch/x86/acpi.c Mon May 09 14:34:59 2005 +0000 4.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 4.3 @@ -1,729 +0,0 @@ 4.4 -/* 4.5 - * acpi.c - Architecture-Specific Low-Level ACPI Support 4.6 - * 4.7 - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 4.8 - * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> 4.9 - * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> 4.10 - * 4.11 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4.12 - * 4.13 - * This program is free software; you can redistribute it and/or modify 4.14 - * it under the terms of the GNU General Public License as published by 4.15 - * the Free Software Foundation; either version 2 of the License, or 4.16 - * (at your option) any later version. 4.17 - * 4.18 - * This program is distributed in the hope that it will be useful, 4.19 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 4.20 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 4.21 - * GNU General Public License for more details. 4.22 - * 4.23 - * You should have received a copy of the GNU General Public License 4.24 - * along with this program; if not, write to the Free Software 4.25 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 4.26 - * 4.27 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4.28 - */ 4.29 - 4.30 -#include <xen/config.h> 4.31 -#include <xen/kernel.h> 4.32 -#include <xen/init.h> 4.33 -#include <xen/types.h> 4.34 -#include <xen/slab.h> 4.35 -#include <xen/pci.h> 4.36 -#include <xen/irq.h> 4.37 -#include <xen/acpi.h> 4.38 -#include <asm/mpspec.h> 4.39 -#include <asm/io.h> 4.40 -#include <asm/apic.h> 4.41 -#include <asm/apicdef.h> 4.42 -#include <asm/page.h> 4.43 -#include <asm/io_apic.h> 4.44 -#include <asm/acpi.h> 4.45 -#include <asm/smpboot.h> 4.46 - 4.47 - 4.48 -#define PREFIX "ACPI: " 4.49 - 4.50 -int acpi_lapic; 4.51 -int acpi_ioapic; 4.52 -int acpi_strict; 4.53 - 4.54 -acpi_interrupt_flags acpi_sci_flags __initdata; 4.55 -int acpi_sci_override_gsi __initdata; 4.56 -/* -------------------------------------------------------------------------- 4.57 - Boot-time Configuration 4.58 - -------------------------------------------------------------------------- */ 4.59 - 4.60 -int acpi_noirq __initdata = 0; /* skip ACPI IRQ initialization */ 4.61 -int acpi_ht __initdata = 1; /* enable HT */ 4.62 - 4.63 -enum acpi_irq_model_id acpi_irq_model; 4.64 - 4.65 - 4.66 -/* 4.67 - * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, 4.68 - * to map the target physical address. The problem is that set_fixmap() 4.69 - * provides a single page, and it is possible that the page is not 4.70 - * sufficient. 4.71 - * By using this area, we can map up to MAX_IO_APICS pages temporarily, 4.72 - * i.e. until the next __va_range() call. 4.73 - * 4.74 - * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* 4.75 - * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and 4.76 - * count idx down while incrementing the phys address. 
4.77 - */ 4.78 -char *__acpi_map_table(unsigned long phys, unsigned long size) 4.79 -{ 4.80 - unsigned long base, offset, mapped_size; 4.81 - int idx; 4.82 - 4.83 - if (phys + size < 8*1024*1024) 4.84 - return __va(phys); 4.85 - 4.86 - offset = phys & (PAGE_SIZE - 1); 4.87 - mapped_size = PAGE_SIZE - offset; 4.88 - set_fixmap(FIX_ACPI_END, phys); 4.89 - base = fix_to_virt(FIX_ACPI_END); 4.90 - 4.91 - /* 4.92 - * Most cases can be covered by the below. 4.93 - */ 4.94 - idx = FIX_ACPI_END; 4.95 - while (mapped_size < size) { 4.96 - if (--idx < FIX_ACPI_BEGIN) 4.97 - return 0; /* cannot handle this */ 4.98 - phys += PAGE_SIZE; 4.99 - set_fixmap(idx, phys); 4.100 - mapped_size += PAGE_SIZE; 4.101 - } 4.102 - 4.103 - return ((char *) base + offset); 4.104 -} 4.105 - 4.106 - 4.107 -#ifdef CONFIG_X86_LOCAL_APIC 4.108 - 4.109 -static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; 4.110 - 4.111 - 4.112 -static int __init 4.113 -acpi_parse_madt ( 4.114 - unsigned long phys_addr, 4.115 - unsigned long size) 4.116 -{ 4.117 - struct acpi_table_madt *madt = NULL; 4.118 - 4.119 - if (!phys_addr || !size) 4.120 - return -EINVAL; 4.121 - 4.122 - madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size); 4.123 - if (!madt) { 4.124 - printk(KERN_WARNING PREFIX "Unable to map MADT\n"); 4.125 - return -ENODEV; 4.126 - } 4.127 - 4.128 - if (madt->lapic_address) 4.129 - acpi_lapic_addr = (u64) madt->lapic_address; 4.130 - 4.131 - printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n", 4.132 - madt->lapic_address); 4.133 - 4.134 - detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id); 4.135 - 4.136 - return 0; 4.137 -} 4.138 - 4.139 - 4.140 -static int __init 4.141 -acpi_parse_lapic ( 4.142 - acpi_table_entry_header *header) 4.143 -{ 4.144 - struct acpi_table_lapic *processor = NULL; 4.145 - 4.146 - processor = (struct acpi_table_lapic*) header; 4.147 - if (!processor) 4.148 - return -EINVAL; 4.149 - 4.150 - acpi_table_print_madt_entry(header); 4.151 - 4.152 - mp_register_lapic ( 4.153 - processor->id, /* APIC ID */ 4.154 - processor->flags.enabled); /* Enabled? 
*/ 4.155 - 4.156 - return 0; 4.157 -} 4.158 - 4.159 - 4.160 -static int __init 4.161 -acpi_parse_lapic_addr_ovr ( 4.162 - acpi_table_entry_header *header) 4.163 -{ 4.164 - struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; 4.165 - 4.166 - lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header; 4.167 - if (!lapic_addr_ovr) 4.168 - return -EINVAL; 4.169 - 4.170 - acpi_lapic_addr = lapic_addr_ovr->address; 4.171 - 4.172 - return 0; 4.173 -} 4.174 - 4.175 -static int __init 4.176 -acpi_parse_lapic_nmi ( 4.177 - acpi_table_entry_header *header) 4.178 -{ 4.179 - struct acpi_table_lapic_nmi *lapic_nmi = NULL; 4.180 - 4.181 - lapic_nmi = (struct acpi_table_lapic_nmi*) header; 4.182 - if (!lapic_nmi) 4.183 - return -EINVAL; 4.184 - 4.185 - acpi_table_print_madt_entry(header); 4.186 - 4.187 - if (lapic_nmi->lint != 1) 4.188 - printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); 4.189 - 4.190 - return 0; 4.191 -} 4.192 - 4.193 -#endif /*CONFIG_X86_LOCAL_APIC*/ 4.194 - 4.195 -#if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/ 4.196 - 4.197 -static int __init 4.198 -acpi_parse_ioapic ( 4.199 - acpi_table_entry_header *header) 4.200 -{ 4.201 - struct acpi_table_ioapic *ioapic = NULL; 4.202 - 4.203 - ioapic = (struct acpi_table_ioapic*) header; 4.204 - if (!ioapic) 4.205 - return -EINVAL; 4.206 - 4.207 - acpi_table_print_madt_entry(header); 4.208 - 4.209 - mp_register_ioapic ( 4.210 - ioapic->id, 4.211 - ioapic->address, 4.212 - ioapic->global_irq_base); 4.213 - 4.214 - return 0; 4.215 -} 4.216 - 4.217 -#ifdef CONFIG_ACPI_INTERPRETER 4.218 -/* 4.219 - * Parse Interrupt Source Override for the ACPI SCI 4.220 - */ 4.221 -static void 4.222 -acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) 4.223 -{ 4.224 - if (trigger == 0) /* compatible SCI trigger is level */ 4.225 - trigger = 3; 4.226 - 4.227 - if (polarity == 0) /* compatible SCI polarity is low */ 4.228 - polarity = 3; 4.229 - 4.230 - /* Command-line over-ride via acpi_sci= */ 4.231 - if (acpi_sci_flags.trigger) 4.232 - trigger = acpi_sci_flags.trigger; 4.233 - 4.234 - if (acpi_sci_flags.polarity) 4.235 - polarity = acpi_sci_flags.polarity; 4.236 - 4.237 - /* 4.238 - * mp_config_acpi_legacy_irqs() already setup IRQs < 16 4.239 - * If GSI is < 16, this will update its flags, 4.240 - * else it will create a new mp_irqs[] entry. 
4.241 - */ 4.242 - mp_override_legacy_irq(gsi, polarity, trigger, gsi); 4.243 - 4.244 - /* 4.245 - * stash over-ride to indicate we've been here 4.246 - * and for later update of acpi_fadt 4.247 - */ 4.248 - acpi_sci_override_gsi = gsi; 4.249 - return; 4.250 -} 4.251 -#endif 4.252 - 4.253 -static int __init 4.254 -acpi_parse_fadt(unsigned long phys, unsigned long size) 4.255 -{ 4.256 - struct fadt_descriptor_rev2 *fadt =0; 4.257 - 4.258 - fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size); 4.259 - if (!fadt) { 4.260 - printk(KERN_WARNING PREFIX "Unable to map FADT\n"); 4.261 - return 0; 4.262 - } 4.263 - 4.264 -#ifdef CONFIG_ACPI_INTERPRETER 4.265 - /* initialize sci_int early for INT_SRC_OVR MADT parsing */ 4.266 - acpi_fadt.sci_int = fadt->sci_int; 4.267 -#endif 4.268 - 4.269 - return 0; 4.270 -} 4.271 - 4.272 - 4.273 -static int __init 4.274 -acpi_parse_int_src_ovr ( 4.275 - acpi_table_entry_header *header) 4.276 -{ 4.277 - struct acpi_table_int_src_ovr *intsrc = NULL; 4.278 - 4.279 - intsrc = (struct acpi_table_int_src_ovr*) header; 4.280 - if (!intsrc) 4.281 - return -EINVAL; 4.282 - 4.283 - acpi_table_print_madt_entry(header); 4.284 - 4.285 -#ifdef CONFIG_ACPI_INTERPRETER 4.286 - if (intsrc->bus_irq == acpi_fadt.sci_int) { 4.287 - acpi_sci_ioapic_setup(intsrc->global_irq, 4.288 - intsrc->flags.polarity, intsrc->flags.trigger); 4.289 - return 0; 4.290 - } 4.291 -#endif 4.292 - 4.293 - mp_override_legacy_irq ( 4.294 - intsrc->bus_irq, 4.295 - intsrc->flags.polarity, 4.296 - intsrc->flags.trigger, 4.297 - intsrc->global_irq); 4.298 - 4.299 - return 0; 4.300 -} 4.301 - 4.302 - 4.303 -static int __init 4.304 -acpi_parse_nmi_src ( 4.305 - acpi_table_entry_header *header) 4.306 -{ 4.307 - struct acpi_table_nmi_src *nmi_src = NULL; 4.308 - 4.309 - nmi_src = (struct acpi_table_nmi_src*) header; 4.310 - if (!nmi_src) 4.311 - return -EINVAL; 4.312 - 4.313 - acpi_table_print_madt_entry(header); 4.314 - 4.315 - /* TBD: Support nimsrc entries? */ 4.316 - 4.317 - return 0; 4.318 -} 4.319 - 4.320 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/ 4.321 - 4.322 - 4.323 -static unsigned long __init 4.324 -acpi_scan_rsdp ( 4.325 - unsigned long start, 4.326 - unsigned long length) 4.327 -{ 4.328 - unsigned long offset = 0; 4.329 - unsigned long sig_len = sizeof("RSD PTR ") - 1; 4.330 - 4.331 - /* 4.332 - * Scan all 16-byte boundaries of the physical memory region for the 4.333 - * RSDP signature. 4.334 - */ 4.335 - for (offset = 0; offset < length; offset += 16) { 4.336 - if (strncmp((char *) (start + offset), "RSD PTR ", sig_len)) 4.337 - continue; 4.338 - return (start + offset); 4.339 - } 4.340 - 4.341 - return 0; 4.342 -} 4.343 - 4.344 - 4.345 -unsigned long __init 4.346 -acpi_find_rsdp (void) 4.347 -{ 4.348 - unsigned long rsdp_phys = 0; 4.349 - 4.350 - /* 4.351 - * Scan memory looking for the RSDP signature. First search EBDA (low 4.352 - * memory) paragraphs and then search upper memory (E0000-FFFFF). 4.353 - */ 4.354 - rsdp_phys = acpi_scan_rsdp (0, 0x400); 4.355 - if (!rsdp_phys) 4.356 - rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000); 4.357 - 4.358 - return rsdp_phys; 4.359 -} 4.360 - 4.361 - 4.362 -/* 4.363 - * acpi_boot_init() 4.364 - * called from setup_arch(), always. 4.365 - * 1. maps ACPI tables for later use 4.366 - * 2. enumerates lapics 4.367 - * 3. 
enumerates io-apics 4.368 - * 4.369 - * side effects: 4.370 - * acpi_lapic = 1 if LAPIC found 4.371 - * acpi_ioapic = 1 if IOAPIC found 4.372 - * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; 4.373 - * if acpi_blacklisted() disable_acpi() 4.374 - * acpi_irq_model=... 4.375 - * ... 4.376 - * 4.377 - * return value: (currently ignored) 4.378 - * 0: success 4.379 - * !0: failure 4.380 - */ 4.381 -int __init 4.382 -acpi_boot_init (void) 4.383 -{ 4.384 - int result = 0; 4.385 - 4.386 - if (acpi_disabled && !acpi_ht) 4.387 - return(1); 4.388 - 4.389 - /* 4.390 - * The default interrupt routing model is PIC (8259). This gets 4.391 - * overriden if IOAPICs are enumerated (below). 4.392 - */ 4.393 - acpi_irq_model = ACPI_IRQ_MODEL_PIC; 4.394 - 4.395 - /* 4.396 - * Initialize the ACPI boot-time table parser. 4.397 - */ 4.398 - result = acpi_table_init(); 4.399 - if (result) { 4.400 - disable_acpi(); 4.401 - return result; 4.402 - } 4.403 - 4.404 - result = acpi_blacklisted(); 4.405 - if (result) { 4.406 - printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n"); 4.407 - disable_acpi(); 4.408 - return result; 4.409 - } 4.410 - 4.411 -#ifdef CONFIG_X86_LOCAL_APIC 4.412 - 4.413 - /* 4.414 - * MADT 4.415 - * ---- 4.416 - * Parse the Multiple APIC Description Table (MADT), if exists. 4.417 - * Note that this table provides platform SMP configuration 4.418 - * information -- the successor to MPS tables. 4.419 - */ 4.420 - 4.421 - result = acpi_table_parse(ACPI_APIC, acpi_parse_madt); 4.422 - if (!result) { 4.423 - return 0; 4.424 - } 4.425 - else if (result < 0) { 4.426 - printk(KERN_ERR PREFIX "Error parsing MADT\n"); 4.427 - return result; 4.428 - } 4.429 - else if (result > 1) 4.430 - printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n"); 4.431 - 4.432 - /* 4.433 - * Local APIC 4.434 - * ---------- 4.435 - * Note that the LAPIC address is obtained from the MADT (32-bit value) 4.436 - * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 4.437 - */ 4.438 - 4.439 - result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr); 4.440 - if (result < 0) { 4.441 - printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); 4.442 - return result; 4.443 - } 4.444 - 4.445 - mp_register_lapic_address(acpi_lapic_addr); 4.446 - 4.447 - result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic); 4.448 - if (!result) { 4.449 - printk(KERN_ERR PREFIX "No LAPIC entries present\n"); 4.450 - /* TBD: Cleanup to allow fallback to MPS */ 4.451 - return -ENODEV; 4.452 - } 4.453 - else if (result < 0) { 4.454 - printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); 4.455 - /* TBD: Cleanup to allow fallback to MPS */ 4.456 - return result; 4.457 - } 4.458 - 4.459 - result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi); 4.460 - if (result < 0) { 4.461 - printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 4.462 - /* TBD: Cleanup to allow fallback to MPS */ 4.463 - return result; 4.464 - } 4.465 - 4.466 - acpi_lapic = 1; 4.467 - 4.468 -#endif /*CONFIG_X86_LOCAL_APIC*/ 4.469 - 4.470 -#if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/ 4.471 - 4.472 - /* 4.473 - * I/O APIC 4.474 - * -------- 4.475 - */ 4.476 - 4.477 -#if 0 4.478 - /* 4.479 - * ACPI interpreter is required to complete interrupt setup, 4.480 - * so if it is off, don't enumerate the io-apics with ACPI. 
4.481 - * If MPS is present, it will handle them, 4.482 - * otherwise the system will stay in PIC mode 4.483 - */ 4.484 - if (acpi_disabled || acpi_noirq) { 4.485 - return 1; 4.486 - } 4.487 -#endif 4.488 - 4.489 - /* 4.490 - * if "noapic" boot option, don't look for IO-APICs 4.491 - */ 4.492 - if (ioapic_setup_disabled()) { 4.493 - printk(KERN_INFO PREFIX "Skipping IOAPIC probe " 4.494 - "due to 'noapic' option.\n"); 4.495 - return 1; 4.496 - } 4.497 - 4.498 - 4.499 - result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic); 4.500 - if (!result) { 4.501 - printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); 4.502 - return -ENODEV; 4.503 - } 4.504 - else if (result < 0) { 4.505 - printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); 4.506 - return result; 4.507 - } 4.508 - 4.509 - /* Build a default routing table for legacy (ISA) interrupts. */ 4.510 - mp_config_acpi_legacy_irqs(); 4.511 - 4.512 - /* Record sci_int for use when looking for MADT sci_int override */ 4.513 - acpi_table_parse(ACPI_FADT, acpi_parse_fadt); 4.514 - 4.515 - result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr); 4.516 - if (result < 0) { 4.517 - printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); 4.518 - /* TBD: Cleanup to allow fallback to MPS */ 4.519 - return result; 4.520 - } 4.521 - 4.522 -#ifdef CONFIG_ACPI_INTERPRETER 4.523 - /* 4.524 - * If BIOS did not supply an INT_SRC_OVR for the SCI 4.525 - * pretend we got one so we can set the SCI flags. 4.526 - */ 4.527 - if (!acpi_sci_override_gsi) 4.528 - acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); 4.529 -#endif 4.530 - 4.531 - result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src); 4.532 - if (result < 0) { 4.533 - printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 4.534 - /* TBD: Cleanup to allow fallback to MPS */ 4.535 - return result; 4.536 - } 4.537 - 4.538 - acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; 4.539 - 4.540 - acpi_irq_balance_set(NULL); 4.541 - 4.542 - acpi_ioapic = 1; 4.543 - 4.544 - if (acpi_lapic && acpi_ioapic) 4.545 - smp_found_config = 1; 4.546 - 4.547 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/ 4.548 - 4.549 - return 0; 4.550 -} 4.551 - 4.552 - 4.553 -#ifdef CONFIG_ACPI_BUS 4.554 -/* 4.555 - * acpi_pic_sci_set_trigger() 4.556 - * 4.557 - * use ELCR to set PIC-mode trigger type for SCI 4.558 - * 4.559 - * If a PIC-mode SCI is not recognized or gives spurious IRQ7's 4.560 - * it may require Edge Trigger -- use "acpi_sci=edge" 4.561 - * 4.562 - * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers 4.563 - * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. 
4.564 - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) 4.565 - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) 4.566 - */ 4.567 - 4.568 -void __init 4.569 -acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) 4.570 -{ 4.571 - unsigned char mask = 1 << (irq & 7); 4.572 - unsigned int port = 0x4d0 + (irq >> 3); 4.573 - unsigned char val = inb(port); 4.574 - 4.575 - 4.576 - printk(PREFIX "IRQ%d SCI:", irq); 4.577 - if (!(val & mask)) { 4.578 - printk(" Edge"); 4.579 - 4.580 - if (trigger == 3) { 4.581 - printk(" set to Level"); 4.582 - outb(val | mask, port); 4.583 - } 4.584 - } else { 4.585 - printk(" Level"); 4.586 - 4.587 - if (trigger == 1) { 4.588 - printk(" set to Edge"); 4.589 - outb(val & ~mask, port); 4.590 - } 4.591 - } 4.592 - printk(" Trigger.\n"); 4.593 -} 4.594 - 4.595 -#endif /* CONFIG_ACPI_BUS */ 4.596 - 4.597 - 4.598 -/* -------------------------------------------------------------------------- 4.599 - Low-Level Sleep Support 4.600 - -------------------------------------------------------------------------- */ 4.601 - 4.602 -#ifdef CONFIG_ACPI_SLEEP 4.603 - 4.604 -#define DEBUG 4.605 - 4.606 -#ifdef DEBUG 4.607 -#include <xen/serial.h> 4.608 -#endif 4.609 - 4.610 -/* address in low memory of the wakeup routine. */ 4.611 -unsigned long acpi_wakeup_address = 0; 4.612 - 4.613 -/* new page directory that we will be using */ 4.614 -static pmd_t *pmd; 4.615 - 4.616 -/* saved page directory */ 4.617 -static pmd_t saved_pmd; 4.618 - 4.619 -/* page which we'll use for the new page directory */ 4.620 -static pte_t *ptep; 4.621 - 4.622 -extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); 4.623 - 4.624 -/* 4.625 - * acpi_create_identity_pmd 4.626 - * 4.627 - * Create a new, identity mapped pmd. 4.628 - * 4.629 - * Do this by creating new page directory, and marking all the pages as R/W 4.630 - * Then set it as the new Page Middle Directory. 4.631 - * And, of course, flush the TLB so it takes effect. 4.632 - * 4.633 - * We save the address of the old one, for later restoration. 4.634 - */ 4.635 -static void acpi_create_identity_pmd (void) 4.636 -{ 4.637 - pgd_t *pgd; 4.638 - int i; 4.639 - 4.640 - ptep = (pte_t*)__get_free_page(GFP_KERNEL); 4.641 - 4.642 - /* fill page with low mapping */ 4.643 - for (i = 0; i < PTRS_PER_PTE; i++) 4.644 - set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED)); 4.645 - 4.646 - pgd = pgd_offset(current->active_mm, 0); 4.647 - pmd = pmd_alloc(current->mm,pgd, 0); 4.648 - 4.649 - /* save the old pmd */ 4.650 - saved_pmd = *pmd; 4.651 - 4.652 - /* set the new one */ 4.653 - set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep))); 4.654 - 4.655 - /* flush the TLB */ 4.656 - local_flush_tlb(); 4.657 -} 4.658 - 4.659 -/* 4.660 - * acpi_restore_pmd 4.661 - * 4.662 - * Restore the old pmd saved by acpi_create_identity_pmd and 4.663 - * free the page that said function alloc'd 4.664 - */ 4.665 -static void acpi_restore_pmd (void) 4.666 -{ 4.667 - set_pmd(pmd, saved_pmd); 4.668 - local_flush_tlb(); 4.669 - free_page((unsigned long)ptep); 4.670 -} 4.671 - 4.672 -/** 4.673 - * acpi_save_state_mem - save kernel state 4.674 - * 4.675 - * Create an identity mapped page table and copy the wakeup routine to 4.676 - * low memory. 
4.677 - */ 4.678 -int acpi_save_state_mem (void) 4.679 -{ 4.680 - acpi_create_identity_pmd(); 4.681 - acpi_copy_wakeup_routine(acpi_wakeup_address); 4.682 - 4.683 - return 0; 4.684 -} 4.685 - 4.686 -/** 4.687 - * acpi_save_state_disk - save kernel state to disk 4.688 - * 4.689 - */ 4.690 -int acpi_save_state_disk (void) 4.691 -{ 4.692 - return 1; 4.693 -} 4.694 - 4.695 -/* 4.696 - * acpi_restore_state 4.697 - */ 4.698 -void acpi_restore_state_mem (void) 4.699 -{ 4.700 - acpi_restore_pmd(); 4.701 -} 4.702 - 4.703 -/** 4.704 - * acpi_reserve_bootmem - do _very_ early ACPI initialisation 4.705 - * 4.706 - * We allocate a page in low memory for the wakeup 4.707 - * routine for when we come back from a sleep state. The 4.708 - * runtime allocator allows specification of <16M pages, but not 4.709 - * <1M pages. 4.710 - */ 4.711 -void __init acpi_reserve_bootmem(void) 4.712 -{ 4.713 - acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE); 4.714 - if (!acpi_wakeup_address) 4.715 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); 4.716 -} 4.717 - 4.718 -void do_suspend_lowlevel_s4bios(int resume) 4.719 -{ 4.720 - if (!resume) { 4.721 - save_processor_context(); 4.722 - acpi_save_register_state((unsigned long)&&acpi_sleep_done); 4.723 - acpi_enter_sleep_state_s4bios(); 4.724 - return; 4.725 - } 4.726 -acpi_sleep_done: 4.727 - restore_processor_context(); 4.728 -} 4.729 - 4.730 - 4.731 -#endif /*CONFIG_ACPI_SLEEP*/ 4.732 -
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 5.2 +++ b/xen/arch/x86/acpi/boot.c Mon May 09 17:50:11 2005 +0000 5.3 @@ -0,0 +1,912 @@ 5.4 +/* 5.5 + * boot.c - Architecture-Specific Low-Level ACPI Boot Support 5.6 + * 5.7 + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5.8 + * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> 5.9 + * 5.10 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5.11 + * 5.12 + * This program is free software; you can redistribute it and/or modify 5.13 + * it under the terms of the GNU General Public License as published by 5.14 + * the Free Software Foundation; either version 2 of the License, or 5.15 + * (at your option) any later version. 5.16 + * 5.17 + * This program is distributed in the hope that it will be useful, 5.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 5.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 5.20 + * GNU General Public License for more details. 5.21 + * 5.22 + * You should have received a copy of the GNU General Public License 5.23 + * along with this program; if not, write to the Free Software 5.24 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 5.25 + * 5.26 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5.27 + */ 5.28 + 5.29 +#include <xen/config.h> 5.30 +#include <xen/errno.h> 5.31 +#include <xen/init.h> 5.32 +#include <xen/acpi.h> 5.33 +#include <xen/irq.h> 5.34 +#include <xen/sched.h> 5.35 +#include <asm/page.h> 5.36 +#include <asm/apic.h> 5.37 +#include <asm/io_apic.h> 5.38 +#include <asm/apic.h> 5.39 +#include <asm/io.h> 5.40 +#include <asm/irq.h> 5.41 +#include <asm/mpspec.h> 5.42 + 5.43 +int sbf_port; /* XXX XEN */ 5.44 + 5.45 +#ifdef CONFIG_X86_64 5.46 + 5.47 +static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { } 5.48 +extern void __init clustered_apic_check(void); 5.49 +static inline int ioapic_setup_disabled(void) { return 0; } 5.50 +#include <asm/proto.h> 5.51 + 5.52 +#else /* X86 */ 5.53 + 5.54 +#ifdef CONFIG_X86_LOCAL_APIC 5.55 +#include <mach_apic.h> 5.56 +#include <mach_mpparse.h> 5.57 +#endif /* CONFIG_X86_LOCAL_APIC */ 5.58 + 5.59 +#endif /* X86 */ 5.60 + 5.61 +#define BAD_MADT_ENTRY(entry, end) ( \ 5.62 + (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 5.63 + ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) 5.64 + 5.65 +#define PREFIX "ACPI: " 5.66 + 5.67 +#ifdef CONFIG_ACPI_PCI 5.68 +int acpi_noirq __initdata; /* skip ACPI IRQ initialization */ 5.69 +int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ 5.70 +#else 5.71 +int acpi_noirq __initdata = 1; 5.72 +int acpi_pci_disabled __initdata = 1; 5.73 +#endif 5.74 +int acpi_ht __initdata = 1; /* enable HT */ 5.75 + 5.76 +int acpi_lapic; 5.77 +int acpi_ioapic; 5.78 +int acpi_strict; 5.79 +EXPORT_SYMBOL(acpi_strict); 5.80 + 5.81 +acpi_interrupt_flags acpi_sci_flags __initdata; 5.82 +int acpi_sci_override_gsi __initdata; 5.83 +int acpi_skip_timer_override __initdata; 5.84 + 5.85 +#ifdef CONFIG_X86_LOCAL_APIC 5.86 +static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; 5.87 +#endif 5.88 + 5.89 +#ifndef __HAVE_ARCH_CMPXCHG 5.90 +#warning ACPI uses CMPXCHG, i486 and later hardware 5.91 +#endif 5.92 + 5.93 +#define MAX_MADT_ENTRIES 256 5.94 +u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = 5.95 + { [0 ... 
MAX_MADT_ENTRIES-1] = 0xff }; 5.96 +EXPORT_SYMBOL(x86_acpiid_to_apicid); 5.97 + 5.98 +/* -------------------------------------------------------------------------- 5.99 + Boot-time Configuration 5.100 + -------------------------------------------------------------------------- */ 5.101 + 5.102 +/* 5.103 + * The default interrupt routing model is PIC (8259). This gets 5.104 + * overriden if IOAPICs are enumerated (below). 5.105 + */ 5.106 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; 5.107 + 5.108 +#ifdef CONFIG_X86_64 5.109 + 5.110 +/* rely on all ACPI tables being in the direct mapping */ 5.111 +char *__acpi_map_table(unsigned long phys_addr, unsigned long size) 5.112 +{ 5.113 + if (!phys_addr || !size) 5.114 + return NULL; 5.115 + 5.116 + if (phys_addr < (end_pfn_map << PAGE_SHIFT)) 5.117 + return __va(phys_addr); 5.118 + 5.119 + return NULL; 5.120 +} 5.121 + 5.122 +#else 5.123 + 5.124 +/* 5.125 + * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, 5.126 + * to map the target physical address. The problem is that set_fixmap() 5.127 + * provides a single page, and it is possible that the page is not 5.128 + * sufficient. 5.129 + * By using this area, we can map up to MAX_IO_APICS pages temporarily, 5.130 + * i.e. until the next __va_range() call. 5.131 + * 5.132 + * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* 5.133 + * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and 5.134 + * count idx down while incrementing the phys address. 5.135 + */ 5.136 +char *__acpi_map_table(unsigned long phys, unsigned long size) 5.137 +{ 5.138 + unsigned long base, offset, mapped_size; 5.139 + int idx; 5.140 + 5.141 + if (phys + size < 8*1024*1024) 5.142 + return __va(phys); 5.143 + 5.144 + offset = phys & (PAGE_SIZE - 1); 5.145 + mapped_size = PAGE_SIZE - offset; 5.146 + set_fixmap(FIX_ACPI_END, phys); 5.147 + base = fix_to_virt(FIX_ACPI_END); 5.148 + 5.149 + /* 5.150 + * Most cases can be covered by the below. 
5.151 + */ 5.152 + idx = FIX_ACPI_END; 5.153 + while (mapped_size < size) { 5.154 + if (--idx < FIX_ACPI_BEGIN) 5.155 + return NULL; /* cannot handle this */ 5.156 + phys += PAGE_SIZE; 5.157 + set_fixmap(idx, phys); 5.158 + mapped_size += PAGE_SIZE; 5.159 + } 5.160 + 5.161 + return ((unsigned char *) base + offset); 5.162 +} 5.163 +#endif 5.164 + 5.165 +#ifdef CONFIG_PCI_MMCONFIG 5.166 +static int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) 5.167 +{ 5.168 + struct acpi_table_mcfg *mcfg; 5.169 + 5.170 + if (!phys_addr || !size) 5.171 + return -EINVAL; 5.172 + 5.173 + mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size); 5.174 + if (!mcfg) { 5.175 + printk(KERN_WARNING PREFIX "Unable to map MCFG\n"); 5.176 + return -ENODEV; 5.177 + } 5.178 + 5.179 + if (mcfg->base_reserved) { 5.180 + printk(KERN_ERR PREFIX "MMCONFIG not in low 4GB of memory\n"); 5.181 + return -ENODEV; 5.182 + } 5.183 + 5.184 + pci_mmcfg_base_addr = mcfg->base_address; 5.185 + 5.186 + return 0; 5.187 +} 5.188 +#else 5.189 +#define acpi_parse_mcfg NULL 5.190 +#endif /* !CONFIG_PCI_MMCONFIG */ 5.191 + 5.192 +#ifdef CONFIG_X86_LOCAL_APIC 5.193 +static int __init 5.194 +acpi_parse_madt ( 5.195 + unsigned long phys_addr, 5.196 + unsigned long size) 5.197 +{ 5.198 + struct acpi_table_madt *madt = NULL; 5.199 + 5.200 + if (!phys_addr || !size) 5.201 + return -EINVAL; 5.202 + 5.203 + madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size); 5.204 + if (!madt) { 5.205 + printk(KERN_WARNING PREFIX "Unable to map MADT\n"); 5.206 + return -ENODEV; 5.207 + } 5.208 + 5.209 + if (madt->lapic_address) { 5.210 + acpi_lapic_addr = (u64) madt->lapic_address; 5.211 + 5.212 + printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", 5.213 + madt->lapic_address); 5.214 + } 5.215 + 5.216 + acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); 5.217 + 5.218 + return 0; 5.219 +} 5.220 + 5.221 + 5.222 +static int __init 5.223 +acpi_parse_lapic ( 5.224 + acpi_table_entry_header *header, const unsigned long end) 5.225 +{ 5.226 + struct acpi_table_lapic *processor = NULL; 5.227 + 5.228 + processor = (struct acpi_table_lapic*) header; 5.229 + 5.230 + if (BAD_MADT_ENTRY(processor, end)) 5.231 + return -EINVAL; 5.232 + 5.233 + acpi_table_print_madt_entry(header); 5.234 + 5.235 + /* no utility in registering a disabled processor */ 5.236 + if (processor->flags.enabled == 0) 5.237 + return 0; 5.238 + 5.239 + x86_acpiid_to_apicid[processor->acpi_id] = processor->id; 5.240 + 5.241 + mp_register_lapic ( 5.242 + processor->id, /* APIC ID */ 5.243 + processor->flags.enabled); /* Enabled? 
*/ 5.244 + 5.245 + return 0; 5.246 +} 5.247 + 5.248 +static int __init 5.249 +acpi_parse_lapic_addr_ovr ( 5.250 + acpi_table_entry_header *header, const unsigned long end) 5.251 +{ 5.252 + struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; 5.253 + 5.254 + lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header; 5.255 + 5.256 + if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) 5.257 + return -EINVAL; 5.258 + 5.259 + acpi_lapic_addr = lapic_addr_ovr->address; 5.260 + 5.261 + return 0; 5.262 +} 5.263 + 5.264 +static int __init 5.265 +acpi_parse_lapic_nmi ( 5.266 + acpi_table_entry_header *header, const unsigned long end) 5.267 +{ 5.268 + struct acpi_table_lapic_nmi *lapic_nmi = NULL; 5.269 + 5.270 + lapic_nmi = (struct acpi_table_lapic_nmi*) header; 5.271 + 5.272 + if (BAD_MADT_ENTRY(lapic_nmi, end)) 5.273 + return -EINVAL; 5.274 + 5.275 + acpi_table_print_madt_entry(header); 5.276 + 5.277 + if (lapic_nmi->lint != 1) 5.278 + printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); 5.279 + 5.280 + return 0; 5.281 +} 5.282 + 5.283 + 5.284 +#endif /*CONFIG_X86_LOCAL_APIC*/ 5.285 + 5.286 +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) 5.287 + 5.288 +static int __init 5.289 +acpi_parse_ioapic ( 5.290 + acpi_table_entry_header *header, const unsigned long end) 5.291 +{ 5.292 + struct acpi_table_ioapic *ioapic = NULL; 5.293 + 5.294 + ioapic = (struct acpi_table_ioapic*) header; 5.295 + 5.296 + if (BAD_MADT_ENTRY(ioapic, end)) 5.297 + return -EINVAL; 5.298 + 5.299 + acpi_table_print_madt_entry(header); 5.300 + 5.301 + mp_register_ioapic ( 5.302 + ioapic->id, 5.303 + ioapic->address, 5.304 + ioapic->global_irq_base); 5.305 + 5.306 + return 0; 5.307 +} 5.308 + 5.309 +/* 5.310 + * Parse Interrupt Source Override for the ACPI SCI 5.311 + */ 5.312 +static void 5.313 +acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) 5.314 +{ 5.315 + if (trigger == 0) /* compatible SCI trigger is level */ 5.316 + trigger = 3; 5.317 + 5.318 + if (polarity == 0) /* compatible SCI polarity is low */ 5.319 + polarity = 3; 5.320 + 5.321 + /* Command-line over-ride via acpi_sci= */ 5.322 + if (acpi_sci_flags.trigger) 5.323 + trigger = acpi_sci_flags.trigger; 5.324 + 5.325 + if (acpi_sci_flags.polarity) 5.326 + polarity = acpi_sci_flags.polarity; 5.327 + 5.328 + /* 5.329 + * mp_config_acpi_legacy_irqs() already setup IRQs < 16 5.330 + * If GSI is < 16, this will update its flags, 5.331 + * else it will create a new mp_irqs[] entry. 
5.332 + */ 5.333 + mp_override_legacy_irq(gsi, polarity, trigger, gsi); 5.334 + 5.335 + /* 5.336 + * stash over-ride to indicate we've been here 5.337 + * and for later update of acpi_fadt 5.338 + */ 5.339 + acpi_sci_override_gsi = gsi; 5.340 + return; 5.341 +} 5.342 + 5.343 +static int __init 5.344 +acpi_parse_int_src_ovr ( 5.345 + acpi_table_entry_header *header, const unsigned long end) 5.346 +{ 5.347 + struct acpi_table_int_src_ovr *intsrc = NULL; 5.348 + 5.349 + intsrc = (struct acpi_table_int_src_ovr*) header; 5.350 + 5.351 + if (BAD_MADT_ENTRY(intsrc, end)) 5.352 + return -EINVAL; 5.353 + 5.354 + acpi_table_print_madt_entry(header); 5.355 + 5.356 + if (intsrc->bus_irq == acpi_fadt.sci_int) { 5.357 + acpi_sci_ioapic_setup(intsrc->global_irq, 5.358 + intsrc->flags.polarity, intsrc->flags.trigger); 5.359 + return 0; 5.360 + } 5.361 + 5.362 + if (acpi_skip_timer_override && 5.363 + intsrc->bus_irq == 0 && intsrc->global_irq == 2) { 5.364 + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); 5.365 + return 0; 5.366 + } 5.367 + 5.368 + mp_override_legacy_irq ( 5.369 + intsrc->bus_irq, 5.370 + intsrc->flags.polarity, 5.371 + intsrc->flags.trigger, 5.372 + intsrc->global_irq); 5.373 + 5.374 + return 0; 5.375 +} 5.376 + 5.377 + 5.378 +static int __init 5.379 +acpi_parse_nmi_src ( 5.380 + acpi_table_entry_header *header, const unsigned long end) 5.381 +{ 5.382 + struct acpi_table_nmi_src *nmi_src = NULL; 5.383 + 5.384 + nmi_src = (struct acpi_table_nmi_src*) header; 5.385 + 5.386 + if (BAD_MADT_ENTRY(nmi_src, end)) 5.387 + return -EINVAL; 5.388 + 5.389 + acpi_table_print_madt_entry(header); 5.390 + 5.391 + /* TBD: Support nimsrc entries? */ 5.392 + 5.393 + return 0; 5.394 +} 5.395 + 5.396 +#endif /* CONFIG_X86_IO_APIC */ 5.397 + 5.398 +#ifdef CONFIG_ACPI_BUS 5.399 + 5.400 +/* 5.401 + * acpi_pic_sci_set_trigger() 5.402 + * 5.403 + * use ELCR to set PIC-mode trigger type for SCI 5.404 + * 5.405 + * If a PIC-mode SCI is not recognized or gives spurious IRQ7's 5.406 + * it may require Edge Trigger -- use "acpi_sci=edge" 5.407 + * 5.408 + * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers 5.409 + * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. 5.410 + * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) 5.411 + * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) 5.412 + */ 5.413 + 5.414 +void __init 5.415 +acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) 5.416 +{ 5.417 + unsigned int mask = 1 << irq; 5.418 + unsigned int old, new; 5.419 + 5.420 + /* Real old ELCR mask */ 5.421 + old = inb(0x4d0) | (inb(0x4d1) << 8); 5.422 + 5.423 + /* 5.424 + * If we use ACPI to set PCI irq's, then we should clear ELCR 5.425 + * since we will set it correctly as we enable the PCI irq 5.426 + * routing. 5.427 + */ 5.428 + new = acpi_noirq ? old : 0; 5.429 + 5.430 + /* 5.431 + * Update SCI information in the ELCR, it isn't in the PCI 5.432 + * routing tables.. 
5.433 + */ 5.434 + switch (trigger) { 5.435 + case 1: /* Edge - clear */ 5.436 + new &= ~mask; 5.437 + break; 5.438 + case 3: /* Level - set */ 5.439 + new |= mask; 5.440 + break; 5.441 + } 5.442 + 5.443 + if (old == new) 5.444 + return; 5.445 + 5.446 + printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); 5.447 + outb(new, 0x4d0); 5.448 + outb(new >> 8, 0x4d1); 5.449 +} 5.450 + 5.451 + 5.452 +#endif /* CONFIG_ACPI_BUS */ 5.453 + 5.454 +int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 5.455 +{ 5.456 +#ifdef CONFIG_X86_IO_APIC 5.457 + if (use_pci_vector() && !platform_legacy_irq(gsi)) 5.458 + *irq = IO_APIC_VECTOR(gsi); 5.459 + else 5.460 +#endif 5.461 + *irq = gsi; 5.462 + return 0; 5.463 +} 5.464 + 5.465 +unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low) 5.466 +{ 5.467 + unsigned int irq; 5.468 + unsigned int plat_gsi = gsi; 5.469 + 5.470 +#ifdef CONFIG_PCI 5.471 + /* 5.472 + * Make sure all (legacy) PCI IRQs are set as level-triggered. 5.473 + */ 5.474 + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { 5.475 + extern void eisa_set_level_irq(unsigned int irq); 5.476 + 5.477 + if (edge_level == ACPI_LEVEL_SENSITIVE) 5.478 + eisa_set_level_irq(gsi); 5.479 + } 5.480 +#endif 5.481 + 5.482 +#ifdef CONFIG_X86_IO_APIC 5.483 + if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { 5.484 + plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low); 5.485 + } 5.486 +#endif 5.487 + acpi_gsi_to_irq(plat_gsi, &irq); 5.488 + return irq; 5.489 +} 5.490 +EXPORT_SYMBOL(acpi_register_gsi); 5.491 + 5.492 +/* 5.493 + * ACPI based hotplug support for CPU 5.494 + */ 5.495 +#ifdef CONFIG_ACPI_HOTPLUG_CPU 5.496 +int 5.497 +acpi_map_lsapic(acpi_handle handle, int *pcpu) 5.498 +{ 5.499 + /* TBD */ 5.500 + return -EINVAL; 5.501 +} 5.502 +EXPORT_SYMBOL(acpi_map_lsapic); 5.503 + 5.504 + 5.505 +int 5.506 +acpi_unmap_lsapic(int cpu) 5.507 +{ 5.508 + /* TBD */ 5.509 + return -EINVAL; 5.510 +} 5.511 +EXPORT_SYMBOL(acpi_unmap_lsapic); 5.512 +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 5.513 + 5.514 +static unsigned long __init 5.515 +acpi_scan_rsdp ( 5.516 + unsigned long start, 5.517 + unsigned long length) 5.518 +{ 5.519 + unsigned long offset = 0; 5.520 + unsigned long sig_len = sizeof("RSD PTR ") - 1; 5.521 + 5.522 + /* 5.523 + * Scan all 16-byte boundaries of the physical memory region for the 5.524 + * RSDP signature. 
5.525 + */ 5.526 + for (offset = 0; offset < length; offset += 16) { 5.527 + if (strncmp((char *) (start + offset), "RSD PTR ", sig_len)) 5.528 + continue; 5.529 + return (start + offset); 5.530 + } 5.531 + 5.532 + return 0; 5.533 +} 5.534 + 5.535 +static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) 5.536 +{ 5.537 + struct acpi_table_sbf *sb; 5.538 + 5.539 + if (!phys_addr || !size) 5.540 + return -EINVAL; 5.541 + 5.542 + sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size); 5.543 + if (!sb) { 5.544 + printk(KERN_WARNING PREFIX "Unable to map SBF\n"); 5.545 + return -ENODEV; 5.546 + } 5.547 + 5.548 + sbf_port = sb->sbf_cmos; /* Save CMOS port */ 5.549 + 5.550 + return 0; 5.551 +} 5.552 + 5.553 + 5.554 +#ifdef CONFIG_HPET_TIMER 5.555 + 5.556 +static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) 5.557 +{ 5.558 + struct acpi_table_hpet *hpet_tbl; 5.559 + 5.560 + if (!phys || !size) 5.561 + return -EINVAL; 5.562 + 5.563 + hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size); 5.564 + if (!hpet_tbl) { 5.565 + printk(KERN_WARNING PREFIX "Unable to map HPET\n"); 5.566 + return -ENODEV; 5.567 + } 5.568 + 5.569 + if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) { 5.570 + printk(KERN_WARNING PREFIX "HPET timers must be located in " 5.571 + "memory.\n"); 5.572 + return -1; 5.573 + } 5.574 + 5.575 +#ifdef CONFIG_X86_64 5.576 + vxtime.hpet_address = hpet_tbl->addr.addrl | 5.577 + ((long) hpet_tbl->addr.addrh << 32); 5.578 + 5.579 + printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 5.580 + hpet_tbl->id, vxtime.hpet_address); 5.581 +#else /* X86 */ 5.582 + { 5.583 + extern unsigned long hpet_address; 5.584 + 5.585 + hpet_address = hpet_tbl->addr.addrl; 5.586 + printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 5.587 + hpet_tbl->id, hpet_address); 5.588 + } 5.589 +#endif /* X86 */ 5.590 + 5.591 + return 0; 5.592 +} 5.593 +#else 5.594 +#define acpi_parse_hpet NULL 5.595 +#endif 5.596 + 5.597 +#ifdef CONFIG_X86_PM_TIMER 5.598 +extern u32 pmtmr_ioport; 5.599 +#endif 5.600 + 5.601 +static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) 5.602 +{ 5.603 + struct fadt_descriptor_rev2 *fadt = NULL; 5.604 + 5.605 + fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size); 5.606 + if(!fadt) { 5.607 + printk(KERN_WARNING PREFIX "Unable to map FADT\n"); 5.608 + return 0; 5.609 + } 5.610 + 5.611 +#ifdef CONFIG_ACPI_INTERPRETER 5.612 + /* initialize sci_int early for INT_SRC_OVR MADT parsing */ 5.613 + acpi_fadt.sci_int = fadt->sci_int; 5.614 +#endif 5.615 + 5.616 +#ifdef CONFIG_X86_PM_TIMER 5.617 + /* detect the location of the ACPI PM Timer */ 5.618 + if (fadt->revision >= FADT2_REVISION_ID) { 5.619 + /* FADT rev. 2 */ 5.620 + if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO) 5.621 + return 0; 5.622 + 5.623 + pmtmr_ioport = fadt->xpm_tmr_blk.address; 5.624 + } else { 5.625 + /* FADT rev. 1 */ 5.626 + pmtmr_ioport = fadt->V1_pm_tmr_blk; 5.627 + } 5.628 + if (pmtmr_ioport) 5.629 + printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); 5.630 +#endif 5.631 + return 0; 5.632 +} 5.633 + 5.634 + 5.635 +unsigned long __init 5.636 +acpi_find_rsdp (void) 5.637 +{ 5.638 + unsigned long rsdp_phys = 0; 5.639 + 5.640 +#if 0 5.641 + if (efi_enabled) { 5.642 + if (efi.acpi20) 5.643 + return __pa(efi.acpi20); 5.644 + else if (efi.acpi) 5.645 + return __pa(efi.acpi); 5.646 + } 5.647 +#endif 5.648 + /* 5.649 + * Scan memory looking for the RSDP signature. 
First search EBDA (low 5.650 + * memory) paragraphs and then search upper memory (E0000-FFFFF). 5.651 + */ 5.652 + rsdp_phys = acpi_scan_rsdp (0, 0x400); 5.653 + if (!rsdp_phys) 5.654 + rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF); 5.655 + 5.656 + return rsdp_phys; 5.657 +} 5.658 + 5.659 +#ifdef CONFIG_X86_LOCAL_APIC 5.660 +/* 5.661 + * Parse LAPIC entries in MADT 5.662 + * returns 0 on success, < 0 on error 5.663 + */ 5.664 +static int __init 5.665 +acpi_parse_madt_lapic_entries(void) 5.666 +{ 5.667 + int count; 5.668 + 5.669 + /* 5.670 + * Note that the LAPIC address is obtained from the MADT (32-bit value) 5.671 + * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 5.672 + */ 5.673 + 5.674 + count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0); 5.675 + if (count < 0) { 5.676 + printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); 5.677 + return count; 5.678 + } 5.679 + 5.680 + mp_register_lapic_address(acpi_lapic_addr); 5.681 + 5.682 + count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, 5.683 + MAX_APICS); 5.684 + if (!count) { 5.685 + printk(KERN_ERR PREFIX "No LAPIC entries present\n"); 5.686 + /* TBD: Cleanup to allow fallback to MPS */ 5.687 + return -ENODEV; 5.688 + } 5.689 + else if (count < 0) { 5.690 + printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); 5.691 + /* TBD: Cleanup to allow fallback to MPS */ 5.692 + return count; 5.693 + } 5.694 + 5.695 + count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); 5.696 + if (count < 0) { 5.697 + printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 5.698 + /* TBD: Cleanup to allow fallback to MPS */ 5.699 + return count; 5.700 + } 5.701 + return 0; 5.702 +} 5.703 +#endif /* CONFIG_X86_LOCAL_APIC */ 5.704 + 5.705 +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) 5.706 +/* 5.707 + * Parse IOAPIC related entries in MADT 5.708 + * returns 0 on success, < 0 on error 5.709 + */ 5.710 +static int __init 5.711 +acpi_parse_madt_ioapic_entries(void) 5.712 +{ 5.713 + int count; 5.714 + 5.715 + /* 5.716 + * ACPI interpreter is required to complete interrupt setup, 5.717 + * so if it is off, don't enumerate the io-apics with ACPI. 5.718 + * If MPS is present, it will handle them, 5.719 + * otherwise the system will stay in PIC mode 5.720 + */ 5.721 + if (acpi_disabled || acpi_noirq) { 5.722 + return -ENODEV; 5.723 + } 5.724 + 5.725 + /* 5.726 + * if "noapic" boot option, don't look for IO-APICs 5.727 + */ 5.728 + if (skip_ioapic_setup) { 5.729 + printk(KERN_INFO PREFIX "Skipping IOAPIC probe " 5.730 + "due to 'noapic' option.\n"); 5.731 + return -ENODEV; 5.732 + } 5.733 + 5.734 + count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS); 5.735 + if (!count) { 5.736 + printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); 5.737 + return -ENODEV; 5.738 + } 5.739 + else if (count < 0) { 5.740 + printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); 5.741 + return count; 5.742 + } 5.743 + 5.744 + count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS); 5.745 + if (count < 0) { 5.746 + printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); 5.747 + /* TBD: Cleanup to allow fallback to MPS */ 5.748 + return count; 5.749 + } 5.750 + 5.751 + /* 5.752 + * If BIOS did not supply an INT_SRC_OVR for the SCI 5.753 + * pretend we got one so we can set the SCI flags. 
5.754 + */ 5.755 + if (!acpi_sci_override_gsi) 5.756 + acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); 5.757 + 5.758 + /* Fill in identity legacy mapings where no override */ 5.759 + mp_config_acpi_legacy_irqs(); 5.760 + 5.761 + count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS); 5.762 + if (count < 0) { 5.763 + printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 5.764 + /* TBD: Cleanup to allow fallback to MPS */ 5.765 + return count; 5.766 + } 5.767 + 5.768 + return 0; 5.769 +} 5.770 +#else 5.771 +static inline int acpi_parse_madt_ioapic_entries(void) 5.772 +{ 5.773 + return -1; 5.774 +} 5.775 +#endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */ 5.776 + 5.777 + 5.778 +static void __init 5.779 +acpi_process_madt(void) 5.780 +{ 5.781 +#ifdef CONFIG_X86_LOCAL_APIC 5.782 + int count, error; 5.783 + 5.784 + count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); 5.785 + if (count >= 1) { 5.786 + 5.787 + /* 5.788 + * Parse MADT LAPIC entries 5.789 + */ 5.790 + error = acpi_parse_madt_lapic_entries(); 5.791 + if (!error) { 5.792 + acpi_lapic = 1; 5.793 + 5.794 + /* 5.795 + * Parse MADT IO-APIC entries 5.796 + */ 5.797 + error = acpi_parse_madt_ioapic_entries(); 5.798 + if (!error) { 5.799 + acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; 5.800 + acpi_irq_balance_set(NULL); 5.801 + acpi_ioapic = 1; 5.802 + 5.803 + smp_found_config = 1; 5.804 + clustered_apic_check(); 5.805 + } 5.806 + } 5.807 + if (error == -EINVAL) { 5.808 + /* 5.809 + * Dell Precision Workstation 410, 610 come here. 5.810 + */ 5.811 + printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); 5.812 + disable_acpi(); 5.813 + } 5.814 + } 5.815 +#endif 5.816 + return; 5.817 +} 5.818 + 5.819 +/* 5.820 + * acpi_boot_table_init() and acpi_boot_init() 5.821 + * called from setup_arch(), always. 5.822 + * 1. checksums all tables 5.823 + * 2. enumerates lapics 5.824 + * 3. enumerates io-apics 5.825 + * 5.826 + * acpi_table_init() is separate to allow reading SRAT without 5.827 + * other side effects. 5.828 + * 5.829 + * side effects of acpi_boot_init: 5.830 + * acpi_lapic = 1 if LAPIC found 5.831 + * acpi_ioapic = 1 if IOAPIC found 5.832 + * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; 5.833 + * if acpi_blacklisted() acpi_disabled = 1; 5.834 + * acpi_irq_model=... 5.835 + * ... 5.836 + * 5.837 + * return value: (currently ignored) 5.838 + * 0: success 5.839 + * !0: failure 5.840 + */ 5.841 + 5.842 +int __init 5.843 +acpi_boot_table_init(void) 5.844 +{ 5.845 + int error; 5.846 + 5.847 + /* 5.848 + * If acpi_disabled, bail out 5.849 + * One exception: acpi=ht continues far enough to enumerate LAPICs 5.850 + */ 5.851 + if (acpi_disabled && !acpi_ht) 5.852 + return 1; 5.853 + 5.854 + /* 5.855 + * Initialize the ACPI boot-time table parser. 
5.856 + */ 5.857 + error = acpi_table_init(); 5.858 + if (error) { 5.859 + disable_acpi(); 5.860 + return error; 5.861 + } 5.862 + 5.863 +#if 0 /*def __i386__*/ 5.864 + check_acpi_pci(); 5.865 +#endif 5.866 + 5.867 + acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 5.868 + 5.869 + /* 5.870 + * blacklist may disable ACPI entirely 5.871 + */ 5.872 + error = acpi_blacklisted(); 5.873 + if (error) { 5.874 + extern int acpi_force; 5.875 + 5.876 + if (acpi_force) { 5.877 + printk(KERN_WARNING PREFIX "acpi=force override\n"); 5.878 + } else { 5.879 + printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); 5.880 + disable_acpi(); 5.881 + return error; 5.882 + } 5.883 + } 5.884 + 5.885 + return 0; 5.886 +} 5.887 + 5.888 + 5.889 +int __init acpi_boot_init(void) 5.890 +{ 5.891 + /* 5.892 + * If acpi_disabled, bail out 5.893 + * One exception: acpi=ht continues far enough to enumerate LAPICs 5.894 + */ 5.895 + if (acpi_disabled && !acpi_ht) 5.896 + return 1; 5.897 + 5.898 + acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 5.899 + 5.900 + /* 5.901 + * set sci_int and PM timer address 5.902 + */ 5.903 + acpi_table_parse(ACPI_FADT, acpi_parse_fadt); 5.904 + 5.905 + /* 5.906 + * Process the Multiple APIC Description Table (MADT), if present 5.907 + */ 5.908 + acpi_process_madt(); 5.909 + 5.910 + acpi_table_parse(ACPI_HPET, acpi_parse_hpet); 5.911 + acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 5.912 + 5.913 + return 0; 5.914 +} 5.915 +
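The new boot.c splits ACPI bring-up into acpi_boot_table_init() (table discovery and checksumming) and acpi_boot_init() (SBF/FADT/HPET/MCFG and MADT parsing, LAPIC and IO-APIC enumeration), as the comment block in the file describes. A minimal sketch of the intended call order from a setup_arch()-style caller; the wrapper name is illustrative only and assumes acpi_disabled/acpi_ht were already set from the command line:

    void __init setup_arch_acpi_sketch(void)
    {
        acpi_boot_table_init();  /* phase 1: locate and checksum ACPI tables;
                                  * early code may then read tables such as
                                  * SRAT without further side effects       */
        acpi_boot_init();        /* phase 2: parse FADT/MADT etc., setting
                                  * acpi_lapic, acpi_ioapic, acpi_irq_model  */
    }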
--- a/xen/arch/x86/apic.c	Mon May 09 14:34:59 2005 +0000
+++ b/xen/arch/x86/apic.c	Mon May 09 17:50:11 2005 +0000
@@ -34,14 +34,27 @@
 #include <asm/hardirq.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/mach_apic.h>
-#include <asm/io_ports.h>
+#include <mach_apic.h>
+#include <io_ports.h>
 
 /* Using APIC to generate smp_local_timer_interrupt? */
 int using_apic_timer = 0;
 
+int apic_verbosity;
+
 static int enabled_via_apicbase;
 
+int get_physical_broadcast(void)
+{
+    unsigned int lvr, version;
+    lvr = apic_read(APIC_LVR);
+    version = GET_APIC_VERSION(lvr);
+    if (!APIC_INTEGRATED(version) || version >= 0x14)
+        return 0xff;
+    else
+        return 0xf;
+}
+
 int get_maxlvt(void)
 {
     unsigned int v, ver, maxlvt;
@@ -907,7 +920,7 @@ int __init APIC_init_uniprocessor (void)
 #ifdef CONFIG_SMP
     cpu_online_map = 1;
 #endif
-    phys_cpu_present_map = 1;
+    phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
     apic_write_around(APIC_ID, boot_cpu_physical_apicid);
 
     setup_local_APIC();
--- a/xen/arch/x86/i8259.c	Mon May 09 14:34:59 2005 +0000
+++ b/xen/arch/x86/i8259.c	Mon May 09 17:50:11 2005 +0000
@@ -45,7 +45,7 @@ BUILD_COMMON_IRQ()
 
 /*
  * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x30-0x3f)
+ * (these are usually mapped to vectors 0x20-0x2f)
  */
 BUILD_16_IRQS(0x0)
 
@@ -58,7 +58,7 @@ BUILD_16_IRQS(0x0)
  * of these. Plus, more powerful systems might have more than 64
  * IO-APIC registers.
  *
- * (these are usually mapped into the 0x30-0xff vector range)
+ * (these are usually mapped into the 0x20-0xff vector range)
  */
 BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
 BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
@@ -101,7 +101,7 @@ BUILD_SMP_INTERRUPT(spurious_interrupt,S
     IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
     IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
 
-    void *interrupt[NR_IRQS] = {
+    void (*interrupt[NR_IRQS])(void) = {
     IRQLIST_16(0x0),
 
 #ifdef CONFIG_X86_IO_APIC
@@ -341,7 +341,7 @@ void __init init_8259A(int auto_eoi)
      * outb_p - this has to work on a wide range of PC hardware.
      */
     outb_p(0x11, 0x20);     /* ICW1: select 8259A-1 init */
-    outb_p(0x30 + 0, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
+    outb_p(0x20 + 0, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
     outb_p(0x04, 0x21);     /* 8259A-1 (the master) has a slave on IR2 */
     if (auto_eoi)
         outb_p(0x03, 0x21); /* master does Auto EOI */
@@ -349,7 +349,7 @@ void __init init_8259A(int auto_eoi)
         outb_p(0x01, 0x21); /* master expects normal EOI */
 
     outb_p(0x11, 0xA0);     /* ICW1: select 8259A-2 init */
-    outb_p(0x30 + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
+    outb_p(0x20 + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
     outb_p(0x02, 0xA1);     /* 8259A-2 is a slave on master's IR2 */
     outb_p(0x01, 0xA1);     /* (slave's support for AEOI in flat mode
                                is to be investigated) */
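The ICW2 reprogramming above moves the legacy PIC base vector from 0x30 down to 0x20, matching the 2.6-style layout: ISA IRQ n is now delivered on vector 0x20 + n (master PIC 0x20-0x27, slave 0x28-0x2f). A small worked example; the constant and function names are illustrative, not taken from this changeset:

    #define LEGACY_VECTOR_BASE 0x20                 /* illustrative name */

    static inline unsigned int legacy_irq_to_vector(unsigned int irq /* 0..15 */)
    {
        return LEGACY_VECTOR_BASE + irq;            /* IRQ0 -> 0x20, ..., IRQ15 -> 0x2f */
    }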
8.1 --- a/xen/arch/x86/io_apic.c Mon May 09 14:34:59 2005 +0000 8.2 +++ b/xen/arch/x86/io_apic.c Mon May 09 17:50:11 2005 +0000 8.3 @@ -25,26 +25,20 @@ 8.4 #include <xen/irq.h> 8.5 #include <xen/delay.h> 8.6 #include <xen/sched.h> 8.7 -#include <xen/config.h> 8.8 -#include <asm/mc146818rtc.h> 8.9 +#include <xen/acpi.h> 8.10 #include <asm/io.h> 8.11 -#include <asm/mpspec.h> 8.12 -#include <asm/io_apic.h> 8.13 +#include <asm/mc146818rtc.h> 8.14 #include <asm/smp.h> 8.15 #include <asm/desc.h> 8.16 -#include <asm/smpboot.h> 8.17 - 8.18 -#ifdef CONFIG_X86_IO_APIC 8.19 - 8.20 -#undef APIC_LOCKUP_DEBUG 8.21 +#include <mach_apic.h> 8.22 +#include <io_ports.h> 8.23 8.24 -#define APIC_LOCKUP_DEBUG 8.25 - 8.26 -static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED; 8.27 +int (*ioapic_renumber_irq)(int ioapic, int irq); 8.28 +atomic_t irq_mis_count; 8.29 8.30 -unsigned int int_dest_addr_mode = APIC_DEST_LOGICAL; 8.31 -unsigned char int_delivery_mode = dest_LowestPrio; 8.32 +static DEFINE_SPINLOCK(ioapic_lock); 8.33 8.34 +int skip_ioapic_setup; 8.35 8.36 /* 8.37 * # of IRQ routing registers 8.38 @@ -69,12 +63,20 @@ static struct irq_pin_list { 8.39 int apic, pin, next; 8.40 } irq_2_pin[PIN_MAP_SIZE]; 8.41 8.42 +int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; 8.43 +#ifdef CONFIG_PCI_MSI 8.44 +#define vector_to_irq(vector) \ 8.45 + (platform_legacy_irq(vector) ? vector : vector_irq[vector]) 8.46 +#else 8.47 +#define vector_to_irq(vector) (vector) 8.48 +#endif 8.49 + 8.50 /* 8.51 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 8.52 * shared ISA-space IRQs, so we have to support them. We are super 8.53 * fast in the common case, and fast for shared ISA-space IRQs. 8.54 */ 8.55 -static void __init add_pin_to_irq(unsigned int irq, int apic, int pin) 8.56 +static void add_pin_to_irq(unsigned int irq, int apic, int pin) 8.57 { 8.58 static int first_free_entry = NR_IRQS; 8.59 struct irq_pin_list *entry = irq_2_pin + irq; 8.60 @@ -112,36 +114,48 @@ static void __init replace_pin_at_irq(un 8.61 } 8.62 } 8.63 8.64 -#define __DO_ACTION(R, ACTION, FINAL) \ 8.65 - \ 8.66 -{ \ 8.67 - int pin; \ 8.68 - struct irq_pin_list *entry = irq_2_pin + irq; \ 8.69 - \ 8.70 - for (;;) { \ 8.71 - unsigned int reg; \ 8.72 - pin = entry->pin; \ 8.73 - if (pin == -1) \ 8.74 - break; \ 8.75 - reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \ 8.76 - reg ACTION; \ 8.77 - io_apic_write(entry->apic, 0x10 + R + pin*2, reg); \ 8.78 - if (!entry->next) \ 8.79 - break; \ 8.80 - entry = irq_2_pin + entry->next; \ 8.81 - } \ 8.82 - FINAL; \ 8.83 +static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable) 8.84 +{ 8.85 + struct irq_pin_list *entry = irq_2_pin + irq; 8.86 + unsigned int pin, reg; 8.87 + 8.88 + for (;;) { 8.89 + pin = entry->pin; 8.90 + if (pin == -1) 8.91 + break; 8.92 + reg = io_apic_read(entry->apic, 0x10 + pin*2); 8.93 + reg &= ~disable; 8.94 + reg |= enable; 8.95 + io_apic_modify(entry->apic, 0x10 + pin*2, reg); 8.96 + if (!entry->next) 8.97 + break; 8.98 + entry = irq_2_pin + entry->next; 8.99 + } 8.100 } 8.101 8.102 -#define DO_ACTION(name,R,ACTION, FINAL) \ 8.103 - \ 8.104 - static void name##_IO_APIC_irq (unsigned int irq) \ 8.105 - __DO_ACTION(R, ACTION, FINAL) 8.106 +/* mask = 1 */ 8.107 +static void __mask_IO_APIC_irq (unsigned int irq) 8.108 +{ 8.109 + __modify_IO_APIC_irq(irq, 0x00010000, 0); 8.110 +} 8.111 + 8.112 +/* mask = 0 */ 8.113 +static void __unmask_IO_APIC_irq (unsigned int irq) 8.114 +{ 8.115 + __modify_IO_APIC_irq(irq, 0, 0x00010000); 
8.116 +} 8.117 8.118 -DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) ) 8.119 -DO_ACTION( __unmask, 0, &= 0xfffeffff, ) 8.120 -DO_ACTION( __edge, 0, &= 0xffff7fff, ) 8.121 -DO_ACTION( __level, 0, |= 0x00008000, ) 8.122 +/* mask = 1, trigger = 0 */ 8.123 +static void __mask_and_edge_IO_APIC_irq (unsigned int irq) 8.124 +{ 8.125 + __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000); 8.126 +} 8.127 + 8.128 +/* mask = 0, trigger = 1 */ 8.129 +static void __unmask_and_level_IO_APIC_irq (unsigned int irq) 8.130 +{ 8.131 + __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); 8.132 +} 8.133 8.134 static void mask_IO_APIC_irq (unsigned int irq) 8.135 { 8.136 @@ -165,7 +179,7 @@ void clear_IO_APIC_pin(unsigned int apic 8.137 { 8.138 struct IO_APIC_route_entry entry; 8.139 unsigned long flags; 8.140 - 8.141 + 8.142 /* Check delivery_mode to be sure we're not clearing an SMI pin */ 8.143 spin_lock_irqsave(&ioapic_lock, flags); 8.144 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); 8.145 @@ -194,85 +208,467 @@ static void clear_IO_APIC (void) 8.146 clear_IO_APIC_pin(apic, pin); 8.147 } 8.148 8.149 -static void set_ioapic_affinity (unsigned int irq, unsigned long mask) 8.150 +static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask) 8.151 { 8.152 unsigned long flags; 8.153 - 8.154 - /* 8.155 - * Only the first 8 bits are valid. 8.156 - */ 8.157 - mask = mask << 24; 8.158 + int pin; 8.159 + struct irq_pin_list *entry = irq_2_pin + irq; 8.160 + unsigned int apicid_value; 8.161 + 8.162 + apicid_value = cpu_mask_to_apicid(cpumask); 8.163 + /* Prepare to do the io_apic_write */ 8.164 + apicid_value = apicid_value << 24; 8.165 spin_lock_irqsave(&ioapic_lock, flags); 8.166 - __DO_ACTION(1, = mask, ) 8.167 + for (;;) { 8.168 + pin = entry->pin; 8.169 + if (pin == -1) 8.170 + break; 8.171 + io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value); 8.172 + if (!entry->next) 8.173 + break; 8.174 + entry = irq_2_pin + entry->next; 8.175 + } 8.176 spin_unlock_irqrestore(&ioapic_lock, flags); 8.177 } 8.178 8.179 -#define balance_irq(_irq) ((void)0) 8.180 +#if defined(CONFIG_IRQBALANCE) 8.181 +# include <asm/processor.h> /* kernel_thread() */ 8.182 +# include <xen/kernel_stat.h> /* kstat */ 8.183 +# include <xen/slab.h> /* kmalloc() */ 8.184 +# include <xen/timer.h> /* time_after() */ 8.185 + 8.186 +# ifdef CONFIG_BALANCED_IRQ_DEBUG 8.187 +# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0) 8.188 +# define Dprintk(x...) do { TDprintk(x); } while (0) 8.189 +# else 8.190 +# define TDprintk(x...) 8.191 +# define Dprintk(x...) 8.192 +# endif 8.193 + 8.194 +cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS]; 8.195 8.196 -/* 8.197 - * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 8.198 - * specific CPU-side IRQs. 
8.199 - */ 8.200 +#define IRQBALANCE_CHECK_ARCH -999 8.201 +static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH; 8.202 +static int physical_balance = 0; 8.203 + 8.204 +struct irq_cpu_info { 8.205 + unsigned long * last_irq; 8.206 + unsigned long * irq_delta; 8.207 + unsigned long irq; 8.208 +} irq_cpu_data[NR_CPUS]; 8.209 + 8.210 +#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) 8.211 +#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq]) 8.212 +#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq]) 8.213 + 8.214 +#define IDLE_ENOUGH(cpu,now) \ 8.215 + (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1)) 8.216 + 8.217 +#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) 8.218 8.219 -#define MAX_PIRQS 8 8.220 -int pirq_entries [MAX_PIRQS]; 8.221 -int pirqs_enabled; 8.222 +#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i])) 8.223 + 8.224 +#define MAX_BALANCED_IRQ_INTERVAL (5*HZ) 8.225 +#define MIN_BALANCED_IRQ_INTERVAL (HZ/2) 8.226 +#define BALANCED_IRQ_MORE_DELTA (HZ/10) 8.227 +#define BALANCED_IRQ_LESS_DELTA (HZ) 8.228 + 8.229 +long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL; 8.230 + 8.231 +static unsigned long move(int curr_cpu, cpumask_t allowed_mask, 8.232 + unsigned long now, int direction) 8.233 +{ 8.234 + int search_idle = 1; 8.235 + int cpu = curr_cpu; 8.236 + 8.237 + goto inside; 8.238 8.239 -int skip_ioapic_setup; 8.240 -#if 0 8.241 + do { 8.242 + if (unlikely(cpu == curr_cpu)) 8.243 + search_idle = 0; 8.244 +inside: 8.245 + if (direction == 1) { 8.246 + cpu++; 8.247 + if (cpu >= NR_CPUS) 8.248 + cpu = 0; 8.249 + } else { 8.250 + cpu--; 8.251 + if (cpu == -1) 8.252 + cpu = NR_CPUS-1; 8.253 + } 8.254 + } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) || 8.255 + (search_idle && !IDLE_ENOUGH(cpu,now))); 8.256 8.257 -static int __init noioapic_setup(char *str) 8.258 -{ 8.259 - skip_ioapic_setup = 1; 8.260 - return 1; 8.261 + return cpu; 8.262 } 8.263 8.264 -__setup("noapic", noioapic_setup); 8.265 +static inline void balance_irq(int cpu, int irq) 8.266 +{ 8.267 + unsigned long now = jiffies; 8.268 + cpumask_t allowed_mask; 8.269 + unsigned int new_cpu; 8.270 + 8.271 + if (irqbalance_disabled) 8.272 + return; 8.273 + 8.274 + cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]); 8.275 + new_cpu = move(cpu, allowed_mask, now, 1); 8.276 + if (cpu != new_cpu) { 8.277 + irq_desc_t *desc = irq_desc + irq; 8.278 + unsigned long flags; 8.279 8.280 -static int __init ioapic_setup(char *str) 8.281 + spin_lock_irqsave(&desc->lock, flags); 8.282 + pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu); 8.283 + spin_unlock_irqrestore(&desc->lock, flags); 8.284 + } 8.285 +} 8.286 + 8.287 +static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) 8.288 { 8.289 - skip_ioapic_setup = 0; 8.290 - return 1; 8.291 + int i, j; 8.292 + Dprintk("Rotating IRQs among CPUs.\n"); 8.293 + for (i = 0; i < NR_CPUS; i++) { 8.294 + for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { 8.295 + if (!irq_desc[j].action) 8.296 + continue; 8.297 + /* Is it a significant load ? 
*/ 8.298 + if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) < 8.299 + useful_load_threshold) 8.300 + continue; 8.301 + balance_irq(i, j); 8.302 + } 8.303 + } 8.304 + balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, 8.305 + balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); 8.306 + return; 8.307 } 8.308 8.309 -__setup("apic", ioapic_setup); 8.310 - 8.311 - 8.312 - 8.313 -static int __init ioapic_pirq_setup(char *str) 8.314 +static void do_irq_balance(void) 8.315 { 8.316 - int i, max; 8.317 - int ints[MAX_PIRQS+1]; 8.318 + int i, j; 8.319 + unsigned long max_cpu_irq = 0, min_cpu_irq = (~0); 8.320 + unsigned long move_this_load = 0; 8.321 + int max_loaded = 0, min_loaded = 0; 8.322 + int load; 8.323 + unsigned long useful_load_threshold = balanced_irq_interval + 10; 8.324 + int selected_irq; 8.325 + int tmp_loaded, first_attempt = 1; 8.326 + unsigned long tmp_cpu_irq; 8.327 + unsigned long imbalance = 0; 8.328 + cpumask_t allowed_mask, target_cpu_mask, tmp; 8.329 + 8.330 + for (i = 0; i < NR_CPUS; i++) { 8.331 + int package_index; 8.332 + CPU_IRQ(i) = 0; 8.333 + if (!cpu_online(i)) 8.334 + continue; 8.335 + package_index = CPU_TO_PACKAGEINDEX(i); 8.336 + for (j = 0; j < NR_IRQS; j++) { 8.337 + unsigned long value_now, delta; 8.338 + /* Is this an active IRQ? */ 8.339 + if (!irq_desc[j].action) 8.340 + continue; 8.341 + if ( package_index == i ) 8.342 + IRQ_DELTA(package_index,j) = 0; 8.343 + /* Determine the total count per processor per IRQ */ 8.344 + value_now = (unsigned long) kstat_cpu(i).irqs[j]; 8.345 + 8.346 + /* Determine the activity per processor per IRQ */ 8.347 + delta = value_now - LAST_CPU_IRQ(i,j); 8.348 + 8.349 + /* Update last_cpu_irq[][] for the next time */ 8.350 + LAST_CPU_IRQ(i,j) = value_now; 8.351 + 8.352 + /* Ignore IRQs whose rate is less than the clock */ 8.353 + if (delta < useful_load_threshold) 8.354 + continue; 8.355 + /* update the load for the processor or package total */ 8.356 + IRQ_DELTA(package_index,j) += delta; 8.357 8.358 - get_options(str, ARRAY_SIZE(ints), ints); 8.359 + /* Keep track of the higher numbered sibling as well */ 8.360 + if (i != package_index) 8.361 + CPU_IRQ(i) += delta; 8.362 + /* 8.363 + * We have sibling A and sibling B in the package 8.364 + * 8.365 + * cpu_irq[A] = load for cpu A + load for cpu B 8.366 + * cpu_irq[B] = load for cpu B 8.367 + */ 8.368 + CPU_IRQ(package_index) += delta; 8.369 + } 8.370 + } 8.371 + /* Find the least loaded processor package */ 8.372 + for (i = 0; i < NR_CPUS; i++) { 8.373 + if (!cpu_online(i)) 8.374 + continue; 8.375 + if (i != CPU_TO_PACKAGEINDEX(i)) 8.376 + continue; 8.377 + if (min_cpu_irq > CPU_IRQ(i)) { 8.378 + min_cpu_irq = CPU_IRQ(i); 8.379 + min_loaded = i; 8.380 + } 8.381 + } 8.382 + max_cpu_irq = ULONG_MAX; 8.383 8.384 - for (i = 0; i < MAX_PIRQS; i++) 8.385 - pirq_entries[i] = -1; 8.386 +tryanothercpu: 8.387 + /* Look for heaviest loaded processor. 8.388 + * We may come back to get the next heaviest loaded processor. 8.389 + * Skip processors with trivial loads. 
8.390 + */ 8.391 + tmp_cpu_irq = 0; 8.392 + tmp_loaded = -1; 8.393 + for (i = 0; i < NR_CPUS; i++) { 8.394 + if (!cpu_online(i)) 8.395 + continue; 8.396 + if (i != CPU_TO_PACKAGEINDEX(i)) 8.397 + continue; 8.398 + if (max_cpu_irq <= CPU_IRQ(i)) 8.399 + continue; 8.400 + if (tmp_cpu_irq < CPU_IRQ(i)) { 8.401 + tmp_cpu_irq = CPU_IRQ(i); 8.402 + tmp_loaded = i; 8.403 + } 8.404 + } 8.405 8.406 - pirqs_enabled = 1; 8.407 - printk(KERN_INFO "PIRQ redirection, working around broken MP-BIOS.\n"); 8.408 - max = MAX_PIRQS; 8.409 - if (ints[0] < MAX_PIRQS) 8.410 - max = ints[0]; 8.411 + if (tmp_loaded == -1) { 8.412 + /* In the case of small number of heavy interrupt sources, 8.413 + * loading some of the cpus too much. We use Ingo's original 8.414 + * approach to rotate them around. 8.415 + */ 8.416 + if (!first_attempt && imbalance >= useful_load_threshold) { 8.417 + rotate_irqs_among_cpus(useful_load_threshold); 8.418 + return; 8.419 + } 8.420 + goto not_worth_the_effort; 8.421 + } 8.422 + 8.423 + first_attempt = 0; /* heaviest search */ 8.424 + max_cpu_irq = tmp_cpu_irq; /* load */ 8.425 + max_loaded = tmp_loaded; /* processor */ 8.426 + imbalance = (max_cpu_irq - min_cpu_irq) / 2; 8.427 + 8.428 + Dprintk("max_loaded cpu = %d\n", max_loaded); 8.429 + Dprintk("min_loaded cpu = %d\n", min_loaded); 8.430 + Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq); 8.431 + Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq); 8.432 + Dprintk("load imbalance = %lu\n", imbalance); 8.433 + 8.434 + /* if imbalance is less than approx 10% of max load, then 8.435 + * observe diminishing returns action. - quit 8.436 + */ 8.437 + if (imbalance < (max_cpu_irq >> 3)) { 8.438 + Dprintk("Imbalance too trivial\n"); 8.439 + goto not_worth_the_effort; 8.440 + } 8.441 + 8.442 +tryanotherirq: 8.443 + /* if we select an IRQ to move that can't go where we want, then 8.444 + * see if there is another one to try. 8.445 + */ 8.446 + move_this_load = 0; 8.447 + selected_irq = -1; 8.448 + for (j = 0; j < NR_IRQS; j++) { 8.449 + /* Is this an active IRQ? */ 8.450 + if (!irq_desc[j].action) 8.451 + continue; 8.452 + if (imbalance <= IRQ_DELTA(max_loaded,j)) 8.453 + continue; 8.454 + /* Try to find the IRQ that is closest to the imbalance 8.455 + * without going over. 8.456 + */ 8.457 + if (move_this_load < IRQ_DELTA(max_loaded,j)) { 8.458 + move_this_load = IRQ_DELTA(max_loaded,j); 8.459 + selected_irq = j; 8.460 + } 8.461 + } 8.462 + if (selected_irq == -1) { 8.463 + goto tryanothercpu; 8.464 + } 8.465 8.466 - for (i = 0; i < max; i++) { 8.467 - printk(KERN_DEBUG "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); 8.468 - /* 8.469 - * PIRQs are mapped upside down, usually. 8.470 + imbalance = move_this_load; 8.471 + 8.472 + /* For physical_balance case, we accumlated both load 8.473 + * values in the one of the siblings cpu_irq[], 8.474 + * to use the same code for physical and logical processors 8.475 + * as much as possible. 8.476 + * 8.477 + * NOTE: the cpu_irq[] array holds the sum of the load for 8.478 + * sibling A and sibling B in the slot for the lowest numbered 8.479 + * sibling (A), _AND_ the load for sibling B in the slot for 8.480 + * the higher numbered sibling. 
8.481 + * 8.482 + * We seek the least loaded sibling by making the comparison 8.483 + * (A+B)/2 vs B 8.484 + */ 8.485 + load = CPU_IRQ(min_loaded) >> 1; 8.486 + for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) { 8.487 + if (load > CPU_IRQ(j)) { 8.488 + /* This won't change cpu_sibling_map[min_loaded] */ 8.489 + load = CPU_IRQ(j); 8.490 + min_loaded = j; 8.491 + } 8.492 + } 8.493 + 8.494 + cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]); 8.495 + target_cpu_mask = cpumask_of_cpu(min_loaded); 8.496 + cpus_and(tmp, target_cpu_mask, allowed_mask); 8.497 + 8.498 + if (!cpus_empty(tmp)) { 8.499 + irq_desc_t *desc = irq_desc + selected_irq; 8.500 + unsigned long flags; 8.501 + 8.502 + Dprintk("irq = %d moved to cpu = %d\n", 8.503 + selected_irq, min_loaded); 8.504 + /* mark for change destination */ 8.505 + spin_lock_irqsave(&desc->lock, flags); 8.506 + pending_irq_balance_cpumask[selected_irq] = 8.507 + cpumask_of_cpu(min_loaded); 8.508 + spin_unlock_irqrestore(&desc->lock, flags); 8.509 + /* Since we made a change, come back sooner to 8.510 + * check for more variation. 8.511 */ 8.512 - pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; 8.513 + balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, 8.514 + balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); 8.515 + return; 8.516 } 8.517 - return 1; 8.518 + goto tryanotherirq; 8.519 + 8.520 +not_worth_the_effort: 8.521 + /* 8.522 + * if we did not find an IRQ to move, then adjust the time interval 8.523 + * upward 8.524 + */ 8.525 + balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, 8.526 + balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); 8.527 + Dprintk("IRQ worth rotating not found\n"); 8.528 + return; 8.529 } 8.530 8.531 -__setup("pirq=", ioapic_pirq_setup); 8.532 +static int balanced_irq(void *unused) 8.533 +{ 8.534 + int i; 8.535 + unsigned long prev_balance_time = jiffies; 8.536 + long time_remaining = balanced_irq_interval; 8.537 + 8.538 + daemonize("kirqd"); 8.539 + 8.540 + /* push everything to CPU 0 to give us a starting point. */ 8.541 + for (i = 0 ; i < NR_IRQS ; i++) { 8.542 + pending_irq_balance_cpumask[i] = cpumask_of_cpu(0); 8.543 + } 8.544 + 8.545 + for ( ; ; ) { 8.546 + set_current_state(TASK_INTERRUPTIBLE); 8.547 + time_remaining = schedule_timeout(time_remaining); 8.548 + try_to_freeze(PF_FREEZE); 8.549 + if (time_after(jiffies, 8.550 + prev_balance_time+balanced_irq_interval)) { 8.551 + do_irq_balance(); 8.552 + prev_balance_time = jiffies; 8.553 + time_remaining = balanced_irq_interval; 8.554 + } 8.555 + } 8.556 + return 0; 8.557 +} 8.558 + 8.559 +static int __init balanced_irq_init(void) 8.560 +{ 8.561 + int i; 8.562 + struct cpuinfo_x86 *c; 8.563 + cpumask_t tmp; 8.564 + 8.565 + cpus_shift_right(tmp, cpu_online_map, 2); 8.566 + c = &boot_cpu_data; 8.567 + /* When not overwritten by the command line ask subarchitecture. 
*/ 8.568 + if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) 8.569 + irqbalance_disabled = NO_BALANCE_IRQ; 8.570 + if (irqbalance_disabled) 8.571 + return 0; 8.572 + 8.573 + /* disable irqbalance completely if there is only one processor online */ 8.574 + if (num_online_cpus() < 2) { 8.575 + irqbalance_disabled = 1; 8.576 + return 0; 8.577 + } 8.578 + /* 8.579 + * Enable physical balance only if more than 1 physical processor 8.580 + * is present 8.581 + */ 8.582 + if (smp_num_siblings > 1 && !cpus_empty(tmp)) 8.583 + physical_balance = 1; 8.584 8.585 -#endif 8.586 + for (i = 0; i < NR_CPUS; i++) { 8.587 + if (!cpu_online(i)) 8.588 + continue; 8.589 + irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 8.590 + irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 8.591 + if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { 8.592 + printk(KERN_ERR "balanced_irq_init: out of memory"); 8.593 + goto failed; 8.594 + } 8.595 + memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS); 8.596 + memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS); 8.597 + } 8.598 + 8.599 + printk(KERN_INFO "Starting balanced_irq\n"); 8.600 + if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0) 8.601 + return 0; 8.602 + else 8.603 + printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); 8.604 +failed: 8.605 + for (i = 0; i < NR_CPUS; i++) { 8.606 + if(irq_cpu_data[i].irq_delta) 8.607 + kfree(irq_cpu_data[i].irq_delta); 8.608 + if(irq_cpu_data[i].last_irq) 8.609 + kfree(irq_cpu_data[i].last_irq); 8.610 + } 8.611 + return 0; 8.612 +} 8.613 + 8.614 +int __init irqbalance_disable(char *str) 8.615 +{ 8.616 + irqbalance_disabled = 1; 8.617 + return 0; 8.618 +} 8.619 + 8.620 +__setup("noirqbalance", irqbalance_disable); 8.621 + 8.622 +static inline void move_irq(int irq) 8.623 +{ 8.624 + /* note - we hold the desc->lock */ 8.625 + if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) { 8.626 + set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]); 8.627 + cpus_clear(pending_irq_balance_cpumask[irq]); 8.628 + } 8.629 +} 8.630 + 8.631 +late_initcall(balanced_irq_init); 8.632 + 8.633 +#else /* !CONFIG_IRQBALANCE */ 8.634 +static inline void move_irq(int irq) { } 8.635 +#endif /* CONFIG_IRQBALANCE */ 8.636 + 8.637 +#ifndef CONFIG_SMP 8.638 +void fastcall send_IPI_self(int vector) 8.639 +{ 8.640 + unsigned int cfg; 8.641 + 8.642 + /* 8.643 + * Wait for idle. 8.644 + */ 8.645 + apic_wait_icr_idle(); 8.646 + cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; 8.647 + /* 8.648 + * Send the IPI. The write to APIC_ICR fires this off. 8.649 + */ 8.650 + apic_write_around(APIC_ICR, cfg); 8.651 +} 8.652 +#endif /* !CONFIG_SMP */ 8.653 8.654 /* 8.655 * Find the IRQ entry number of a certain pin. 
8.656 */ 8.657 -static int __init find_irq_entry(int apic, int pin, int type) 8.658 +static int find_irq_entry(int apic, int pin, int type) 8.659 { 8.660 int i; 8.661 8.662 @@ -289,7 +685,7 @@ static int __init find_irq_entry(int api 8.663 /* 8.664 * Find the pin to which IRQ[irq] (ISA) is connected 8.665 */ 8.666 -static int __init find_isa_irq_pin(int irq, int type) 8.667 +static int find_isa_irq_pin(int irq, int type) 8.668 { 8.669 int i; 8.670 8.671 @@ -298,7 +694,9 @@ static int __init find_isa_irq_pin(int i 8.672 8.673 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || 8.674 mp_bus_id_to_type[lbus] == MP_BUS_EISA || 8.675 - mp_bus_id_to_type[lbus] == MP_BUS_MCA) && 8.676 + mp_bus_id_to_type[lbus] == MP_BUS_MCA || 8.677 + mp_bus_id_to_type[lbus] == MP_BUS_NEC98 8.678 + ) && 8.679 (mp_irqs[i].mpc_irqtype == type) && 8.680 (mp_irqs[i].mpc_srcbusirq == irq)) 8.681 8.682 @@ -313,16 +711,81 @@ static int __init find_isa_irq_pin(int i 8.683 */ 8.684 static int pin_2_irq(int idx, int apic, int pin); 8.685 8.686 +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) 8.687 +{ 8.688 + int apic, i, best_guess = -1; 8.689 + 8.690 + apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, " 8.691 + "slot:%d, pin:%d.\n", bus, slot, pin); 8.692 + if (mp_bus_id_to_pci_bus[bus] == -1) { 8.693 + printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus); 8.694 + return -1; 8.695 + } 8.696 + for (i = 0; i < mp_irq_entries; i++) { 8.697 + int lbus = mp_irqs[i].mpc_srcbus; 8.698 + 8.699 + for (apic = 0; apic < nr_ioapics; apic++) 8.700 + if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic || 8.701 + mp_irqs[i].mpc_dstapic == MP_APIC_ALL) 8.702 + break; 8.703 + 8.704 + if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) && 8.705 + !mp_irqs[i].mpc_irqtype && 8.706 + (bus == lbus) && 8.707 + (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { 8.708 + int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq); 8.709 + 8.710 + if (!(apic || IO_APIC_IRQ(irq))) 8.711 + continue; 8.712 + 8.713 + if (pin == (mp_irqs[i].mpc_srcbusirq & 3)) 8.714 + return irq; 8.715 + /* 8.716 + * Use the first all-but-pin matching entry as a 8.717 + * best-guess fuzzy result for broken mptables. 
8.718 + */ 8.719 + if (best_guess < 0) 8.720 + best_guess = irq; 8.721 + } 8.722 + } 8.723 + return best_guess; 8.724 +} 8.725 + 8.726 +/* 8.727 + * This function currently is only a helper for the i386 smp boot process where 8.728 + * we need to reprogram the ioredtbls to cater for the cpus which have come online 8.729 + * so mask in all cases should simply be TARGET_CPUS 8.730 + */ 8.731 +void __init setup_ioapic_dest(void) 8.732 +{ 8.733 + int pin, ioapic, irq, irq_entry; 8.734 + 8.735 + if (skip_ioapic_setup == 1) 8.736 + return; 8.737 + 8.738 + for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { 8.739 + for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { 8.740 + irq_entry = find_irq_entry(ioapic, pin, mp_INT); 8.741 + if (irq_entry == -1) 8.742 + continue; 8.743 + irq = pin_2_irq(irq_entry, ioapic, pin); 8.744 + set_ioapic_affinity_irq(irq, TARGET_CPUS); 8.745 + } 8.746 + 8.747 + } 8.748 +} 8.749 + 8.750 /* 8.751 * EISA Edge/Level control register, ELCR 8.752 */ 8.753 -static int __init EISA_ELCR(unsigned int irq) 8.754 +static int EISA_ELCR(unsigned int irq) 8.755 { 8.756 if (irq < 16) { 8.757 unsigned int port = 0x4d0 + (irq >> 3); 8.758 return (inb(port) >> (irq & 7)) & 1; 8.759 } 8.760 - printk(KERN_INFO "Broken MPtable reports ISA irq %d\n", irq); 8.761 + apic_printk(APIC_VERBOSE, KERN_INFO 8.762 + "Broken MPtable reports ISA irq %d\n", irq); 8.763 return 0; 8.764 } 8.765 8.766 @@ -352,6 +815,12 @@ static int __init EISA_ELCR(unsigned int 8.767 #define default_MCA_trigger(idx) (1) 8.768 #define default_MCA_polarity(idx) (0) 8.769 8.770 +/* NEC98 interrupts are always polarity zero edge triggered, 8.771 + * when listed as conforming in the MP table. */ 8.772 + 8.773 +#define default_NEC98_trigger(idx) (0) 8.774 +#define default_NEC98_polarity(idx) (0) 8.775 + 8.776 static int __init MPBIOS_polarity(int idx) 8.777 { 8.778 int bus = mp_irqs[idx].mpc_srcbus; 8.779 @@ -386,6 +855,11 @@ static int __init MPBIOS_polarity(int id 8.780 polarity = default_MCA_polarity(idx); 8.781 break; 8.782 } 8.783 + case MP_BUS_NEC98: /* NEC 98 pin */ 8.784 + { 8.785 + polarity = default_NEC98_polarity(idx); 8.786 + break; 8.787 + } 8.788 default: 8.789 { 8.790 printk(KERN_WARNING "broken BIOS!!\n"); 8.791 @@ -421,7 +895,7 @@ static int __init MPBIOS_polarity(int id 8.792 return polarity; 8.793 } 8.794 8.795 -static int __init MPBIOS_trigger(int idx) 8.796 +static int MPBIOS_trigger(int idx) 8.797 { 8.798 int bus = mp_irqs[idx].mpc_srcbus; 8.799 int trigger; 8.800 @@ -455,6 +929,11 @@ static int __init MPBIOS_trigger(int idx 8.801 trigger = default_MCA_trigger(idx); 8.802 break; 8.803 } 8.804 + case MP_BUS_NEC98: /* NEC 98 pin */ 8.805 + { 8.806 + trigger = default_NEC98_trigger(idx); 8.807 + break; 8.808 + } 8.809 default: 8.810 { 8.811 printk(KERN_WARNING "broken BIOS!!\n"); 8.812 @@ -516,6 +995,7 @@ static int pin_2_irq(int idx, int apic, 8.813 case MP_BUS_ISA: /* ISA pin */ 8.814 case MP_BUS_EISA: 8.815 case MP_BUS_MCA: 8.816 + case MP_BUS_NEC98: 8.817 { 8.818 irq = mp_irqs[idx].mpc_srcbusirq; 8.819 break; 8.820 @@ -529,6 +1009,13 @@ static int pin_2_irq(int idx, int apic, 8.821 while (i < apic) 8.822 irq += nr_ioapic_registers[i++]; 8.823 irq += pin; 8.824 + 8.825 + /* 8.826 + * For MPS mode, so far only needed by ES7000 platform 8.827 + */ 8.828 + if (ioapic_renumber_irq) 8.829 + irq = ioapic_renumber_irq(apic, irq); 8.830 + 8.831 break; 8.832 } 8.833 default: 8.834 @@ -539,20 +1026,6 @@ static int pin_2_irq(int idx, int apic, 8.835 } 8.836 } 8.837 8.838 - /* 8.839 - * PCI IRQ command line 
redirection. Yes, limits are hardcoded. 8.840 - */ 8.841 - if ((pin >= 16) && (pin <= 23)) { 8.842 - if (pirq_entries[pin-16] != -1) { 8.843 - if (!pirq_entries[pin-16]) { 8.844 - printk(KERN_DEBUG "disabling PIRQ%d\n", pin-16); 8.845 - } else { 8.846 - irq = pirq_entries[pin-16]; 8.847 - printk(KERN_DEBUG "using PIRQ%d -> IRQ %d\n", 8.848 - pin-16, irq); 8.849 - } 8.850 - } 8.851 - } 8.852 return irq; 8.853 } 8.854 8.855 @@ -573,90 +1046,74 @@ static inline int IO_APIC_irq_trigger(in 8.856 return 0; 8.857 } 8.858 8.859 -int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 }; 8.860 - 8.861 -int vector_irq[256]; 8.862 +/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ 8.863 +u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; 8.864 8.865 int assign_irq_vector(int irq) 8.866 { 8.867 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 8.868 - if (IO_APIC_VECTOR(irq) > 0) 8.869 + 8.870 + BUG_ON(irq >= NR_IRQ_VECTORS); 8.871 + if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 8.872 return IO_APIC_VECTOR(irq); 8.873 next: 8.874 current_vector += 8; 8.875 8.876 - /* Skip the hypercall vector. */ 8.877 - if (current_vector == HYPERCALL_VECTOR) goto next; 8.878 - /* Skip the Linux/BSD fast-trap vector. */ 8.879 - if (current_vector == 0x80) goto next; 8.880 + /* Skip the hypercall vector. */ 8.881 + if (current_vector == HYPERCALL_VECTOR) 8.882 + goto next; 8.883 8.884 - if (current_vector > FIRST_SYSTEM_VECTOR) { 8.885 + /* Skip the Linux/BSD fast-trap vector. */ 8.886 + if (current_vector == 0x80) 8.887 + goto next; 8.888 + 8.889 + if (current_vector >= FIRST_SYSTEM_VECTOR) { 8.890 offset++; 8.891 + if (!(offset%8)) 8.892 + return -ENOSPC; 8.893 current_vector = FIRST_DEVICE_VECTOR + offset; 8.894 } 8.895 8.896 - if (current_vector == FIRST_SYSTEM_VECTOR) 8.897 - panic("ran out of interrupt sources!"); 8.898 - 8.899 - IO_APIC_VECTOR(irq) = current_vector; 8.900 - 8.901 - vector_irq[current_vector] = irq; 8.902 + vector_irq[current_vector] = irq; 8.903 + if (irq != AUTO_ASSIGN) 8.904 + IO_APIC_VECTOR(irq) = current_vector; 8.905 8.906 return current_vector; 8.907 } 8.908 8.909 -extern void (*interrupt[NR_IRQS])(void); 8.910 +static struct hw_interrupt_type ioapic_level_type; 8.911 +static struct hw_interrupt_type ioapic_edge_type; 8.912 8.913 -/* 8.914 - * Level and edge triggered IO-APIC interrupts need different handling, 8.915 - * so we use two separate IRQ descriptors. Edge triggered IRQs can be 8.916 - * handled with the level-triggered descriptor, but that one has slightly 8.917 - * more overhead. Level-triggered interrupts cannot be handled with the 8.918 - * edge-triggered handler, without risking IRQ storms and other ugly 8.919 - * races. 
8.920 - */ 8.921 +#define IOAPIC_AUTO -1 8.922 +#define IOAPIC_EDGE 0 8.923 +#define IOAPIC_LEVEL 1 8.924 8.925 -static unsigned int startup_edge_ioapic_irq(unsigned int irq); 8.926 -#define shutdown_edge_ioapic_irq disable_edge_ioapic_irq 8.927 -#define enable_edge_ioapic_irq unmask_IO_APIC_irq 8.928 -static void disable_edge_ioapic_irq (unsigned int irq); 8.929 -static void ack_edge_ioapic_irq(unsigned int irq); 8.930 -static void end_edge_ioapic_irq (unsigned int i); 8.931 -static struct hw_interrupt_type ioapic_edge_irq_type = { 8.932 - "IO-APIC-edge", 8.933 - startup_edge_ioapic_irq, 8.934 - shutdown_edge_ioapic_irq, 8.935 - enable_edge_ioapic_irq, 8.936 - disable_edge_ioapic_irq, 8.937 - ack_edge_ioapic_irq, 8.938 - end_edge_ioapic_irq, 8.939 - set_ioapic_affinity, 8.940 -}; 8.941 - 8.942 -static unsigned int startup_level_ioapic_irq (unsigned int irq); 8.943 -#define shutdown_level_ioapic_irq mask_IO_APIC_irq 8.944 -#define enable_level_ioapic_irq unmask_IO_APIC_irq 8.945 -#define disable_level_ioapic_irq mask_IO_APIC_irq 8.946 -static void mask_and_ack_level_ioapic_irq (unsigned int irq); 8.947 -static void end_level_ioapic_irq (unsigned int irq); 8.948 -static struct hw_interrupt_type ioapic_level_irq_type = { 8.949 - "IO-APIC-level", 8.950 - startup_level_ioapic_irq, 8.951 - shutdown_level_ioapic_irq, 8.952 - enable_level_ioapic_irq, 8.953 - disable_level_ioapic_irq, 8.954 - mask_and_ack_level_ioapic_irq, 8.955 - end_level_ioapic_irq, 8.956 - set_ioapic_affinity, 8.957 -}; 8.958 +static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) 8.959 +{ 8.960 + if (use_pci_vector() && !platform_legacy_irq(irq)) { 8.961 + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 8.962 + trigger == IOAPIC_LEVEL) 8.963 + irq_desc[vector].handler = &ioapic_level_type; 8.964 + else 8.965 + irq_desc[vector].handler = &ioapic_edge_type; 8.966 + set_intr_gate(vector, interrupt[vector]); 8.967 + } else { 8.968 + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 8.969 + trigger == IOAPIC_LEVEL) 8.970 + irq_desc[irq].handler = &ioapic_level_type; 8.971 + else 8.972 + irq_desc[irq].handler = &ioapic_edge_type; 8.973 + set_intr_gate(vector, interrupt[irq]); 8.974 + } 8.975 +} 8.976 8.977 void __init setup_IO_APIC_irqs(void) 8.978 { 8.979 struct IO_APIC_route_entry entry; 8.980 - int apic, pin, idx, irq, vector; 8.981 + int apic, pin, idx, irq, first_notcon = 1, vector; 8.982 unsigned long flags; 8.983 8.984 - printk(KERN_DEBUG "init IO_APIC IRQs\n"); 8.985 + apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 8.986 8.987 for (apic = 0; apic < nr_ioapics; apic++) { 8.988 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 8.989 @@ -667,13 +1124,24 @@ void __init setup_IO_APIC_irqs(void) 8.990 memset(&entry,0,sizeof(entry)); 8.991 8.992 entry.delivery_mode = INT_DELIVERY_MODE; 8.993 - entry.dest_mode = (INT_DEST_ADDR_MODE != 0); 8.994 + entry.dest_mode = INT_DEST_MODE; 8.995 entry.mask = 0; /* enable IRQ */ 8.996 - entry.dest.logical.logical_dest = target_cpus(); 8.997 + entry.dest.logical.logical_dest = 8.998 + cpu_mask_to_apicid(TARGET_CPUS); 8.999 8.1000 idx = find_irq_entry(apic,pin,mp_INT); 8.1001 - if (idx == -1) 8.1002 + if (idx == -1) { 8.1003 + if (first_notcon) { 8.1004 + apic_printk(APIC_VERBOSE, KERN_DEBUG 8.1005 + " IO-APIC (apicid-pin) %d-%d", 8.1006 + mp_ioapics[apic].mpc_apicid, 8.1007 + pin); 8.1008 + first_notcon = 0; 8.1009 + } else 8.1010 + apic_printk(APIC_VERBOSE, ", %d-%d", 8.1011 + mp_ioapics[apic].mpc_apicid, pin); 8.1012 
continue; 8.1013 + } 8.1014 8.1015 entry.trigger = irq_trigger(idx); 8.1016 entry.polarity = irq_polarity(idx); 8.1017 @@ -688,8 +1156,7 @@ void __init setup_IO_APIC_irqs(void) 8.1018 * skip adding the timer int on secondary nodes, which causes 8.1019 * a small but painful rift in the time-space continuum 8.1020 */ 8.1021 - if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) 8.1022 - && (apic != 0) && (irq == 0)) 8.1023 + if (multi_timer_check(apic, irq)) 8.1024 continue; 8.1025 else 8.1026 add_pin_to_irq(irq, apic, pin); 8.1027 @@ -700,13 +1167,7 @@ void __init setup_IO_APIC_irqs(void) 8.1028 if (IO_APIC_IRQ(irq)) { 8.1029 vector = assign_irq_vector(irq); 8.1030 entry.vector = vector; 8.1031 - 8.1032 - if (IO_APIC_irq_trigger(irq)) 8.1033 - irq_desc[irq].handler = &ioapic_level_irq_type; 8.1034 - else 8.1035 - irq_desc[irq].handler = &ioapic_edge_irq_type; 8.1036 - 8.1037 - set_intr_gate(vector, interrupt[irq]); 8.1038 + ioapic_register_intr(irq, vector, IOAPIC_AUTO); 8.1039 8.1040 if (!apic && (irq < 16)) 8.1041 disable_8259A_irq(irq); 8.1042 @@ -717,11 +1178,13 @@ void __init setup_IO_APIC_irqs(void) 8.1043 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1044 } 8.1045 } 8.1046 + 8.1047 + if (!first_notcon) 8.1048 + apic_printk(APIC_VERBOSE, " not connected.\n"); 8.1049 } 8.1050 8.1051 /* 8.1052 - * Set up the 8259A-master output pin as broadcast to all 8.1053 - * CPUs. 8.1054 + * Set up the 8259A-master output pin: 8.1055 */ 8.1056 void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) 8.1057 { 8.1058 @@ -739,9 +1202,9 @@ void __init setup_ExtINT_IRQ0_pin(unsign 8.1059 * We use logical delivery to get the timer IRQ 8.1060 * to the first CPU. 8.1061 */ 8.1062 - entry.dest_mode = (INT_DEST_ADDR_MODE != 0); 8.1063 + entry.dest_mode = INT_DEST_MODE; 8.1064 entry.mask = 0; /* unmask IRQ now */ 8.1065 - entry.dest.logical.logical_dest = target_cpus(); 8.1066 + entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 8.1067 entry.delivery_mode = INT_DELIVERY_MODE; 8.1068 entry.polarity = 0; 8.1069 entry.trigger = 0; 8.1070 @@ -751,7 +1214,7 @@ void __init setup_ExtINT_IRQ0_pin(unsign 8.1071 * The timer IRQ doesn't have to know that behind the 8.1072 * scene we have a 8259A-master in AEOI mode ... 8.1073 */ 8.1074 - irq_desc[0].handler = &ioapic_edge_irq_type; 8.1075 + irq_desc[0].handler = &ioapic_edge_type; 8.1076 8.1077 /* 8.1078 * Add it to the IO-APIC irq-routing table: 8.1079 @@ -764,23 +1227,23 @@ void __init setup_ExtINT_IRQ0_pin(unsign 8.1080 enable_8259A_irq(0); 8.1081 } 8.1082 8.1083 -void __init UNEXPECTED_IO_APIC(void) 8.1084 +static inline void UNEXPECTED_IO_APIC(void) 8.1085 { 8.1086 - printk(KERN_WARNING 8.1087 - "An unexpected IO-APIC was found. 
If this kernel release is less than\n" 8.1088 - "three months old please report this to linux-smp@vger.kernel.org\n"); 8.1089 } 8.1090 8.1091 void __init print_IO_APIC(void) 8.1092 { 8.1093 -#ifdef VERBOSE 8.1094 +#ifndef NDEBUG 8.1095 int apic, i; 8.1096 - struct IO_APIC_reg_00 reg_00; 8.1097 - struct IO_APIC_reg_01 reg_01; 8.1098 - struct IO_APIC_reg_02 reg_02; 8.1099 - struct IO_APIC_reg_03 reg_03; 8.1100 + union IO_APIC_reg_00 reg_00; 8.1101 + union IO_APIC_reg_01 reg_01; 8.1102 + union IO_APIC_reg_02 reg_02; 8.1103 + union IO_APIC_reg_03 reg_03; 8.1104 unsigned long flags; 8.1105 8.1106 + if (apic_verbosity == APIC_QUIET) 8.1107 + return; 8.1108 + 8.1109 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 8.1110 for (i = 0; i < nr_ioapics; i++) 8.1111 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 8.1112 @@ -795,47 +1258,46 @@ void __init print_IO_APIC(void) 8.1113 for (apic = 0; apic < nr_ioapics; apic++) { 8.1114 8.1115 spin_lock_irqsave(&ioapic_lock, flags); 8.1116 - *(int *)®_00 = io_apic_read(apic, 0); 8.1117 - *(int *)®_01 = io_apic_read(apic, 1); 8.1118 - if (reg_01.version >= 0x10) 8.1119 - *(int *)®_02 = io_apic_read(apic, 2); 8.1120 - if (reg_01.version >= 0x20) 8.1121 - *(int *)®_03 = io_apic_read(apic, 3); 8.1122 + reg_00.raw = io_apic_read(apic, 0); 8.1123 + reg_01.raw = io_apic_read(apic, 1); 8.1124 + if (reg_01.bits.version >= 0x10) 8.1125 + reg_02.raw = io_apic_read(apic, 2); 8.1126 + if (reg_01.bits.version >= 0x20) 8.1127 + reg_03.raw = io_apic_read(apic, 3); 8.1128 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1129 8.1130 - printk("\n"); 8.1131 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid); 8.1132 - printk(KERN_DEBUG ".... register #00: %08X\n", *(int *)®_00); 8.1133 - printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.ID); 8.1134 - printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.delivery_type); 8.1135 - printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.LTS); 8.1136 - if (reg_00.__reserved_0 || reg_00.__reserved_1 || reg_00.__reserved_2) 8.1137 + printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 8.1138 + printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 8.1139 + printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 8.1140 + printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); 8.1141 + if (reg_00.bits.ID >= get_physical_broadcast()) 8.1142 + UNEXPECTED_IO_APIC(); 8.1143 + if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2) 8.1144 UNEXPECTED_IO_APIC(); 8.1145 8.1146 - printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); 8.1147 - printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.entries); 8.1148 - if ( (reg_01.entries != 0x0f) && /* older (Neptune) boards */ 8.1149 - (reg_01.entries != 0x17) && /* typical ISA+PCI boards */ 8.1150 - (reg_01.entries != 0x1b) && /* Compaq Proliant boards */ 8.1151 - (reg_01.entries != 0x1f) && /* dual Xeon boards */ 8.1152 - (reg_01.entries != 0x22) && /* bigger Xeon boards */ 8.1153 - (reg_01.entries != 0x2E) && 8.1154 - (reg_01.entries != 0x3F) 8.1155 + printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); 8.1156 + printk(KERN_DEBUG "....... 
: max redirection entries: %04X\n", reg_01.bits.entries); 8.1157 + if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */ 8.1158 + (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */ 8.1159 + (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */ 8.1160 + (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */ 8.1161 + (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */ 8.1162 + (reg_01.bits.entries != 0x2E) && 8.1163 + (reg_01.bits.entries != 0x3F) 8.1164 ) 8.1165 UNEXPECTED_IO_APIC(); 8.1166 8.1167 - printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.PRQ); 8.1168 - printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.version); 8.1169 - if ( (reg_01.version != 0x01) && /* 82489DX IO-APICs */ 8.1170 - (reg_01.version != 0x02) && /* VIA */ 8.1171 - (reg_01.version != 0x03) && /* later VIA */ 8.1172 - (reg_01.version != 0x10) && /* oldest IO-APICs */ 8.1173 - (reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */ 8.1174 - (reg_01.version != 0x13) && /* Xeon IO-APICs */ 8.1175 - (reg_01.version != 0x20) /* Intel P64H (82806 AA) */ 8.1176 + printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); 8.1177 + printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); 8.1178 + if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */ 8.1179 + (reg_01.bits.version != 0x10) && /* oldest IO-APICs */ 8.1180 + (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */ 8.1181 + (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */ 8.1182 + (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */ 8.1183 ) 8.1184 UNEXPECTED_IO_APIC(); 8.1185 - if (reg_01.__reserved_1 || reg_01.__reserved_2) 8.1186 + if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2) 8.1187 UNEXPECTED_IO_APIC(); 8.1188 8.1189 /* 8.1190 @@ -843,10 +1305,10 @@ void __init print_IO_APIC(void) 8.1191 * but the value of reg_02 is read as the previous read register 8.1192 * value, so ignore it if reg_02 == reg_01. 8.1193 */ 8.1194 - if (reg_01.version >= 0x10 && *(int *)&reg_02 != *(int *)&reg_01) { 8.1195 - printk(KERN_DEBUG ".... register #02: %08X\n", *(int *)&reg_02); 8.1196 - printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.arbitration); 8.1197 - if (reg_02.__reserved_1 || reg_02.__reserved_2) 8.1198 + if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { 8.1199 + printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); 8.1200 + printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); 8.1201 + if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2) 8.1202 UNEXPECTED_IO_APIC(); 8.1203 } 8.1204 8.1205 @@ -855,11 +1317,11 @@ void __init print_IO_APIC(void) 8.1206 * or reg_03, but the value of reg_0[23] is read as the previous read 8.1207 * register value, so ignore it if reg_03 == reg_0[12]. 8.1208 */ 8.1209 - if (reg_01.version >= 0x20 && *(int *)&reg_03 != *(int *)&reg_02 && 8.1210 - *(int *)&reg_03 != *(int *)&reg_01) { 8.1211 - printk(KERN_DEBUG ".... register #03: %08X\n", *(int *)&reg_03); 8.1212 - printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.boot_DT); 8.1213 - if (reg_03.__reserved_1) 8.1214 + if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && 8.1215 + reg_03.raw != reg_01.raw) { 8.1216 + printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); 8.1217 + printk(KERN_DEBUG "....... 
: Boot DT : %X\n", reg_03.bits.boot_DT); 8.1218 + if (reg_03.bits.__reserved_1) 8.1219 UNEXPECTED_IO_APIC(); 8.1220 } 8.1221 8.1222 @@ -868,7 +1330,7 @@ void __init print_IO_APIC(void) 8.1223 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" 8.1224 " Stat Dest Deli Vect: \n"); 8.1225 8.1226 - for (i = 0; i <= reg_01.entries; i++) { 8.1227 + for (i = 0; i <= reg_01.bits.entries; i++) { 8.1228 struct IO_APIC_route_entry entry; 8.1229 8.1230 spin_lock_irqsave(&ioapic_lock, flags); 8.1231 @@ -894,12 +1356,17 @@ void __init print_IO_APIC(void) 8.1232 ); 8.1233 } 8.1234 } 8.1235 + if (use_pci_vector()) 8.1236 + printk(KERN_INFO "Using vector-based indexing\n"); 8.1237 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 8.1238 for (i = 0; i < NR_IRQS; i++) { 8.1239 struct irq_pin_list *entry = irq_2_pin + i; 8.1240 if (entry->pin < 0) 8.1241 continue; 8.1242 - printk(KERN_DEBUG "IRQ%d ", i); 8.1243 + if (use_pci_vector() && !platform_legacy_irq(i)) 8.1244 + printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i)); 8.1245 + else 8.1246 + printk(KERN_DEBUG "IRQ%d ", i); 8.1247 for (;;) { 8.1248 printk("-> %d:%d", entry->apic, entry->pin); 8.1249 if (!entry->next) 8.1250 @@ -910,152 +1377,13 @@ void __init print_IO_APIC(void) 8.1251 } 8.1252 8.1253 printk(KERN_INFO ".................................... done.\n"); 8.1254 -#endif 8.1255 -} 8.1256 - 8.1257 - 8.1258 -#if 0 /* Maybe useful for debugging, but not currently used anywhere. */ 8.1259 - 8.1260 -static void print_APIC_bitfield (int base) 8.1261 -{ 8.1262 - unsigned int v; 8.1263 - int i, j; 8.1264 - 8.1265 - printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG); 8.1266 - for (i = 0; i < 8; i++) { 8.1267 - v = apic_read(base + i*0x10); 8.1268 - for (j = 0; j < 32; j++) { 8.1269 - if (v & (1<<j)) 8.1270 - printk("1"); 8.1271 - else 8.1272 - printk("0"); 8.1273 - } 8.1274 - printk("\n"); 8.1275 - } 8.1276 +#endif /* !NDEBUG */ 8.1277 + return; 8.1278 } 8.1279 8.1280 - 8.1281 -void /*__init*/ print_local_APIC(void * dummy) 8.1282 -{ 8.1283 - unsigned int v, ver, maxlvt; 8.1284 - 8.1285 - printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", 8.1286 - smp_processor_id(), hard_smp_processor_id()); 8.1287 - v = apic_read(APIC_ID); 8.1288 - printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); 8.1289 - v = apic_read(APIC_LVR); 8.1290 - printk(KERN_INFO "... APIC VERSION: %08x\n", v); 8.1291 - ver = GET_APIC_VERSION(v); 8.1292 - maxlvt = get_maxlvt(); 8.1293 - 8.1294 - v = apic_read(APIC_TASKPRI); 8.1295 - printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); 8.1296 - 8.1297 - if (APIC_INTEGRATED(ver)) { /* !82489DX */ 8.1298 - v = apic_read(APIC_ARBPRI); 8.1299 - printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, 8.1300 - v & APIC_ARBPRI_MASK); 8.1301 - v = apic_read(APIC_PROCPRI); 8.1302 - printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); 8.1303 - } 8.1304 - 8.1305 - v = apic_read(APIC_EOI); 8.1306 - printk(KERN_DEBUG "... APIC EOI: %08x\n", v); 8.1307 - v = apic_read(APIC_RRR); 8.1308 - printk(KERN_DEBUG "... APIC RRR: %08x\n", v); 8.1309 - v = apic_read(APIC_LDR); 8.1310 - printk(KERN_DEBUG "... APIC LDR: %08x\n", v); 8.1311 - v = apic_read(APIC_DFR); 8.1312 - printk(KERN_DEBUG "... APIC DFR: %08x\n", v); 8.1313 - v = apic_read(APIC_SPIV); 8.1314 - printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); 8.1315 - 8.1316 - printk(KERN_DEBUG "... APIC ISR field:\n"); 8.1317 - print_APIC_bitfield(APIC_ISR); 8.1318 - printk(KERN_DEBUG "... 
APIC TMR field:\n"); 8.1319 - print_APIC_bitfield(APIC_TMR); 8.1320 - printk(KERN_DEBUG "... APIC IRR field:\n"); 8.1321 - print_APIC_bitfield(APIC_IRR); 8.1322 - 8.1323 - if (APIC_INTEGRATED(ver)) { /* !82489DX */ 8.1324 - if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 8.1325 - apic_write(APIC_ESR, 0); 8.1326 - v = apic_read(APIC_ESR); 8.1327 - printk(KERN_DEBUG "... APIC ESR: %08x\n", v); 8.1328 - } 8.1329 - 8.1330 - v = apic_read(APIC_ICR); 8.1331 - printk(KERN_DEBUG "... APIC ICR: %08x\n", v); 8.1332 - v = apic_read(APIC_ICR2); 8.1333 - printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); 8.1334 - 8.1335 - v = apic_read(APIC_LVTT); 8.1336 - printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); 8.1337 - 8.1338 - if (maxlvt > 3) { /* PC is LVT#4. */ 8.1339 - v = apic_read(APIC_LVTPC); 8.1340 - printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); 8.1341 - } 8.1342 - v = apic_read(APIC_LVT0); 8.1343 - printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); 8.1344 - v = apic_read(APIC_LVT1); 8.1345 - printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); 8.1346 - 8.1347 - if (maxlvt > 2) { /* ERR is LVT#3. */ 8.1348 - v = apic_read(APIC_LVTERR); 8.1349 - printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); 8.1350 - } 8.1351 - 8.1352 - v = apic_read(APIC_TMICT); 8.1353 - printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); 8.1354 - v = apic_read(APIC_TMCCT); 8.1355 - printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); 8.1356 - v = apic_read(APIC_TDCR); 8.1357 - printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); 8.1358 - printk("\n"); 8.1359 -} 8.1360 - 8.1361 -void print_all_local_APICs (void) 8.1362 -{ 8.1363 - smp_call_function(print_local_APIC, NULL, 1, 1); 8.1364 - print_local_APIC(NULL); 8.1365 -} 8.1366 - 8.1367 -void /*__init*/ print_PIC(void) 8.1368 -{ 8.1369 - extern spinlock_t i8259A_lock; 8.1370 - unsigned int v, flags; 8.1371 - 8.1372 - printk(KERN_DEBUG "\nprinting PIC contents\n"); 8.1373 - 8.1374 - spin_lock_irqsave(&i8259A_lock, flags); 8.1375 - 8.1376 - v = inb(0xa1) << 8 | inb(0x21); 8.1377 - printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 8.1378 - 8.1379 - v = inb(0xa0) << 8 | inb(0x20); 8.1380 - printk(KERN_DEBUG "... PIC IRR: %04x\n", v); 8.1381 - 8.1382 - outb(0x0b,0xa0); 8.1383 - outb(0x0b,0x20); 8.1384 - v = inb(0xa0) << 8 | inb(0x20); 8.1385 - outb(0x0a,0xa0); 8.1386 - outb(0x0a,0x20); 8.1387 - 8.1388 - spin_unlock_irqrestore(&i8259A_lock, flags); 8.1389 - 8.1390 - printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 8.1391 - 8.1392 - v = inb(0x4d1) << 8 | inb(0x4d0); 8.1393 - printk(KERN_DEBUG "... 
PIC ELCR: %04x\n", v); 8.1394 -} 8.1395 - 8.1396 -#endif /* 0 */ 8.1397 - 8.1398 - 8.1399 static void __init enable_IO_APIC(void) 8.1400 { 8.1401 - struct IO_APIC_reg_01 reg_01; 8.1402 + union IO_APIC_reg_01 reg_01; 8.1403 int i; 8.1404 unsigned long flags; 8.1405 8.1406 @@ -1063,18 +1391,15 @@ static void __init enable_IO_APIC(void) 8.1407 irq_2_pin[i].pin = -1; 8.1408 irq_2_pin[i].next = 0; 8.1409 } 8.1410 - if (!pirqs_enabled) 8.1411 - for (i = 0; i < MAX_PIRQS; i++) 8.1412 - pirq_entries[i] = -1; 8.1413 8.1414 /* 8.1415 * The number of IO-APIC IRQ registers (== #pins): 8.1416 */ 8.1417 for (i = 0; i < nr_ioapics; i++) { 8.1418 spin_lock_irqsave(&ioapic_lock, flags); 8.1419 - *(int *)&reg_01 = io_apic_read(i, 1); 8.1420 + reg_01.raw = io_apic_read(i, 1); 8.1421 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1422 - nr_ioapic_registers[i] = reg_01.entries+1; 8.1423 + nr_ioapic_registers[i] = reg_01.bits.entries+1; 8.1424 } 8.1425 8.1426 /* 8.1427 @@ -1103,18 +1428,22 @@ void disable_IO_APIC(void) 8.1428 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 8.1429 */ 8.1430 8.1431 -static void __init setup_ioapic_ids_from_mpc (void) 8.1432 +#ifndef CONFIG_X86_NUMAQ 8.1433 +static void __init setup_ioapic_ids_from_mpc(void) 8.1434 { 8.1435 - struct IO_APIC_reg_00 reg_00; 8.1436 - unsigned long phys_id_present_map = phys_cpu_present_map; 8.1437 + union IO_APIC_reg_00 reg_00; 8.1438 + physid_mask_t phys_id_present_map; 8.1439 int apic; 8.1440 int i; 8.1441 unsigned char old_id; 8.1442 unsigned long flags; 8.1443 8.1444 - if (clustered_apic_mode) 8.1445 - /* We don't have a good way to do this yet - hack */ 8.1446 - phys_id_present_map = (u_long) 0xf; 8.1447 + /* 8.1448 + * This is broken; anything with a real cpu count has to 8.1449 + * circumvent this idiocy regardless. 8.1450 + */ 8.1451 + phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); 8.1452 + 8.1453 /* 8.1454 * Set the IOAPIC ID to the value stored in the MPC table. 8.1455 */ 8.1456 @@ -1122,41 +1451,48 @@ static void __init setup_ioapic_ids_from 8.1457 8.1458 /* Read the register 0 value */ 8.1459 spin_lock_irqsave(&ioapic_lock, flags); 8.1460 - *(int *)&reg_00 = io_apic_read(apic, 0); 8.1461 + reg_00.raw = io_apic_read(apic, 0); 8.1462 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1463 8.1464 old_id = mp_ioapics[apic].mpc_apicid; 8.1465 8.1466 - if (mp_ioapics[apic].mpc_apicid >= apic_broadcast_id) { 8.1467 + if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { 8.1468 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 8.1469 apic, mp_ioapics[apic].mpc_apicid); 8.1470 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 8.1471 - reg_00.ID); 8.1472 - mp_ioapics[apic].mpc_apicid = reg_00.ID; 8.1473 + reg_00.bits.ID); 8.1474 + mp_ioapics[apic].mpc_apicid = reg_00.bits.ID; 8.1475 } 8.1476 8.1477 + /* Don't check I/O APIC IDs for some xAPIC systems. They have 8.1478 + * no meaning without the serial APIC bus. */ 8.1479 + if (NO_IOAPIC_CHECK) 8.1480 + continue; 8.1481 /* 8.1482 * Sanity check, is the ID really free? Every APIC in a 8.1483 * system must have a unique ID or we get lots of nice 8.1484 * 'stuck on smp_invalidate_needed IPI wait' messages. 8.1485 - * I/O APIC IDs no longer have any meaning for xAPICs and SAPICs. 
8.1486 */ 8.1487 - if ((clustered_apic_mode != CLUSTERED_APIC_XAPIC) && 8.1488 - (phys_id_present_map & (1 << mp_ioapics[apic].mpc_apicid))) { 8.1489 + if (check_apicid_used(phys_id_present_map, 8.1490 + mp_ioapics[apic].mpc_apicid)) { 8.1491 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 8.1492 apic, mp_ioapics[apic].mpc_apicid); 8.1493 - for (i = 0; i < 0xf; i++) 8.1494 - if (!(phys_id_present_map & (1 << i))) 8.1495 + for (i = 0; i < get_physical_broadcast(); i++) 8.1496 + if (!physid_isset(i, phys_id_present_map)) 8.1497 break; 8.1498 - if (i >= apic_broadcast_id) 8.1499 + if (i >= get_physical_broadcast()) 8.1500 panic("Max APIC ID exceeded!\n"); 8.1501 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 8.1502 i); 8.1503 - phys_id_present_map |= 1 << i; 8.1504 + physid_set(i, phys_id_present_map); 8.1505 mp_ioapics[apic].mpc_apicid = i; 8.1506 } else { 8.1507 - printk("Setting %d in the phys_id_present_map\n", mp_ioapics[apic].mpc_apicid); 8.1508 - phys_id_present_map |= 1 << mp_ioapics[apic].mpc_apicid; 8.1509 + physid_mask_t tmp; 8.1510 + tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid); 8.1511 + apic_printk(APIC_VERBOSE, "Setting %d in the " 8.1512 + "phys_id_present_map\n", 8.1513 + mp_ioapics[apic].mpc_apicid); 8.1514 + physids_or(phys_id_present_map, phys_id_present_map, tmp); 8.1515 } 8.1516 8.1517 8.1518 @@ -1174,26 +1510,30 @@ static void __init setup_ioapic_ids_from 8.1519 * Read the right value from the MPC table and 8.1520 * write it into the ID register. 8.1521 */ 8.1522 - printk(KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", 8.1523 - mp_ioapics[apic].mpc_apicid); 8.1524 + apic_printk(APIC_VERBOSE, KERN_INFO 8.1525 + "...changing IO-APIC physical APIC ID to %d ...", 8.1526 + mp_ioapics[apic].mpc_apicid); 8.1527 8.1528 - reg_00.ID = mp_ioapics[apic].mpc_apicid; 8.1529 + reg_00.bits.ID = mp_ioapics[apic].mpc_apicid; 8.1530 spin_lock_irqsave(&ioapic_lock, flags); 8.1531 - io_apic_write(apic, 0, *(int *)&reg_00); 8.1532 + io_apic_write(apic, 0, reg_00.raw); 8.1533 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1534 8.1535 /* 8.1536 * Sanity check 8.1537 */ 8.1538 spin_lock_irqsave(&ioapic_lock, flags); 8.1539 - *(int *)&reg_00 = io_apic_read(apic, 0); 8.1540 + reg_00.raw = io_apic_read(apic, 0); 8.1541 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1542 - if (reg_00.ID != mp_ioapics[apic].mpc_apicid) 8.1543 - panic("could not set ID!\n"); 8.1544 + if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) 8.1545 + printk("could not set ID!\n"); 8.1546 else 8.1547 - printk(" ok.\n"); 8.1548 + apic_printk(APIC_VERBOSE, " ok.\n"); 8.1549 } 8.1550 } 8.1551 +#else 8.1552 +static void __init setup_ioapic_ids_from_mpc(void) { } 8.1553 +#endif 8.1554 8.1555 /* 8.1556 * There is a nasty bug in some older SMP boards, their mptable lies 8.1557 @@ -1205,9 +1545,9 @@ static void __init setup_ioapic_ids_from 8.1558 */ 8.1559 static int __init timer_irq_works(void) 8.1560 { 8.1561 - unsigned int t1 = jiffies; 8.1562 + unsigned long t1 = jiffies; 8.1563 8.1564 - __sti(); 8.1565 + local_irq_enable(); 8.1566 /* Let ten ticks pass... */ 8.1567 mdelay((10 * 1000) / HZ); 8.1568 8.1569 @@ -1224,7 +1564,18 @@ static int __init timer_irq_works(void) 8.1570 return 0; 8.1571 } 8.1572 8.1573 -static void disable_edge_ioapic_irq (unsigned int irq) { /* nothing */ } 8.1574 +/* 8.1575 + * In the SMP+IOAPIC case it might happen that there are an unspecified 8.1576 + * number of pending IRQ events unhandled. 
These cases are very rare, 8.1577 + * so we 'resend' these IRQs via IPIs, to the same CPU. It's much 8.1578 + * better to do it this way as thus we do not have to be aware of 8.1579 + * 'pending' interrupts in the IRQ path, except at this point. 8.1580 + */ 8.1581 +/* 8.1582 + * Edge triggered needs to resend any interrupt 8.1583 + * that was delayed but this is now handled in the device 8.1584 + * independent code. 8.1585 + */ 8.1586 8.1587 /* 8.1588 * Starting up a edge-triggered IO-APIC interrupt is 8.1589 @@ -1235,7 +1586,6 @@ static void disable_edge_ioapic_irq (uns 8.1590 * This is not complete - we should be able to fake 8.1591 * an edge even if it isn't on the 8259A... 8.1592 */ 8.1593 - 8.1594 static unsigned int startup_edge_ioapic_irq(unsigned int irq) 8.1595 { 8.1596 int was_pending = 0; 8.1597 @@ -1260,16 +1610,13 @@ static unsigned int startup_edge_ioapic_ 8.1598 */ 8.1599 static void ack_edge_ioapic_irq(unsigned int irq) 8.1600 { 8.1601 - balance_irq(irq); 8.1602 + move_irq(irq); 8.1603 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) 8.1604 == (IRQ_PENDING | IRQ_DISABLED)) 8.1605 mask_IO_APIC_irq(irq); 8.1606 ack_APIC_irq(); 8.1607 } 8.1608 8.1609 -static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ } 8.1610 - 8.1611 - 8.1612 /* 8.1613 * Level triggered interrupts can just be masked, 8.1614 * and shutting down and starting up the interrupt 8.1615 @@ -1291,15 +1638,12 @@ static unsigned int startup_level_ioapic 8.1616 return 0; /* don't check for pending */ 8.1617 } 8.1618 8.1619 -static void mask_and_ack_level_ioapic_irq(unsigned int irq) 8.1620 +static void end_level_ioapic_irq (unsigned int irq) 8.1621 { 8.1622 unsigned long v; 8.1623 int i; 8.1624 8.1625 - balance_irq(irq); 8.1626 - 8.1627 - mask_IO_APIC_irq(irq); 8.1628 - 8.1629 + move_irq(irq); 8.1630 /* 8.1631 * It appears there is an erratum which affects at least version 0x11 8.1632 * of I/O APIC (that's the 82093AA and cores integrated into various 8.1633 @@ -1320,45 +1664,102 @@ static void mask_and_ack_level_ioapic_ir 8.1634 * The idea is from Manfred Spraul. --macro 8.1635 */ 8.1636 i = IO_APIC_VECTOR(irq); 8.1637 + 8.1638 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 8.1639 8.1640 ack_APIC_irq(); 8.1641 8.1642 if (!(v & (1 << (i & 0x1f)))) { 8.1643 -#ifdef APIC_LOCKUP_DEBUG 8.1644 - struct irq_pin_list *entry; 8.1645 -#endif 8.1646 - 8.1647 -#ifdef APIC_MISMATCH_DEBUG 8.1648 atomic_inc(&irq_mis_count); 8.1649 -#endif 8.1650 spin_lock(&ioapic_lock); 8.1651 - __edge_IO_APIC_irq(irq); 8.1652 -#ifdef APIC_LOCKUP_DEBUG 8.1653 - for (entry = irq_2_pin + irq;;) { 8.1654 - unsigned int reg; 8.1655 - 8.1656 - if (entry->pin == -1) 8.1657 - break; 8.1658 - reg = io_apic_read(entry->apic, 0x10 + entry->pin * 2); 8.1659 - if (reg & 0x00004000) 8.1660 - printk(KERN_CRIT "Aieee!!! 
Remote IRR" 8.1661 - " still set after unlock!\n"); 8.1662 - if (!entry->next) 8.1663 - break; 8.1664 - entry = irq_2_pin + entry->next; 8.1665 - } 8.1666 -#endif 8.1667 - __level_IO_APIC_irq(irq); 8.1668 + __mask_and_edge_IO_APIC_irq(irq); 8.1669 + __unmask_and_level_IO_APIC_irq(irq); 8.1670 spin_unlock(&ioapic_lock); 8.1671 } 8.1672 } 8.1673 8.1674 -static void end_level_ioapic_irq(unsigned int irq) 8.1675 +#ifdef CONFIG_PCI_MSI 8.1676 +static unsigned int startup_edge_ioapic_vector(unsigned int vector) 8.1677 +{ 8.1678 + int irq = vector_to_irq(vector); 8.1679 + 8.1680 + return startup_edge_ioapic_irq(irq); 8.1681 +} 8.1682 + 8.1683 +static void ack_edge_ioapic_vector(unsigned int vector) 8.1684 +{ 8.1685 + int irq = vector_to_irq(vector); 8.1686 + 8.1687 + ack_edge_ioapic_irq(irq); 8.1688 +} 8.1689 + 8.1690 +static unsigned int startup_level_ioapic_vector (unsigned int vector) 8.1691 { 8.1692 + int irq = vector_to_irq(vector); 8.1693 + 8.1694 + return startup_level_ioapic_irq (irq); 8.1695 +} 8.1696 + 8.1697 +static void end_level_ioapic_vector (unsigned int vector) 8.1698 +{ 8.1699 + int irq = vector_to_irq(vector); 8.1700 + 8.1701 + end_level_ioapic_irq(irq); 8.1702 +} 8.1703 + 8.1704 +static void mask_IO_APIC_vector (unsigned int vector) 8.1705 +{ 8.1706 + int irq = vector_to_irq(vector); 8.1707 + 8.1708 + mask_IO_APIC_irq(irq); 8.1709 +} 8.1710 + 8.1711 +static void unmask_IO_APIC_vector (unsigned int vector) 8.1712 +{ 8.1713 + int irq = vector_to_irq(vector); 8.1714 + 8.1715 unmask_IO_APIC_irq(irq); 8.1716 } 8.1717 8.1718 +static void set_ioapic_affinity_vector (unsigned int vector, 8.1719 + cpumask_t cpu_mask) 8.1720 +{ 8.1721 + int irq = vector_to_irq(vector); 8.1722 + 8.1723 + set_ioapic_affinity_irq(irq, cpu_mask); 8.1724 +} 8.1725 +#endif 8.1726 + 8.1727 +/* 8.1728 + * Level and edge triggered IO-APIC interrupts need different handling, 8.1729 + * so we use two separate IRQ descriptors. Edge triggered IRQs can be 8.1730 + * handled with the level-triggered descriptor, but that one has slightly 8.1731 + * more overhead. Level-triggered interrupts cannot be handled with the 8.1732 + * edge-triggered handler, without risking IRQ storms and other ugly 8.1733 + * races. 8.1734 + */ 8.1735 +static struct hw_interrupt_type ioapic_edge_type = { 8.1736 + .typename = "IO-APIC-edge", 8.1737 + .startup = startup_edge_ioapic, 8.1738 + .shutdown = shutdown_edge_ioapic, 8.1739 + .enable = enable_edge_ioapic, 8.1740 + .disable = disable_edge_ioapic, 8.1741 + .ack = ack_edge_ioapic, 8.1742 + .end = end_edge_ioapic, 8.1743 + .set_affinity = set_ioapic_affinity, 8.1744 +}; 8.1745 + 8.1746 +static struct hw_interrupt_type ioapic_level_type = { 8.1747 + .typename = "IO-APIC-level", 8.1748 + .startup = startup_level_ioapic, 8.1749 + .shutdown = shutdown_level_ioapic, 8.1750 + .enable = enable_level_ioapic, 8.1751 + .disable = disable_level_ioapic, 8.1752 + .ack = mask_and_ack_level_ioapic, 8.1753 + .end = end_level_ioapic, 8.1754 + .set_affinity = set_ioapic_affinity, 8.1755 +}; 8.1756 + 8.1757 static inline void init_IO_APIC_traps(void) 8.1758 { 8.1759 int irq; 8.1760 @@ -1375,7 +1776,13 @@ static inline void init_IO_APIC_traps(vo 8.1761 * 0x80, because int 0x80 is hm, kind of importantish. 
;) 8.1762 */ 8.1763 for (irq = 0; irq < NR_IRQS ; irq++) { 8.1764 - if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq)) { 8.1765 + int tmp = irq; 8.1766 + if (use_pci_vector()) { 8.1767 + if (!platform_legacy_irq(tmp)) 8.1768 + if ((tmp = vector_to_irq(tmp)) == -1) 8.1769 + continue; 8.1770 + } 8.1771 + if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) { 8.1772 /* 8.1773 * Hmm.. We don't have an entry for this, 8.1774 * so default to an old-fashioned 8259 8.1775 @@ -1414,15 +1821,35 @@ static void ack_lapic_irq (unsigned int 8.1776 static void end_lapic_irq (unsigned int i) { /* nothing */ } 8.1777 8.1778 static struct hw_interrupt_type lapic_irq_type = { 8.1779 - "local-APIC-edge", 8.1780 - NULL, /* startup_irq() not used for IRQ0 */ 8.1781 - NULL, /* shutdown_irq() not used for IRQ0 */ 8.1782 - enable_lapic_irq, 8.1783 - disable_lapic_irq, 8.1784 - ack_lapic_irq, 8.1785 - end_lapic_irq 8.1786 + .typename = "local-APIC-edge", 8.1787 + .startup = NULL, /* startup_irq() not used for IRQ0 */ 8.1788 + .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ 8.1789 + .enable = enable_lapic_irq, 8.1790 + .disable = disable_lapic_irq, 8.1791 + .ack = ack_lapic_irq, 8.1792 + .end = end_lapic_irq 8.1793 }; 8.1794 8.1795 +#if 0 8.1796 +static void setup_nmi (void) 8.1797 +{ 8.1798 + /* 8.1799 + * Dirty trick to enable the NMI watchdog ... 8.1800 + * We put the 8259A master into AEOI mode and 8.1801 + * unmask on all local APICs LVT0 as NMI. 8.1802 + * 8.1803 + * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') 8.1804 + * is from Maciej W. Rozycki - so we do not have to EOI from 8.1805 + * the NMI handler or the timer interrupt. 8.1806 + */ 8.1807 + apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); 8.1808 + 8.1809 + on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1); 8.1810 + 8.1811 + apic_printk(APIC_VERBOSE, " done.\n"); 8.1812 +} 8.1813 +#endif 8.1814 + 8.1815 /* 8.1816 * This looks a bit hackish but it's about the only one way of sending 8.1817 * a few INTA cycles to 8259As and any associated glue logic. ICR does 8.1818 @@ -1493,7 +1920,6 @@ static inline void unlock_ExtINT_logic(v 8.1819 */ 8.1820 static inline void check_timer(void) 8.1821 { 8.1822 - extern int timer_ack; 8.1823 int pin1, pin2; 8.1824 int vector; 8.1825 8.1826 @@ -1526,8 +1952,17 @@ static inline void check_timer(void) 8.1827 * Ok, does IRQ0 through the IOAPIC work? 
8.1828 */ 8.1829 unmask_IO_APIC_irq(0); 8.1830 - if (timer_irq_works()) 8.1831 + if (timer_irq_works()) { 8.1832 +#if 0 8.1833 + if (nmi_watchdog == NMI_IO_APIC) { 8.1834 + disable_8259A_irq(0); 8.1835 + setup_nmi(); 8.1836 + enable_8259A_irq(0); 8.1837 + check_nmi_watchdog(); 8.1838 + } 8.1839 +#endif 8.1840 return; 8.1841 + } 8.1842 clear_IO_APIC_pin(0, pin1); 8.1843 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); 8.1844 } 8.1845 @@ -1545,6 +1980,12 @@ static inline void check_timer(void) 8.1846 replace_pin_at_irq(0, 0, pin1, 0, pin2); 8.1847 else 8.1848 add_pin_to_irq(0, 0, pin2); 8.1849 +#if 0 8.1850 + if (nmi_watchdog == NMI_IO_APIC) { 8.1851 + setup_nmi(); 8.1852 + check_nmi_watchdog(); 8.1853 + } 8.1854 +#endif 8.1855 return; 8.1856 } 8.1857 /* 8.1858 @@ -1554,6 +1995,11 @@ static inline void check_timer(void) 8.1859 } 8.1860 printk(" failed.\n"); 8.1861 8.1862 + if (nmi_watchdog == NMI_IO_APIC) { 8.1863 + printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); 8.1864 + nmi_watchdog = 0; 8.1865 + } 8.1866 + 8.1867 printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); 8.1868 8.1869 disable_8259A_irq(0); 8.1870 @@ -1570,6 +2016,7 @@ static inline void check_timer(void) 8.1871 8.1872 printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); 8.1873 8.1874 + timer_ack = 0; 8.1875 init_8259A(0); 8.1876 make_8259A_irq(0); 8.1877 apic_write_around(APIC_LVT0, APIC_DM_EXTINT); 8.1878 @@ -1581,7 +2028,8 @@ static inline void check_timer(void) 8.1879 return; 8.1880 } 8.1881 printk(" failed :(.\n"); 8.1882 - panic("IO-APIC + timer doesn't work! pester mingo@redhat.com"); 8.1883 + panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 8.1884 + "report. Then try booting with the 'noapic' option"); 8.1885 } 8.1886 8.1887 #define NR_IOAPIC_BIOSIDS 256 8.1888 @@ -1596,20 +2044,12 @@ static void store_ioapic_biosid_mapping( 8.1889 8.1890 /* 8.1891 * 8.1892 - * IRQ's that are handled by the old PIC in all cases: 8.1893 + * IRQ's that are handled by the PIC in the MPS IOAPIC case. 8.1894 * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. 8.1895 * Linux doesn't really care, as it's not actually used 8.1896 * for any interrupt handling anyway. 8.1897 - * - There used to be IRQ13 here as well, but all 8.1898 - * MPS-compliant must not use it for FPU coupling and we 8.1899 - * want to use exception 16 anyway. And there are 8.1900 - * systems who connect it to an I/O APIC for other uses. 8.1901 - * Thus we don't mark it special any longer. 8.1902 - * 8.1903 - * Additionally, something is definitely wrong with irq9 8.1904 - * on PIIX4 boards. 
8.1905 */ 8.1906 -#define PIC_IRQS (1<<2) 8.1907 +#define PIC_IRQS (1 << PIC_CASCADE_IR) 8.1908 8.1909 void __init setup_IO_APIC(void) 8.1910 { 8.1911 @@ -1617,7 +2057,11 @@ void __init setup_IO_APIC(void) 8.1912 8.1913 enable_IO_APIC(); 8.1914 8.1915 - io_apic_irqs = ~PIC_IRQS; 8.1916 + if (acpi_ioapic) 8.1917 + io_apic_irqs = ~0; /* all IRQs go through IOAPIC */ 8.1918 + else 8.1919 + io_apic_irqs = ~PIC_IRQS; 8.1920 + 8.1921 printk("ENABLING IO-APIC IRQs\n"); 8.1922 8.1923 /* 8.1924 @@ -1632,22 +2076,17 @@ void __init setup_IO_APIC(void) 8.1925 print_IO_APIC(); 8.1926 } 8.1927 8.1928 -#endif /* CONFIG_X86_IO_APIC */ 8.1929 - 8.1930 - 8.1931 - 8.1932 /* -------------------------------------------------------------------------- 8.1933 ACPI-based IOAPIC Configuration 8.1934 -------------------------------------------------------------------------- */ 8.1935 8.1936 #ifdef CONFIG_ACPI_BOOT 8.1937 8.1938 -#define IO_APIC_MAX_ID 15 8.1939 - 8.1940 int __init io_apic_get_unique_id (int ioapic, int apic_id) 8.1941 { 8.1942 - struct IO_APIC_reg_00 reg_00; 8.1943 - static unsigned long apic_id_map = 0; 8.1944 + union IO_APIC_reg_00 reg_00; 8.1945 + static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 8.1946 + physid_mask_t tmp; 8.1947 unsigned long flags; 8.1948 int i = 0; 8.1949 8.1950 @@ -1660,38 +2099,31 @@ int __init io_apic_get_unique_id (int io 8.1951 * advantage of new APIC bus architecture. 8.1952 */ 8.1953 8.1954 - if (!apic_id_map) 8.1955 - apic_id_map = phys_cpu_present_map; 8.1956 + if (physids_empty(apic_id_map)) 8.1957 + apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); 8.1958 8.1959 spin_lock_irqsave(&ioapic_lock, flags); 8.1960 - *(int *)®_00 = io_apic_read(ioapic, 0); 8.1961 + reg_00.raw = io_apic_read(ioapic, 0); 8.1962 spin_unlock_irqrestore(&ioapic_lock, flags); 8.1963 8.1964 - if (apic_id >= IO_APIC_MAX_ID) { 8.1965 + if (apic_id >= get_physical_broadcast()) { 8.1966 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 8.1967 - "%d\n", ioapic, apic_id, reg_00.ID); 8.1968 - apic_id = reg_00.ID; 8.1969 - } 8.1970 - 8.1971 - /* XAPICs do not need unique IDs */ 8.1972 - if (clustered_apic_mode == CLUSTERED_APIC_XAPIC){ 8.1973 - printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", 8.1974 - ioapic, apic_id); 8.1975 - return apic_id; 8.1976 + "%d\n", ioapic, apic_id, reg_00.bits.ID); 8.1977 + apic_id = reg_00.bits.ID; 8.1978 } 8.1979 8.1980 /* 8.1981 * Every APIC in a system must have a unique ID or we get lots of nice 8.1982 * 'stuck on smp_invalidate_needed IPI wait' messages. 
8.1983 */ 8.1984 - if (apic_id_map & (1 << apic_id)) { 8.1985 + if (check_apicid_used(apic_id_map, apic_id)) { 8.1986 8.1987 - for (i = 0; i < IO_APIC_MAX_ID; i++) { 8.1988 - if (!(apic_id_map & (1 << i))) 8.1989 + for (i = 0; i < get_physical_broadcast(); i++) { 8.1990 + if (!check_apicid_used(apic_id_map, i)) 8.1991 break; 8.1992 } 8.1993 8.1994 - if (i == IO_APIC_MAX_ID) 8.1995 + if (i == get_physical_broadcast()) 8.1996 panic("Max apic_id exceeded!\n"); 8.1997 8.1998 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 8.1999 @@ -1700,22 +2132,24 @@ int __init io_apic_get_unique_id (int io 8.2000 apic_id = i; 8.2001 } 8.2002 8.2003 - apic_id_map |= (1 << apic_id); 8.2004 + tmp = apicid_to_cpu_present(apic_id); 8.2005 + physids_or(apic_id_map, apic_id_map, tmp); 8.2006 8.2007 - if (reg_00.ID != apic_id) { 8.2008 - reg_00.ID = apic_id; 8.2009 + if (reg_00.bits.ID != apic_id) { 8.2010 + reg_00.bits.ID = apic_id; 8.2011 8.2012 spin_lock_irqsave(&ioapic_lock, flags); 8.2013 - io_apic_write(ioapic, 0, *(int *)®_00); 8.2014 - *(int *)®_00 = io_apic_read(ioapic, 0); 8.2015 + io_apic_write(ioapic, 0, reg_00.raw); 8.2016 + reg_00.raw = io_apic_read(ioapic, 0); 8.2017 spin_unlock_irqrestore(&ioapic_lock, flags); 8.2018 8.2019 /* Sanity check */ 8.2020 - if (reg_00.ID != apic_id) 8.2021 + if (reg_00.bits.ID != apic_id) 8.2022 panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic); 8.2023 } 8.2024 8.2025 - printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 8.2026 + apic_printk(APIC_VERBOSE, KERN_INFO 8.2027 + "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 8.2028 8.2029 return apic_id; 8.2030 } 8.2031 @@ -1723,27 +2157,27 @@ int __init io_apic_get_unique_id (int io 8.2032 8.2033 int __init io_apic_get_version (int ioapic) 8.2034 { 8.2035 - struct IO_APIC_reg_01 reg_01; 8.2036 + union IO_APIC_reg_01 reg_01; 8.2037 unsigned long flags; 8.2038 8.2039 spin_lock_irqsave(&ioapic_lock, flags); 8.2040 - *(int *)®_01 = io_apic_read(ioapic, 1); 8.2041 + reg_01.raw = io_apic_read(ioapic, 1); 8.2042 spin_unlock_irqrestore(&ioapic_lock, flags); 8.2043 8.2044 - return reg_01.version; 8.2045 + return reg_01.bits.version; 8.2046 } 8.2047 8.2048 8.2049 int __init io_apic_get_redir_entries (int ioapic) 8.2050 { 8.2051 - struct IO_APIC_reg_01 reg_01; 8.2052 + union IO_APIC_reg_01 reg_01; 8.2053 unsigned long flags; 8.2054 8.2055 spin_lock_irqsave(&ioapic_lock, flags); 8.2056 - *(int *)®_01 = io_apic_read(ioapic, 1); 8.2057 + reg_01.raw = io_apic_read(ioapic, 1); 8.2058 spin_unlock_irqrestore(&ioapic_lock, flags); 8.2059 8.2060 - return reg_01.entries; 8.2061 + return reg_01.bits.entries; 8.2062 } 8.2063 8.2064 8.2065 @@ -1753,7 +2187,7 @@ int io_apic_set_pci_routing (int ioapic, 8.2066 unsigned long flags; 8.2067 8.2068 if (!IO_APIC_IRQ(irq)) { 8.2069 - printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0/n", 8.2070 + printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 8.2071 ioapic); 8.2072 return -EINVAL; 8.2073 } 8.2074 @@ -1766,12 +2200,12 @@ int io_apic_set_pci_routing (int ioapic, 8.2075 8.2076 memset(&entry,0,sizeof(entry)); 8.2077 8.2078 - entry.delivery_mode = dest_LowestPrio; 8.2079 - entry.dest_mode = INT_DELIVERY_MODE; 8.2080 - entry.dest.logical.logical_dest = target_cpus(); 8.2081 - entry.mask = 1; /* Disabled (masked) */ 8.2082 + entry.delivery_mode = INT_DELIVERY_MODE; 8.2083 + entry.dest_mode = INT_DEST_MODE; 8.2084 + entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 8.2085 entry.trigger = edge_level; 8.2086 entry.polarity = 
active_high_low; 8.2087 + entry.mask = 1; 8.2088 8.2089 /* 8.2090 * IRQs < 16 are already in the irq_2_pin[] map 8.2091 @@ -1781,17 +2215,12 @@ int io_apic_set_pci_routing (int ioapic, 8.2092 8.2093 entry.vector = assign_irq_vector(irq); 8.2094 8.2095 - printk(KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> " 8.2096 - "IRQ %d Mode:%i Active:%i)\n", ioapic, 8.2097 - mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, edge_level, active_high_low); 8.2098 + apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " 8.2099 + "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic, 8.2100 + mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, 8.2101 + edge_level, active_high_low); 8.2102 8.2103 - if (edge_level) { 8.2104 - irq_desc[irq].handler = &ioapic_level_irq_type; 8.2105 - } else { 8.2106 - irq_desc[irq].handler = &ioapic_edge_irq_type; 8.2107 - } 8.2108 - 8.2109 - set_intr_gate(entry.vector, interrupt[irq]); 8.2110 + ioapic_register_intr(irq, entry.vector, edge_level); 8.2111 8.2112 if (!ioapic && (irq < 16)) 8.2113 disable_8259A_irq(irq); 8.2114 @@ -1806,123 +2235,12 @@ int io_apic_set_pci_routing (int ioapic, 8.2115 8.2116 #endif /*CONFIG_ACPI_BOOT*/ 8.2117 8.2118 -/* opt_leveltrigger, opt_edgetrigger: Force an IO-APIC-routed IRQ to be */ 8.2119 -/* level- or edge-triggered. */ 8.2120 -/* Example: 'leveltrigger=4,5,6,20 edgetrigger=21'. */ 8.2121 -static char opt_leveltrigger[30] = "", opt_edgetrigger[30] = ""; 8.2122 -string_param("leveltrigger", opt_leveltrigger); 8.2123 -string_param("edgetrigger", opt_edgetrigger); 8.2124 - 8.2125 -static int __init ioapic_trigger_setup(void) 8.2126 -{ 8.2127 - char *p; 8.2128 - irq_desc_t *desc; 8.2129 - long irq; 8.2130 - 8.2131 - p = opt_leveltrigger; 8.2132 - while ( *p != '\0' ) 8.2133 - { 8.2134 - irq = simple_strtol(p, &p, 10); 8.2135 - if ( (irq <= 0) || (irq >= NR_IRQS) ) 8.2136 - { 8.2137 - printk("IRQ '%ld' out of range in level-trigger list '%s'\n", 8.2138 - irq, opt_leveltrigger); 8.2139 - break; 8.2140 - } 8.2141 - 8.2142 - printk("Forcing IRQ %ld to level-trigger: ", irq); 8.2143 - 8.2144 - desc = &irq_desc[irq]; 8.2145 - spin_lock_irq(&desc->lock); 8.2146 - 8.2147 - if ( desc->handler == &ioapic_level_irq_type ) 8.2148 - { 8.2149 - printk("already level-triggered (no force applied).\n"); 8.2150 - } 8.2151 - else if ( desc->handler != &ioapic_edge_irq_type ) 8.2152 - { 8.2153 - printk("cannot force (can only force IO-APIC-edge IRQs).\n"); 8.2154 - } 8.2155 - else 8.2156 - { 8.2157 - desc->handler = &ioapic_level_irq_type; 8.2158 - __mask_IO_APIC_irq(irq); 8.2159 - __level_IO_APIC_irq(irq); 8.2160 - printk("done.\n"); 8.2161 - } 8.2162 - 8.2163 - spin_unlock_irq(&desc->lock); 8.2164 - 8.2165 - if ( *p == '\0' ) 8.2166 - break; 8.2167 - 8.2168 - if ( *p != ',' ) 8.2169 - { 8.2170 - printk("Unexpected character '%c' in level-trigger list '%s'\n", 8.2171 - *p, opt_leveltrigger); 8.2172 - break; 8.2173 - } 8.2174 - 8.2175 - p++; 8.2176 - } 8.2177 - 8.2178 - p = opt_edgetrigger; 8.2179 - while ( *p != '\0' ) 8.2180 - { 8.2181 - irq = simple_strtol(p, &p, 10); 8.2182 - if ( (irq <= 0) || (irq >= NR_IRQS) ) 8.2183 - { 8.2184 - printk("IRQ '%ld' out of range in edge-trigger list '%s'\n", 8.2185 - irq, opt_edgetrigger); 8.2186 - break; 8.2187 - } 8.2188 - 8.2189 - printk("Forcing IRQ %ld to edge-trigger: ", irq); 8.2190 - 8.2191 - desc = &irq_desc[irq]; 8.2192 - spin_lock_irq(&desc->lock); 8.2193 - 8.2194 - if ( desc->handler == &ioapic_edge_irq_type ) 8.2195 - { 8.2196 - printk("already edge-triggered (no 
force applied).\n"); 8.2197 - } 8.2198 - else if ( desc->handler != &ioapic_level_irq_type ) 8.2199 - { 8.2200 - printk("cannot force (can only force IO-APIC-level IRQs).\n"); 8.2201 - } 8.2202 - else 8.2203 - { 8.2204 - desc->handler = &ioapic_edge_irq_type; 8.2205 - __edge_IO_APIC_irq(irq); 8.2206 - desc->status |= IRQ_PENDING; /* may have lost a masked edge */ 8.2207 - printk("done.\n"); 8.2208 - } 8.2209 - 8.2210 - spin_unlock_irq(&desc->lock); 8.2211 - 8.2212 - if ( *p == '\0' ) 8.2213 - break; 8.2214 - 8.2215 - if ( *p != ',' ) 8.2216 - { 8.2217 - printk("Unexpected character '%c' in edge-trigger list '%s'\n", 8.2218 - *p, opt_edgetrigger); 8.2219 - break; 8.2220 - } 8.2221 - 8.2222 - p++; 8.2223 - } 8.2224 - 8.2225 - return 0; 8.2226 -} 8.2227 - 8.2228 -__initcall(ioapic_trigger_setup); 8.2229 8.2230 int ioapic_guest_read(int apicid, int address, u32 *pval) 8.2231 { 8.2232 u32 val; 8.2233 int apicenum; 8.2234 - struct IO_APIC_reg_00 reg_00; 8.2235 + union IO_APIC_reg_00 reg_00; 8.2236 unsigned long flags; 8.2237 8.2238 if ( (apicid >= NR_IOAPIC_BIOSIDS) || 8.2239 @@ -1936,9 +2254,9 @@ int ioapic_guest_read(int apicid, int ad 8.2240 /* Rewrite APIC ID to what the BIOS originally specified. */ 8.2241 if ( address == 0 ) 8.2242 { 8.2243 - *(int *)®_00 = val; 8.2244 - reg_00.ID = apicid; 8.2245 - val = *(u32 *)®_00; 8.2246 + reg_00.raw = val; 8.2247 + reg_00.bits.ID = apicid; 8.2248 + val = reg_00.raw; 8.2249 } 8.2250 8.2251 *pval = val; 8.2252 @@ -1974,7 +2292,7 @@ int ioapic_guest_write(int apicid, int a 8.2253 8.2254 /* Set the correct irq-handling type. */ 8.2255 irq_desc[irq].handler = rte.trigger ? 8.2256 - &ioapic_level_irq_type: &ioapic_edge_irq_type; 8.2257 + &ioapic_level_type: &ioapic_edge_type; 8.2258 8.2259 /* Record the pin<->irq mapping. */ 8.2260 for ( entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next] )
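The io_apic.c hunks above repeatedly swap the 2.4-era type punning through "*(int *)&reg_00" for the 2.6-style "union IO_APIC_reg_xx" accessors (reg_00.raw for the whole word, reg_00.bits.ID for a field). The stand-alone sketch below shows that read-modify-write pattern; the field layout and the io_apic_read()/io_apic_write() stubs are illustrative assumptions, not the kernel's actual definitions or its memory-mapped register window.

    /* Sketch of the union-based IO-APIC register access adopted above.
     * Field layout and the read/write stubs are assumptions for the demo. */
    #include <stdint.h>
    #include <stdio.h>

    union ioapic_reg_00 {
        uint32_t raw;                  /* whole 32-bit register value */
        struct {
            uint32_t reserved : 24;    /* illustrative layout only */
            uint32_t ID       : 8;
        } bits;
    };

    static uint32_t fake_window;       /* stands in for the mmio index/data window */
    static uint32_t io_apic_read(int apic, int reg)
    { (void)apic; (void)reg; return fake_window; }
    static void io_apic_write(int apic, int reg, uint32_t val)
    { (void)apic; (void)reg; fake_window = val; }

    int main(void)
    {
        union ioapic_reg_00 reg_00;

        reg_00.raw = io_apic_read(0, 0);     /* read register 0 */
        reg_00.bits.ID = 5;                  /* touch only the APIC ID field */
        io_apic_write(0, 0, reg_00.raw);     /* write the whole word back */

        reg_00.raw = io_apic_read(0, 0);     /* sanity check, as the patch does */
        printf("IO-APIC ID readback: %u\n", reg_00.bits.ID);
        return 0;
    }

Compared with casting a struct through int *, the union keeps the register a plain 32-bit quantity on the wire while still giving named access to individual fields, which is what lets the patch drop the old struct-based casts wholesale.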
9.1 --- a/xen/arch/x86/irq.c Mon May 09 14:34:59 2005 +0000 9.2 +++ b/xen/arch/x86/irq.c Mon May 09 17:50:11 2005 +0000 9.3 @@ -42,7 +42,6 @@ struct hw_interrupt_type no_irq_type = { 9.4 }; 9.5 9.6 atomic_t irq_err_count; 9.7 -atomic_t irq_mis_count; 9.8 9.9 inline void disable_irq_nosync(unsigned int irq) 9.10 {
10.1 --- a/xen/arch/x86/microcode.c Mon May 09 14:34:59 2005 +0000 10.2 +++ b/xen/arch/x86/microcode.c Mon May 09 17:50:11 2005 +0000 10.3 @@ -87,13 +87,6 @@ 10.4 #define vmalloc(_s) xmalloc_bytes(_s) 10.5 #define vfree(_p) xfree(_p) 10.6 #define num_online_cpus() smp_num_cpus 10.7 -static inline int on_each_cpu( 10.8 - void (*func) (void *info), void *info, int retry, int wait) 10.9 -{ 10.10 - int ret = smp_call_function(func, info, retry, wait); 10.11 - func(info); 10.12 - return ret; 10.13 -} 10.14 10.15 #if 0 10.16 MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
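The microcode.c hunk drops a local on_each_cpu() compatibility wrapper because the 2.6.11-derived SMP code now supplies one. The wrapper's job was simply to run a function on the other CPUs via smp_call_function() and then once on the calling CPU. The toy model below captures that calling convention only; the fake smp_call_function() loop and CPU count are invented for illustration and ignore the real locking/preemption concerns.

    /* Toy model of the removed on_each_cpu() shim: invoke func on the
     * "other" CPUs (a fake loop here), then on the local CPU as well. */
    #include <stdio.h>

    #define NR_FAKE_CPUS 4

    static int smp_call_function(void (*func)(void *), void *info, int retry, int wait)
    {
        (void)retry; (void)wait;
        for (int cpu = 1; cpu < NR_FAKE_CPUS; cpu++)   /* remote CPUs */
            func(info);
        return 0;
    }

    static int on_each_cpu(void (*func)(void *), void *info, int retry, int wait)
    {
        int ret = smp_call_function(func, info, retry, wait);
        func(info);                                    /* ...and the local CPU */
        return ret;
    }

    static void count_call(void *info) { (*(int *)info)++; }

    int main(void)
    {
        int calls = 0;
        on_each_cpu(count_call, &calls, 1, 1);
        printf("handler ran %d times (expect %d)\n", calls, NR_FAKE_CPUS);
        return 0;
    }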
11.1 --- a/xen/arch/x86/mpparse.c Mon May 09 14:34:59 2005 +0000 11.2 +++ b/xen/arch/x86/mpparse.c Mon May 09 17:50:11 2005 +0000 11.3 @@ -1,5 +1,5 @@ 11.4 /* 11.5 - * Intel Multiprocessor Specificiation 1.1 and 1.4 11.6 + * Intel Multiprocessor Specification 1.1 and 1.4 11.7 * compliant MP-table parsing routines. 11.8 * 11.9 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 11.10 @@ -14,44 +14,48 @@ 11.11 */ 11.12 11.13 #include <xen/config.h> 11.14 -#include <xen/init.h> 11.15 -#include <xen/lib.h> 11.16 -#include <xen/kernel.h> 11.17 +#include <xen/types.h> 11.18 #include <xen/irq.h> 11.19 -#include <xen/smp.h> 11.20 -#include <xen/mm.h> 11.21 +#include <xen/init.h> 11.22 #include <xen/acpi.h> 11.23 +#include <xen/delay.h> 11.24 +#include <xen/sched.h> 11.25 + 11.26 +#include <asm/mc146818rtc.h> 11.27 +#include <asm/bitops.h> 11.28 +#include <asm/smp.h> 11.29 #include <asm/acpi.h> 11.30 -#include <asm/io.h> 11.31 -#include <asm/apic.h> 11.32 +#include <asm/mtrr.h> 11.33 #include <asm/mpspec.h> 11.34 -#include <asm/flushtlb.h> 11.35 -#include <asm/smpboot.h> 11.36 +#include <asm/io_apic.h> 11.37 11.38 -int numnodes = 1; /* XXX Xen */ 11.39 +#include <mach_apic.h> 11.40 +#include <mach_mpparse.h> 11.41 +#include <bios_ebda.h> 11.42 + 11.43 +#define es7000_plat 0 /* XXX XEN */ 11.44 11.45 /* Have we found an MP table */ 11.46 int smp_found_config; 11.47 +unsigned int __initdata maxcpus = NR_CPUS; 11.48 11.49 /* 11.50 * Various Linux-internal data structures created from the 11.51 * MP-table. 11.52 */ 11.53 int apic_version [MAX_APICS]; 11.54 +int mp_bus_id_to_type [MAX_MP_BUSSES]; 11.55 +int mp_bus_id_to_node [MAX_MP_BUSSES]; 11.56 +int mp_bus_id_to_local [MAX_MP_BUSSES]; 11.57 int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 11.58 +int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; 11.59 int mp_current_pci_id; 11.60 -int *mp_bus_id_to_type; 11.61 -int *mp_bus_id_to_node; 11.62 -int *mp_bus_id_to_local; 11.63 -int *mp_bus_id_to_pci_bus; 11.64 -int max_mp_busses; 11.65 -int max_irq_sources; 11.66 11.67 /* I/O APIC entries */ 11.68 struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; 11.69 11.70 /* # of MP IRQ source entries */ 11.71 -struct mpc_config_intsrc *mp_irqs; 11.72 +struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 11.73 11.74 /* MP IRQ source entries */ 11.75 int mp_irq_entries; 11.76 @@ -65,24 +69,18 @@ unsigned long mp_lapic_addr; 11.77 unsigned int boot_cpu_physical_apicid = -1U; 11.78 unsigned int boot_cpu_logical_apicid = -1U; 11.79 /* Internal processor count */ 11.80 -static unsigned int num_processors; 11.81 +static unsigned int __initdata num_processors; 11.82 11.83 /* Bitmask of physically existing CPUs */ 11.84 -unsigned long phys_cpu_present_map; 11.85 -unsigned long logical_cpu_present_map; 11.86 +physid_mask_t phys_cpu_present_map; 11.87 11.88 -#ifdef CONFIG_X86_CLUSTERED_APIC 11.89 -unsigned char esr_disable = 0; 11.90 -unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE; 11.91 -unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC; 11.92 -#endif 11.93 -unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 11.94 +u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 11.95 11.96 /* 11.97 * Intel MP BIOS table parsing routines: 11.98 */ 11.99 11.100 -#ifndef CONFIG_X86_VISWS_APIC 11.101 + 11.102 /* 11.103 * Checksum an MP configuration block. 
11.104 */ 11.105 @@ -98,48 +96,6 @@ static int __init mpf_checksum(unsigned 11.106 } 11.107 11.108 /* 11.109 - * Processor encoding in an MP configuration block 11.110 - */ 11.111 - 11.112 -static char __init *mpc_family(int family,int model) 11.113 -{ 11.114 - static char n[32]; 11.115 - static char *model_defs[]= 11.116 - { 11.117 - "80486DX","80486DX", 11.118 - "80486SX","80486DX/2 or 80487", 11.119 - "80486SL","80486SX/2", 11.120 - "Unknown","80486DX/2-WB", 11.121 - "80486DX/4","80486DX/4-WB" 11.122 - }; 11.123 - 11.124 - switch (family) { 11.125 - case 0x04: 11.126 - if (model < 10) 11.127 - return model_defs[model]; 11.128 - break; 11.129 - 11.130 - case 0x05: 11.131 - return("Pentium(tm)"); 11.132 - 11.133 - case 0x06: 11.134 - return("Pentium(tm) Pro"); 11.135 - 11.136 - case 0x0F: 11.137 - if (model == 0x00) 11.138 - return("Pentium 4(tm)"); 11.139 - if (model == 0x01) 11.140 - return("Pentium 4(tm)"); 11.141 - if (model == 0x02) 11.142 - return("Pentium 4(tm) XEON(tm)"); 11.143 - if (model == 0x0F) 11.144 - return("Special controller"); 11.145 - } 11.146 - sprintf(n,"Unknown CPU [%d:%d]",family, model); 11.147 - return n; 11.148 -} 11.149 - 11.150 -/* 11.151 * Have to match translation table entries to main table entries by counter 11.152 * hence the mpc_record variable .... can't see a less disgusting way of 11.153 * doing this .... 11.154 @@ -148,30 +104,30 @@ static char __init *mpc_family(int famil 11.155 static int mpc_record; 11.156 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata; 11.157 11.158 +#ifdef CONFIG_X86_NUMAQ 11.159 +static int MP_valid_apicid(int apicid, int version) 11.160 +{ 11.161 + return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf; 11.162 +} 11.163 +#else 11.164 +static int MP_valid_apicid(int apicid, int version) 11.165 +{ 11.166 + if (version >= 0x14) 11.167 + return apicid < 0xff; 11.168 + else 11.169 + return apicid < 0xf; 11.170 +} 11.171 +#endif 11.172 + 11.173 void __init MP_processor_info (struct mpc_config_processor *m) 11.174 { 11.175 - int ver, quad, logical_apicid; 11.176 + int ver, apicid; 11.177 + physid_mask_t tmp; 11.178 11.179 if (!(m->mpc_cpuflag & CPU_ENABLED)) 11.180 return; 11.181 11.182 - logical_apicid = m->mpc_apicid; 11.183 - if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) { 11.184 - quad = translation_table[mpc_record]->trans_quad; 11.185 - logical_apicid = (quad << 4) + 11.186 - (m->mpc_apicid ? m->mpc_apicid << 1 : 1); 11.187 - printk("Processor #%d %s APIC version %d (quad %d, apic %d)\n", 11.188 - m->mpc_apicid, 11.189 - mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 , 11.190 - (m->mpc_cpufeature & CPU_MODEL_MASK)>>4), 11.191 - m->mpc_apicver, quad, logical_apicid); 11.192 - } else { 11.193 - printk("Processor #%d %s APIC version %d\n", 11.194 - m->mpc_apicid, 11.195 - mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK)>>8 , 11.196 - (m->mpc_cpufeature & CPU_MODEL_MASK)>>4), 11.197 - m->mpc_apicver); 11.198 - } 11.199 + apicid = mpc_apic_id(m, translation_table[mpc_record]); 11.200 11.201 if (m->mpc_featureflag&(1<<0)) 11.202 Dprintk(" Floating point unit present.\n"); 11.203 @@ -224,68 +180,68 @@ void __init MP_processor_info (struct mp 11.204 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { 11.205 Dprintk(" Bootup CPU\n"); 11.206 boot_cpu_physical_apicid = m->mpc_apicid; 11.207 - boot_cpu_logical_apicid = logical_apicid; 11.208 + boot_cpu_logical_apicid = apicid; 11.209 } 11.210 11.211 - if (num_processors >= NR_CPUS){ 11.212 - printk(KERN_WARNING "NR_CPUS limit of %i reached. 
Cannot " 11.213 - "boot CPU(apicid 0x%x).\n", NR_CPUS, m->mpc_apicid); 11.214 + if (num_processors >= NR_CPUS) { 11.215 + printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." 11.216 + " Processor ignored.\n", NR_CPUS); 11.217 + return; 11.218 + } 11.219 + 11.220 + if (num_processors >= maxcpus) { 11.221 + printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." 11.222 + " Processor ignored.\n", maxcpus); 11.223 return; 11.224 } 11.225 num_processors++; 11.226 + ver = m->mpc_apicver; 11.227 11.228 - if (m->mpc_apicid > MAX_APICS) { 11.229 - printk("Processor #%d INVALID. (Max ID: %d).\n", 11.230 + if (!MP_valid_apicid(apicid, ver)) { 11.231 + printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n", 11.232 m->mpc_apicid, MAX_APICS); 11.233 --num_processors; 11.234 return; 11.235 } 11.236 - ver = m->mpc_apicver; 11.237 11.238 - logical_cpu_present_map |= 1 << (num_processors-1); 11.239 - phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid); 11.240 - 11.241 + tmp = apicid_to_cpu_present(apicid); 11.242 + physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp); 11.243 + 11.244 /* 11.245 * Validate version 11.246 */ 11.247 if (ver == 0x0) { 11.248 - printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid); 11.249 + printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid); 11.250 ver = 0x10; 11.251 } 11.252 apic_version[m->mpc_apicid] = ver; 11.253 - raw_phys_apicid[num_processors - 1] = m->mpc_apicid; 11.254 + bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; 11.255 } 11.256 11.257 static void __init MP_bus_info (struct mpc_config_bus *m) 11.258 { 11.259 char str[7]; 11.260 - int quad; 11.261 11.262 memcpy(str, m->mpc_bustype, 6); 11.263 str[6] = 0; 11.264 - 11.265 - if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) { 11.266 - quad = translation_table[mpc_record]->trans_quad; 11.267 - mp_bus_id_to_node[m->mpc_busid] = quad; 11.268 - mp_bus_id_to_local[m->mpc_busid] = translation_table[mpc_record]->trans_local; 11.269 - quad_local_to_mp_bus_id[quad][translation_table[mpc_record]->trans_local] = m->mpc_busid; 11.270 - printk("Bus #%d is %s (node %d)\n", m->mpc_busid, str, quad); 11.271 - } else { 11.272 - Dprintk("Bus #%d is %s\n", m->mpc_busid, str); 11.273 - } 11.274 + 11.275 + mpc_oem_bus_info(m, str, translation_table[mpc_record]); 11.276 11.277 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { 11.278 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; 11.279 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { 11.280 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; 11.281 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { 11.282 + mpc_oem_pci_bus(m, translation_table[mpc_record]); 11.283 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; 11.284 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; 11.285 mp_current_pci_id++; 11.286 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { 11.287 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; 11.288 + } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) { 11.289 + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; 11.290 } else { 11.291 - printk("Unknown bustype %s - ignoring\n", str); 11.292 + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); 11.293 } 11.294 } 11.295 11.296 @@ -294,10 +250,10 @@ static void __init MP_ioapic_info (struc 11.297 if (!(m->mpc_flags & MPC_APIC_USABLE)) 11.298 return; 11.299 11.300 - printk("I/O APIC 
#%d Version %d at 0x%X.\n", 11.301 + printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", 11.302 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); 11.303 if (nr_ioapics >= MAX_IO_APICS) { 11.304 - printk("Max # of I/O APICs (%d) exceeded (found %d).\n", 11.305 + printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", 11.306 MAX_IO_APICS, nr_ioapics); 11.307 panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); 11.308 } 11.309 @@ -318,7 +274,7 @@ static void __init MP_intsrc_info (struc 11.310 m->mpc_irqtype, m->mpc_irqflag & 3, 11.311 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, 11.312 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); 11.313 - if (++mp_irq_entries == max_irq_sources) 11.314 + if (++mp_irq_entries == MAX_IRQ_SOURCES) 11.315 panic("Max # of irq sources exceeded!!\n"); 11.316 } 11.317 11.318 @@ -344,16 +300,17 @@ static void __init MP_lintsrc_info (stru 11.319 BUG(); 11.320 } 11.321 11.322 +#ifdef CONFIG_X86_NUMAQ 11.323 static void __init MP_translation_info (struct mpc_config_translation *m) 11.324 { 11.325 - printk("Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); 11.326 + printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); 11.327 11.328 if (mpc_record >= MAX_MPC_ENTRY) 11.329 - printk("MAX_MPC_ENTRY exceeded!\n"); 11.330 + printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); 11.331 else 11.332 translation_table[mpc_record] = m; /* stash this for later */ 11.333 - if (m->trans_quad+1 > numnodes) 11.334 - numnodes = m->trans_quad+1; 11.335 + if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) 11.336 + node_set_online(m->trans_quad); 11.337 } 11.338 11.339 /* 11.340 @@ -366,10 +323,11 @@ static void __init smp_read_mpc_oem(stru 11.341 int count = sizeof (*oemtable); /* the header size */ 11.342 unsigned char *oemptr = ((unsigned char *)oemtable)+count; 11.343 11.344 - printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable); 11.345 + mpc_record = 0; 11.346 + printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); 11.347 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) 11.348 { 11.349 - printk("SMP mpc oemtable: bad signature [%c%c%c%c]!\n", 11.350 + printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", 11.351 oemtable->oem_signature[0], 11.352 oemtable->oem_signature[1], 11.353 oemtable->oem_signature[2], 11.354 @@ -378,7 +336,7 @@ static void __init smp_read_mpc_oem(stru 11.355 } 11.356 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) 11.357 { 11.358 - printk("SMP oem mptable: checksum error!\n"); 11.359 + printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); 11.360 return; 11.361 } 11.362 while (count < oemtable->oem_length) { 11.363 @@ -395,36 +353,42 @@ static void __init smp_read_mpc_oem(stru 11.364 } 11.365 default: 11.366 { 11.367 - printk("Unrecognised OEM table entry type! - %d\n", (int) *oemptr); 11.368 + printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr); 11.369 return; 11.370 } 11.371 } 11.372 } 11.373 } 11.374 11.375 +static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, 11.376 + char *productid) 11.377 +{ 11.378 + if (strncmp(oem, "IBM NUMA", 8)) 11.379 + printk("Warning! 
May not be a NUMA-Q system!\n"); 11.380 + if (mpc->mpc_oemptr) 11.381 + smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, 11.382 + mpc->mpc_oemsize); 11.383 +} 11.384 +#endif /* CONFIG_X86_NUMAQ */ 11.385 + 11.386 /* 11.387 * Read/parse the MPC 11.388 */ 11.389 11.390 static int __init smp_read_mpc(struct mp_config_table *mpc) 11.391 { 11.392 - char oem[16], prod[14]; 11.393 + char str[16]; 11.394 + char oem[10]; 11.395 int count=sizeof(*mpc); 11.396 unsigned char *mpt=((unsigned char *)mpc)+count; 11.397 - int num_bus = 0; 11.398 - int num_irq = 0; 11.399 - unsigned char *bus_data; 11.400 11.401 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { 11.402 - panic("SMP mptable: bad signature [%c%c%c%c]!\n", 11.403 - mpc->mpc_signature[0], 11.404 - mpc->mpc_signature[1], 11.405 - mpc->mpc_signature[2], 11.406 - mpc->mpc_signature[3]); 11.407 + printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", 11.408 + *(u32 *)mpc->mpc_signature); 11.409 return 0; 11.410 } 11.411 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { 11.412 - panic("SMP mptable: checksum error!\n"); 11.413 + printk(KERN_ERR "SMP mptable: checksum error!\n"); 11.414 return 0; 11.415 } 11.416 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { 11.417 @@ -438,14 +402,14 @@ static int __init smp_read_mpc(struct mp 11.418 } 11.419 memcpy(oem,mpc->mpc_oem,8); 11.420 oem[8]=0; 11.421 - printk("OEM ID: %s ",oem); 11.422 + printk(KERN_INFO "OEM ID: %s ",oem); 11.423 11.424 - memcpy(prod,mpc->mpc_productid,12); 11.425 - prod[12]=0; 11.426 - printk("Product ID: %s ",prod); 11.427 + memcpy(str,mpc->mpc_productid,12); 11.428 + str[12]=0; 11.429 + printk("Product ID: %s ",str); 11.430 11.431 - detect_clustered_apic(oem, prod); 11.432 - 11.433 + mps_oem_check(mpc, oem, str); 11.434 + 11.435 printk("APIC at: 0x%X\n",mpc->mpc_lapic); 11.436 11.437 /* 11.438 @@ -455,77 +419,10 @@ static int __init smp_read_mpc(struct mp 11.439 if (!acpi_lapic) 11.440 mp_lapic_addr = mpc->mpc_lapic; 11.441 11.442 - if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) { 11.443 - /* We need to process the oem mpc tables to tell us which quad things are in ... */ 11.444 - mpc_record = 0; 11.445 - smp_read_mpc_oem((struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr, mpc->mpc_oemsize); 11.446 - mpc_record = 0; 11.447 - } 11.448 - 11.449 - /* Pre-scan to determine the number of bus and 11.450 - * interrupts records we have 11.451 - */ 11.452 - while (count < mpc->mpc_length) { 11.453 - switch (*mpt) { 11.454 - case MP_PROCESSOR: 11.455 - mpt += sizeof(struct mpc_config_processor); 11.456 - count += sizeof(struct mpc_config_processor); 11.457 - break; 11.458 - case MP_BUS: 11.459 - ++num_bus; 11.460 - mpt += sizeof(struct mpc_config_bus); 11.461 - count += sizeof(struct mpc_config_bus); 11.462 - break; 11.463 - case MP_INTSRC: 11.464 - ++num_irq; 11.465 - mpt += sizeof(struct mpc_config_intsrc); 11.466 - count += sizeof(struct mpc_config_intsrc); 11.467 - break; 11.468 - case MP_IOAPIC: 11.469 - mpt += sizeof(struct mpc_config_ioapic); 11.470 - count += sizeof(struct mpc_config_ioapic); 11.471 - break; 11.472 - case MP_LINTSRC: 11.473 - mpt += sizeof(struct mpc_config_lintsrc); 11.474 - count += sizeof(struct mpc_config_lintsrc); 11.475 - break; 11.476 - default: 11.477 - count = mpc->mpc_length; 11.478 - break; 11.479 - } 11.480 - } 11.481 - /* 11.482 - * Paranoia: Allocate one extra of both the number of busses and number 11.483 - * of irqs, and make sure that we have at least 4 interrupts per PCI 11.484 - * slot. 
But some machines do not report very many busses, so we need 11.485 - * to fall back on the older defaults. 11.486 - */ 11.487 - ++num_bus; 11.488 - max_mp_busses = max(num_bus, MAX_MP_BUSSES); 11.489 - if (num_irq < (4 * max_mp_busses)) 11.490 - num_irq = 4 * num_bus; /* 4 intr/PCI slot */ 11.491 - ++num_irq; 11.492 - max_irq_sources = max(num_irq, MAX_IRQ_SOURCES); 11.493 - 11.494 - count = (max_mp_busses * sizeof(int)) * 4; 11.495 - count += (max_irq_sources * sizeof(struct mpc_config_intsrc)); 11.496 - bus_data = (void *)alloc_xenheap_pages(get_order(count)); 11.497 - if (!bus_data) { 11.498 - printk(KERN_ERR "SMP mptable: out of memory!\n"); 11.499 - return 0; 11.500 - } 11.501 - mp_bus_id_to_type = (int *)&bus_data[0]; 11.502 - mp_bus_id_to_node = (int *)&bus_data[(max_mp_busses * sizeof(int))]; 11.503 - mp_bus_id_to_local = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 2]; 11.504 - mp_bus_id_to_pci_bus = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 3]; 11.505 - mp_irqs = (struct mpc_config_intsrc *)&bus_data[(max_mp_busses * sizeof(int)) * 4]; 11.506 - memset(mp_bus_id_to_pci_bus, -1, max_mp_busses * sizeof(int)); 11.507 - 11.508 /* 11.509 * Now process the configuration blocks. 11.510 */ 11.511 - count = sizeof(*mpc); 11.512 - mpt = ((unsigned char *)mpc)+count; 11.513 + mpc_record = 0; 11.514 while (count < mpc->mpc_length) { 11.515 switch(*mpt) { 11.516 case MP_PROCESSOR: 11.517 @@ -584,21 +481,7 @@ static int __init smp_read_mpc(struct mp 11.518 } 11.519 ++mpc_record; 11.520 } 11.521 - 11.522 - if (clustered_apic_mode){ 11.523 - phys_cpu_present_map = logical_cpu_present_map; 11.524 - } 11.525 - 11.526 - 11.527 - printk("Enabling APIC mode: "); 11.528 - if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ) 11.529 - printk("Clustered Logical. "); 11.530 - else if(clustered_apic_mode == CLUSTERED_APIC_XAPIC) 11.531 - printk("Physical. "); 11.532 - else 11.533 - printk("Flat. "); 11.534 - printk("Using %d I/O APICs\n",nr_ioapics); 11.535 - 11.536 + clustered_apic_check(); 11.537 if (!num_processors) 11.538 printk(KERN_ERR "SMP mptable: no processors registered!\n"); 11.539 return num_processors; 11.540 @@ -634,12 +517,12 @@ static void __init construct_default_ioi 11.541 * If it does, we assume it's valid. 11.542 */ 11.543 if (mpc_default_type == 5) { 11.544 - printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); 11.545 + printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); 11.546 11.547 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) 11.548 - printk("ELCR contains invalid data... not using ELCR\n"); 11.549 + printk(KERN_WARNING "ELCR contains invalid data... 
not using ELCR\n"); 11.550 else { 11.551 - printk("Using ELCR to identify PCI interrupts\n"); 11.552 + printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); 11.553 ELCR_fallback = 1; 11.554 } 11.555 } 11.556 @@ -686,24 +569,6 @@ static inline void __init construct_defa 11.557 struct mpc_config_lintsrc lintsrc; 11.558 int linttypes[2] = { mp_ExtINT, mp_NMI }; 11.559 int i; 11.560 - struct { 11.561 - int mp_bus_id_to_type[MAX_MP_BUSSES]; 11.562 - int mp_bus_id_to_node[MAX_MP_BUSSES]; 11.563 - int mp_bus_id_to_local[MAX_MP_BUSSES]; 11.564 - int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; 11.565 - struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 11.566 - } *bus_data; 11.567 - 11.568 - bus_data = (void *)alloc_xenheap_pages(get_order(sizeof(*bus_data))); 11.569 - if (!bus_data) 11.570 - panic("SMP mptable: out of memory!\n"); 11.571 - mp_bus_id_to_type = bus_data->mp_bus_id_to_type; 11.572 - mp_bus_id_to_node = bus_data->mp_bus_id_to_node; 11.573 - mp_bus_id_to_local = bus_data->mp_bus_id_to_local; 11.574 - mp_bus_id_to_pci_bus = bus_data->mp_bus_id_to_pci_bus; 11.575 - mp_irqs = bus_data->mp_irqs; 11.576 - for (i = 0; i < MAX_MP_BUSSES; ++i) 11.577 - mp_bus_id_to_pci_bus[i] = -1; 11.578 11.579 /* 11.580 * local APIC has default address 11.581 @@ -732,7 +597,8 @@ static inline void __init construct_defa 11.582 bus.mpc_busid = 0; 11.583 switch (mpc_default_type) { 11.584 default: 11.585 - printk("???\nUnknown standard configuration %d\n", 11.586 + printk("???\n"); 11.587 + printk(KERN_ERR "Unknown standard configuration %d\n", 11.588 mpc_default_type); 11.589 /* fall through */ 11.590 case 1: 11.591 @@ -790,7 +656,7 @@ void __init get_smp_config (void) 11.592 11.593 /* 11.594 * ACPI may be used to obtain the entire SMP configuration or just to 11.595 - * enumerate/configure processors (CONFIG_ACPI_HT_ONLY). Note that 11.596 + * enumerate/configure processors (CONFIG_ACPI_BOOT). Note that 11.597 * ACPI supports both logical (e.g. Hyper-Threading) and physical 11.598 * processors, where MPS only supports physical. 11.599 */ 11.600 @@ -801,12 +667,12 @@ void __init get_smp_config (void) 11.601 else if (acpi_lapic) 11.602 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); 11.603 11.604 - printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); 11.605 + printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); 11.606 if (mpf->mpf_feature2 & (1<<7)) { 11.607 - printk(" IMCR and PIC compatibility mode.\n"); 11.608 + printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); 11.609 pic_mode = 1; 11.610 } else { 11.611 - printk(" Virtual Wire compatibility mode.\n"); 11.612 + printk(KERN_INFO " Virtual Wire compatibility mode.\n"); 11.613 pic_mode = 0; 11.614 } 11.615 11.616 @@ -815,7 +681,7 @@ void __init get_smp_config (void) 11.617 */ 11.618 if (mpf->mpf_feature1 != 0) { 11.619 11.620 - printk("Default MP configuration #%d\n", mpf->mpf_feature1); 11.621 + printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); 11.622 construct_default_ISA_mptable(mpf->mpf_feature1); 11.623 11.624 } else if (mpf->mpf_physptr) { 11.625 @@ -824,7 +690,7 @@ void __init get_smp_config (void) 11.626 * Read the physical hardware table. Anything here will 11.627 * override the defaults. 
11.628 */ 11.629 - if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) { 11.630 + if (!smp_read_mpc((void *)mpf->mpf_physptr)) { 11.631 smp_found_config = 0; 11.632 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); 11.633 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); 11.634 @@ -838,7 +704,7 @@ void __init get_smp_config (void) 11.635 if (!mp_irq_entries) { 11.636 struct mpc_config_bus bus; 11.637 11.638 - printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); 11.639 + printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); 11.640 11.641 bus.mpc_type = MP_BUS; 11.642 bus.mpc_busid = 0; 11.643 @@ -851,7 +717,7 @@ void __init get_smp_config (void) 11.644 } else 11.645 BUG(); 11.646 11.647 - printk("Processors: %d\n", num_processors); 11.648 + printk(KERN_INFO "Processors: %d\n", num_processors); 11.649 /* 11.650 * Only use the first configuration found. 11.651 */ 11.652 @@ -859,7 +725,7 @@ void __init get_smp_config (void) 11.653 11.654 static int __init smp_scan_config (unsigned long base, unsigned long length) 11.655 { 11.656 - unsigned int *bp = phys_to_virt(base); 11.657 + unsigned long *bp = phys_to_virt(base); 11.658 struct intel_mp_floating *mpf; 11.659 11.660 Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); 11.661 @@ -875,11 +741,27 @@ static int __init smp_scan_config (unsig 11.662 || (mpf->mpf_specification == 4)) ) { 11.663 11.664 smp_found_config = 1; 11.665 - printk("found SMP MP-table at %08lx\n", 11.666 + printk(KERN_INFO "found SMP MP-table at %08lx\n", 11.667 virt_to_phys(mpf)); 11.668 +#if 0 11.669 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE); 11.670 - if (mpf->mpf_physptr) 11.671 - reserve_bootmem((unsigned long)mpf->mpf_physptr, PAGE_SIZE); 11.672 + if (mpf->mpf_physptr) { 11.673 + /* 11.674 + * We cannot access to MPC table to compute 11.675 + * table size yet, as only few megabytes from 11.676 + * the bottom is mapped now. 11.677 + * PC-9800's MPC table places on the very last 11.678 + * of physical memory; so that simply reserving 11.679 + * PAGE_SIZE from mpg->mpf_physptr yields BUG() 11.680 + * in reserve_bootmem. 11.681 + */ 11.682 + unsigned long size = PAGE_SIZE; 11.683 + unsigned long end = max_low_pfn * PAGE_SIZE; 11.684 + if (mpf->mpf_physptr + size > end) 11.685 + size = end - mpf->mpf_physptr; 11.686 + reserve_bootmem(mpf->mpf_physptr, size); 11.687 + } 11.688 +#endif 11.689 mpf_found = mpf; 11.690 return 1; 11.691 } 11.692 @@ -889,7 +771,7 @@ static int __init smp_scan_config (unsig 11.693 return 0; 11.694 } 11.695 11.696 -void __init find_intel_smp (void) 11.697 +void __init find_smp_config (void) 11.698 { 11.699 unsigned int address; 11.700 11.701 @@ -913,53 +795,20 @@ void __init find_intel_smp (void) 11.702 * there is a real-mode segmented pointer pointing to the 11.703 * 4K EBDA area at 0x40E, calculate and scan it here. 11.704 * 11.705 - * NOTE! There were Linux loaders that will corrupt the EBDA 11.706 + * NOTE! There are Linux loaders that will corrupt the EBDA 11.707 * area, and as such this kind of SMP config may be less 11.708 * trustworthy, simply because the SMP table may have been 11.709 - * stomped on during early boot. Thankfully the bootloaders 11.710 - * now honour the EBDA. 11.711 + * stomped on during early boot. These loaders are buggy and 11.712 + * should be fixed. 11.713 + * 11.714 + * MP1.4 SPEC states to only scan first 1K of 4K EBDA. 
11.715 */ 11.716 11.717 - address = *(unsigned short *)phys_to_virt(0x40E); 11.718 - address <<= 4; 11.719 - smp_scan_config(address, 0x1000); 11.720 + address = get_bios_ebda(); 11.721 + if (address) 11.722 + smp_scan_config(address, 0x400); 11.723 } 11.724 11.725 -#else 11.726 - 11.727 -/* 11.728 - * The Visual Workstation is Intel MP compliant in the hardware 11.729 - * sense, but it doesn't have a BIOS(-configuration table). 11.730 - * No problem for Linux. 11.731 - */ 11.732 -void __init find_visws_smp(void) 11.733 -{ 11.734 - smp_found_config = 1; 11.735 - 11.736 - phys_cpu_present_map |= 2; /* or in id 1 */ 11.737 - apic_version[1] |= 0x10; /* integrated APIC */ 11.738 - apic_version[0] |= 0x10; 11.739 - 11.740 - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; 11.741 -} 11.742 - 11.743 -#endif 11.744 - 11.745 -/* 11.746 - * - Intel MP Configuration Table 11.747 - * - or SGI Visual Workstation configuration 11.748 - */ 11.749 -void __init find_smp_config (void) 11.750 -{ 11.751 -#ifdef CONFIG_X86_LOCAL_APIC 11.752 - find_intel_smp(); 11.753 -#endif 11.754 -#ifdef CONFIG_VISWS 11.755 - find_visws_smp(); 11.756 -#endif 11.757 -} 11.758 - 11.759 - 11.760 /* -------------------------------------------------------------------------- 11.761 ACPI-based MP Configuration 11.762 -------------------------------------------------------------------------- */ 11.763 @@ -987,7 +836,7 @@ void __init mp_register_lapic ( 11.764 struct mpc_config_processor processor; 11.765 int boot_cpu = 0; 11.766 11.767 - if (id >= MAX_APICS) { 11.768 + if (MAX_APICS - id <= 0) { 11.769 printk(KERN_WARNING "Processor #%d invalid (max %d)\n", 11.770 id, MAX_APICS); 11.771 return; 11.772 @@ -998,14 +847,7 @@ void __init mp_register_lapic ( 11.773 11.774 processor.mpc_type = MP_PROCESSOR; 11.775 processor.mpc_apicid = id; 11.776 - 11.777 - /* 11.778 - * mp_register_lapic_address() which is called before the 11.779 - * current function does the fixmap of FIX_APIC_BASE. 11.780 - * Read in the correct APIC version from there 11.781 - */ 11.782 - processor.mpc_apicver = apic_read(APIC_LVR); 11.783 - 11.784 + processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); 11.785 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); 11.786 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); 11.787 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 11.788 @@ -1017,32 +859,32 @@ void __init mp_register_lapic ( 11.789 MP_processor_info(&processor); 11.790 } 11.791 11.792 -#if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/ 11.793 +#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT)) 11.794 11.795 #define MP_ISA_BUS 0 11.796 #define MP_MAX_IOAPIC_PIN 127 11.797 11.798 struct mp_ioapic_routing { 11.799 int apic_id; 11.800 - int irq_start; 11.801 - int irq_end; 11.802 + int gsi_base; 11.803 + int gsi_end; 11.804 u32 pin_programmed[4]; 11.805 } mp_ioapic_routing[MAX_IO_APICS]; 11.806 11.807 11.808 -static int __init mp_find_ioapic ( 11.809 - int irq) 11.810 +static int mp_find_ioapic ( 11.811 + int gsi) 11.812 { 11.813 int i = 0; 11.814 11.815 - /* Find the IOAPIC that manages this IRQ. */ 11.816 + /* Find the IOAPIC that manages this GSI. 
*/ 11.817 for (i = 0; i < nr_ioapics; i++) { 11.818 - if ((irq >= mp_ioapic_routing[i].irq_start) 11.819 - && (irq <= mp_ioapic_routing[i].irq_end)) 11.820 + if ((gsi >= mp_ioapic_routing[i].gsi_base) 11.821 + && (gsi <= mp_ioapic_routing[i].gsi_end)) 11.822 return i; 11.823 } 11.824 11.825 - printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d\n", irq); 11.826 + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 11.827 11.828 return -1; 11.829 } 11.830 @@ -1051,7 +893,7 @@ static int __init mp_find_ioapic ( 11.831 void __init mp_register_ioapic ( 11.832 u8 id, 11.833 u32 address, 11.834 - u32 irq_base) 11.835 + u32 gsi_base) 11.836 { 11.837 int idx = 0; 11.838 11.839 @@ -1077,19 +919,19 @@ void __init mp_register_ioapic ( 11.840 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); 11.841 11.842 /* 11.843 - * Build basic IRQ lookup table to facilitate irq->io_apic lookups 11.844 - * and to prevent reprogramming of IOAPIC pins (PCI IRQs). 11.845 + * Build basic GSI lookup table to facilitate gsi->io_apic lookups 11.846 + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 11.847 */ 11.848 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; 11.849 - mp_ioapic_routing[idx].irq_start = irq_base; 11.850 - mp_ioapic_routing[idx].irq_end = irq_base + 11.851 + mp_ioapic_routing[idx].gsi_base = gsi_base; 11.852 + mp_ioapic_routing[idx].gsi_end = gsi_base + 11.853 io_apic_get_redir_entries(idx); 11.854 11.855 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 11.856 - "IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 11.857 + "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 11.858 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, 11.859 - mp_ioapic_routing[idx].irq_start, 11.860 - mp_ioapic_routing[idx].irq_end); 11.861 + mp_ioapic_routing[idx].gsi_base, 11.862 + mp_ioapic_routing[idx].gsi_end); 11.863 11.864 return; 11.865 } 11.866 @@ -1099,21 +941,19 @@ void __init mp_override_legacy_irq ( 11.867 u8 bus_irq, 11.868 u8 polarity, 11.869 u8 trigger, 11.870 - u32 global_irq) 11.871 + u32 gsi) 11.872 { 11.873 struct mpc_config_intsrc intsrc; 11.874 - int i = 0; 11.875 - int found = 0; 11.876 int ioapic = -1; 11.877 int pin = -1; 11.878 11.879 /* 11.880 - * Convert 'global_irq' to 'ioapic.pin'. 11.881 + * Convert 'gsi' to 'ioapic.pin'. 11.882 */ 11.883 - ioapic = mp_find_ioapic(global_irq); 11.884 + ioapic = mp_find_ioapic(gsi); 11.885 if (ioapic < 0) 11.886 return; 11.887 - pin = global_irq - mp_ioapic_routing[ioapic].irq_start; 11.888 + pin = gsi - mp_ioapic_routing[ioapic].gsi_base; 11.889 11.890 /* 11.891 * TBD: This check is for faulty timer entries, where the override 11.892 @@ -1136,23 +976,9 @@ void __init mp_override_legacy_irq ( 11.893 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 11.894 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); 11.895 11.896 - /* 11.897 - * If an existing [IOAPIC.PIN -> IRQ] routing entry exists we override it. 11.898 - * Otherwise create a new entry (e.g. global_irq == 2). 
11.899 - */ 11.900 - for (i = 0; i < mp_irq_entries; i++) { 11.901 - if ((mp_irqs[i].mpc_srcbus == intsrc.mpc_srcbus) 11.902 - && (mp_irqs[i].mpc_srcbusirq == intsrc.mpc_srcbusirq)) { 11.903 - mp_irqs[i] = intsrc; 11.904 - found = 1; 11.905 - break; 11.906 - } 11.907 - } 11.908 - if (!found) { 11.909 - mp_irqs[mp_irq_entries] = intsrc; 11.910 - if (++mp_irq_entries == MAX_IRQ_SOURCES) 11.911 - panic("Max # of irq sources exceeded!\n"); 11.912 - } 11.913 + mp_irqs[mp_irq_entries] = intsrc; 11.914 + if (++mp_irq_entries == MAX_IRQ_SOURCES) 11.915 + panic("Max # of irq sources exceeded!\n"); 11.916 11.917 return; 11.918 } 11.919 @@ -1160,35 +986,22 @@ void __init mp_override_legacy_irq ( 11.920 11.921 void __init mp_config_acpi_legacy_irqs (void) 11.922 { 11.923 + struct mpc_config_intsrc intsrc; 11.924 int i = 0; 11.925 int ioapic = -1; 11.926 11.927 - /* 11.928 - * Initialize mp_irqs for IRQ configuration. 11.929 - */ 11.930 - unsigned char *bus_data; 11.931 - int count; 11.932 - 11.933 - count = (MAX_MP_BUSSES * sizeof(int)) * 4; 11.934 - count += (MAX_IRQ_SOURCES * sizeof(int)) * 4; 11.935 - bus_data = (void *)alloc_xenheap_pages(get_order(count)); 11.936 - if (!bus_data) { 11.937 - panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!"); 11.938 - } 11.939 - mp_bus_id_to_type = (int *)&bus_data[0]; 11.940 - mp_bus_id_to_node = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int))]; 11.941 - mp_bus_id_to_local = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 2]; 11.942 - mp_bus_id_to_pci_bus = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 3]; 11.943 - mp_irqs = (struct mpc_config_intsrc *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 4]; 11.944 - for (i = 0; i < MAX_MP_BUSSES; ++i) 11.945 - mp_bus_id_to_pci_bus[i] = -1; 11.946 - 11.947 /* 11.948 * Fabricate the legacy ISA bus (bus #31). 11.949 */ 11.950 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; 11.951 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); 11.952 11.953 + /* 11.954 + * ES7000 has no legacy identity mappings 11.955 + */ 11.956 + if (es7000_plat) 11.957 + return; 11.958 + 11.959 /* 11.960 * Locate the IOAPIC that manages the ISA IRQs (0-15). 11.961 */ 11.962 @@ -1196,118 +1009,101 @@ void __init mp_config_acpi_legacy_irqs ( 11.963 if (ioapic < 0) 11.964 return; 11.965 11.966 + intsrc.mpc_type = MP_INTSRC; 11.967 + intsrc.mpc_irqflag = 0; /* Conforming */ 11.968 + intsrc.mpc_srcbus = MP_ISA_BUS; 11.969 + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; 11.970 + 11.971 /* 11.972 - * Use the default configuration for the IRQs 0-15. These may be 11.973 + * Use the default configuration for the IRQs 0-15. Unless 11.974 * overriden by (MADT) interrupt source override entries. 11.975 */ 11.976 for (i = 0; i < 16; i++) { 11.977 + int idx; 11.978 11.979 - if (i == 2) 11.980 - continue; /* Don't connect IRQ2 */ 11.981 + for (idx = 0; idx < mp_irq_entries; idx++) { 11.982 + struct mpc_config_intsrc *irq = mp_irqs + idx; 11.983 + 11.984 + /* Do we already have a mapping for this ISA IRQ? 
*/ 11.985 + if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) 11.986 + break; 11.987 11.988 - mp_irqs[mp_irq_entries].mpc_type = MP_INTSRC; 11.989 - mp_irqs[mp_irq_entries].mpc_irqflag = 0; /* Conforming */ 11.990 - mp_irqs[mp_irq_entries].mpc_srcbus = MP_ISA_BUS; 11.991 - mp_irqs[mp_irq_entries].mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; 11.992 - mp_irqs[mp_irq_entries].mpc_irqtype = mp_INT; 11.993 - mp_irqs[mp_irq_entries].mpc_srcbusirq = i; /* Identity mapped */ 11.994 - mp_irqs[mp_irq_entries].mpc_dstirq = i; 11.995 + /* Do we already have a mapping for this IOAPIC pin */ 11.996 + if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && 11.997 + (irq->mpc_dstirq == i)) 11.998 + break; 11.999 + } 11.1000 + 11.1001 + if (idx != mp_irq_entries) { 11.1002 + printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); 11.1003 + continue; /* IRQ already used */ 11.1004 + } 11.1005 + 11.1006 + intsrc.mpc_irqtype = mp_INT; 11.1007 + intsrc.mpc_srcbusirq = i; /* Identity mapped */ 11.1008 + intsrc.mpc_dstirq = i; 11.1009 11.1010 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " 11.1011 - "%d-%d\n", 11.1012 - mp_irqs[mp_irq_entries].mpc_irqtype, 11.1013 - mp_irqs[mp_irq_entries].mpc_irqflag & 3, 11.1014 - (mp_irqs[mp_irq_entries].mpc_irqflag >> 2) & 3, 11.1015 - mp_irqs[mp_irq_entries].mpc_srcbus, 11.1016 - mp_irqs[mp_irq_entries].mpc_srcbusirq, 11.1017 - mp_irqs[mp_irq_entries].mpc_dstapic, 11.1018 - mp_irqs[mp_irq_entries].mpc_dstirq); 11.1019 + "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 11.1020 + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 11.1021 + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 11.1022 + intsrc.mpc_dstirq); 11.1023 11.1024 + mp_irqs[mp_irq_entries] = intsrc; 11.1025 if (++mp_irq_entries == MAX_IRQ_SOURCES) 11.1026 panic("Max # of irq sources exceeded!\n"); 11.1027 } 11.1028 } 11.1029 11.1030 -#ifdef CONFIG_ACPI_PCI 11.1031 - 11.1032 -void __init mp_parse_prt (void) 11.1033 +int mp_register_gsi (u32 gsi, int edge_level, int active_high_low) 11.1034 { 11.1035 - struct acpi_prt_entry *entry = NULL; 11.1036 int ioapic = -1; 11.1037 int ioapic_pin = 0; 11.1038 - int irq = 0; 11.1039 int idx, bit = 0; 11.1040 - int edge_level = 0; 11.1041 - int active_high_low = 0; 11.1042 + 11.1043 +#ifdef CONFIG_ACPI_BUS 11.1044 + /* Don't set up the ACPI SCI because it's already set up */ 11.1045 + if (acpi_fadt.sci_int == gsi) 11.1046 + return gsi; 11.1047 +#endif 11.1048 11.1049 - /* 11.1050 - * Parsing through the PCI Interrupt Routing Table (PRT) and program 11.1051 - * routing for all entries. 11.1052 - */ 11.1053 - list_for_each_entry(entry, &acpi_prt.entries, node) { 11.1054 - /* Need to get irq for dynamic entry */ 11.1055 - if (entry->link.handle) { 11.1056 - irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low); 11.1057 - if (!irq) 11.1058 - continue; 11.1059 - } 11.1060 - else { 11.1061 - /* Hardwired IRQ. 
Assume PCI standard settings */ 11.1062 - irq = entry->link.index; 11.1063 - edge_level = 1; 11.1064 - active_high_low = 1; 11.1065 - } 11.1066 + ioapic = mp_find_ioapic(gsi); 11.1067 + if (ioapic < 0) { 11.1068 + printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); 11.1069 + return gsi; 11.1070 + } 11.1071 11.1072 - /* Don't set up the ACPI SCI because it's already set up */ 11.1073 - if (acpi_fadt.sci_int == irq) { 11.1074 - entry->irq = irq; /*we still need to set entry's irq*/ 11.1075 - continue; 11.1076 - } 11.1077 - 11.1078 - ioapic = mp_find_ioapic(irq); 11.1079 - if (ioapic < 0) 11.1080 - continue; 11.1081 - ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start; 11.1082 + ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; 11.1083 + 11.1084 + if (ioapic_renumber_irq) 11.1085 + gsi = ioapic_renumber_irq(ioapic, gsi); 11.1086 11.1087 - /* 11.1088 - * Avoid pin reprogramming. PRTs typically include entries 11.1089 - * with redundant pin->irq mappings (but unique PCI devices); 11.1090 - * we only only program the IOAPIC on the first. 11.1091 - */ 11.1092 - bit = ioapic_pin % 32; 11.1093 - idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); 11.1094 - if (idx > 3) { 11.1095 - printk(KERN_ERR "Invalid reference to IOAPIC pin " 11.1096 - "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 11.1097 - ioapic_pin); 11.1098 - continue; 11.1099 - } 11.1100 - if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 11.1101 - Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 11.1102 - mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 11.1103 - entry->irq = irq; 11.1104 - continue; 11.1105 - } 11.1106 + /* 11.1107 + * Avoid pin reprogramming. PRTs typically include entries 11.1108 + * with redundant pin->gsi mappings (but unique PCI devices); 11.1109 + * we only program the IOAPIC on the first. 11.1110 + */ 11.1111 + bit = ioapic_pin % 32; 11.1112 + idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); 11.1113 + if (idx > 3) { 11.1114 + printk(KERN_ERR "Invalid reference to IOAPIC pin " 11.1115 + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 11.1116 + ioapic_pin); 11.1117 + return gsi; 11.1118 + } 11.1119 + if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 11.1120 + Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 11.1121 + mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 11.1122 + return gsi; 11.1123 + } 11.1124 11.1125 - mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); 11.1126 - 11.1127 - if (!io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low)) 11.1128 - entry->irq = irq; 11.1129 + mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); 11.1130 11.1131 - printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> IRQ %d\n", 11.1132 - entry->id.segment, entry->id.bus, 11.1133 - entry->id.device, ('A' + entry->pin), 11.1134 - mp_ioapic_routing[ioapic].apic_id, ioapic_pin, 11.1135 - entry->irq); 11.1136 - } 11.1137 - 11.1138 - print_IO_APIC(); 11.1139 - 11.1140 - return; 11.1141 + io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, 11.1142 + edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1, 11.1143 + active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1); 11.1144 + return gsi; 11.1145 } 11.1146 11.1147 -#endif /*CONFIG_ACPI_PCI*/ 11.1148 - 11.1149 -#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/ 11.1150 - 11.1151 -#endif /*CONFIG_ACPI*/ 11.1152 +#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/ 11.1153 +#endif /*CONFIG_ACPI_BOOT*/
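The PRT walk in mp_parse_prt() is replaced by a per-interrupt entry point, mp_register_gsi(). A minimal usage sketch (illustrative only; GSI 20 is a made-up value, and the constants are the ACPI trigger/polarity values already referenced in the hunk above):

    /* Program the IOAPIC pin behind GSI 20 as edge-triggered, active-high.
     * Anything other than ACPI_EDGE_SENSITIVE / ACPI_ACTIVE_HIGH is
     * programmed as level-triggered / active-low, per the ternaries in
     * mp_register_gsi() above. */
    u32 gsi = mp_register_gsi(20, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);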
12.1 --- a/xen/arch/x86/mtrr/main.c Mon May 09 14:34:59 2005 +0000 12.2 +++ b/xen/arch/x86/mtrr/main.c Mon May 09 17:50:11 2005 +0000 12.3 @@ -35,6 +35,7 @@ 12.4 #include <xen/init.h> 12.5 #include <xen/pci.h> 12.6 #include <xen/smp.h> 12.7 +#include <xen/spinlock.h> 12.8 #include <asm/mtrr.h> 12.9 #include <asm/uaccess.h> 12.10 #include <asm/processor.h>
13.1 --- a/xen/arch/x86/nmi.c Mon May 09 14:34:59 2005 +0000 13.2 +++ b/xen/arch/x86/nmi.c Mon May 09 17:50:11 2005 +0000 13.3 @@ -86,24 +86,20 @@ extern int logical_proc_id[]; 13.4 int __init check_nmi_watchdog (void) 13.5 { 13.6 unsigned int prev_nmi_count[NR_CPUS]; 13.7 - int j, cpu; 13.8 + int cpu; 13.9 13.10 if ( !nmi_watchdog ) 13.11 return 0; 13.12 13.13 printk("Testing NMI watchdog --- "); 13.14 13.15 - for ( j = 0; j < smp_num_cpus; j++ ) 13.16 - { 13.17 - cpu = cpu_logical_map(j); 13.18 + for ( cpu = 0; cpu < smp_num_cpus; cpu++ ) 13.19 prev_nmi_count[cpu] = nmi_count(cpu); 13.20 - } 13.21 __sti(); 13.22 mdelay((10*1000)/nmi_hz); /* wait 10 ticks */ 13.23 13.24 - for ( j = 0; j < smp_num_cpus; j++ ) 13.25 + for ( cpu = 0; cpu < smp_num_cpus; cpu++ ) 13.26 { 13.27 - cpu = cpu_logical_map(j); 13.28 if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 ) 13.29 printk("CPU#%d stuck. ", cpu); 13.30 else
14.1 --- a/xen/arch/x86/physdev.c Mon May 09 14:34:59 2005 +0000 14.2 +++ b/xen/arch/x86/physdev.c Mon May 09 17:50:11 2005 +0000 14.3 @@ -10,8 +10,6 @@ 14.4 #include <public/xen.h> 14.5 #include <public/physdev.h> 14.6 14.7 -extern void (*interrupt[])(void); 14.8 - 14.9 extern int ioapic_guest_read(int apicid, int address, u32 *pval); 14.10 extern int ioapic_guest_write(int apicid, int address, u32 pval); 14.11
15.1 --- a/xen/arch/x86/smpboot.c Mon May 09 14:34:59 2005 +0000 15.2 +++ b/xen/arch/x86/smpboot.c Mon May 09 17:50:11 2005 +0000 15.3 @@ -48,8 +48,8 @@ 15.4 #include <xen/sched.h> 15.5 #include <xen/delay.h> 15.6 #include <xen/lib.h> 15.7 - 15.8 -#ifdef CONFIG_SMP 15.9 +#include <mach_apic.h> 15.10 +#include <mach_wakecpu.h> 15.11 15.12 /* Cconfigured maximum number of CPUs to activate. We name the parameter 15.13 "maxcpus" rather than max_cpus to be compatible with Linux */ 15.14 @@ -63,10 +63,10 @@ int smp_num_cpus = 1; 15.15 int ht_per_core = 1; 15.16 15.17 /* Bitmask of currently online CPUs */ 15.18 -unsigned long cpu_online_map; 15.19 +cpumask_t cpu_online_map; 15.20 15.21 -static volatile unsigned long cpu_callin_map; 15.22 -static volatile unsigned long cpu_callout_map; 15.23 +cpumask_t cpu_callin_map; 15.24 +cpumask_t cpu_callout_map; 15.25 15.26 /* Per CPU bogomips and other parameters */ 15.27 struct cpuinfo_x86 cpu_data[NR_CPUS]; 15.28 @@ -800,7 +800,8 @@ void __init smp_boot_cpus(void) 15.29 if (!smp_found_config) { 15.30 printk("SMP motherboard not detected.\n"); 15.31 io_apic_irqs = 0; 15.32 - cpu_online_map = phys_cpu_present_map = 1; 15.33 + phys_cpu_present_map = physid_mask_of_physid(0); 15.34 + cpu_online_map = 1; 15.35 smp_num_cpus = 1; 15.36 if (APIC_init_uniprocessor()) 15.37 printk("Local APIC not detected." 15.38 @@ -815,7 +816,7 @@ void __init smp_boot_cpus(void) 15.39 if (!test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) { 15.40 printk("weird, boot CPU (#%d) not listed by the BIOS.\n", 15.41 boot_cpu_physical_apicid); 15.42 - phys_cpu_present_map |= (1 << hard_smp_processor_id()); 15.43 + physid_set(hard_smp_processor_id(), phys_cpu_present_map); 15.44 } 15.45 15.46 /* 15.47 @@ -827,7 +828,8 @@ void __init smp_boot_cpus(void) 15.48 boot_cpu_physical_apicid); 15.49 printk("... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); 15.50 io_apic_irqs = 0; 15.51 - cpu_online_map = phys_cpu_present_map = 1; 15.52 + phys_cpu_present_map = physid_mask_of_physid(0); 15.53 + cpu_online_map = 1; 15.54 smp_num_cpus = 1; 15.55 goto smp_done; 15.56 } 15.57 @@ -841,7 +843,8 @@ void __init smp_boot_cpus(void) 15.58 smp_found_config = 0; 15.59 printk("SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 15.60 io_apic_irqs = 0; 15.61 - cpu_online_map = phys_cpu_present_map = 1; 15.62 + phys_cpu_present_map = physid_mask_of_physid(0); 15.63 + cpu_online_map = 1; 15.64 smp_num_cpus = 1; 15.65 goto smp_done; 15.66 } 15.67 @@ -875,7 +878,7 @@ void __init smp_boot_cpus(void) 15.68 if (opt_noht && (apicid & (ht_per_core - 1))) 15.69 continue; 15.70 15.71 - if (!(phys_cpu_present_map & (1 << bit))) 15.72 + if (!check_apicid_present(bit)) 15.73 continue; 15.74 if ((max_cpus >= 0) && (max_cpus <= cpucount+1)) 15.75 continue; 15.76 @@ -886,7 +889,7 @@ void __init smp_boot_cpus(void) 15.77 * Make sure we unmap all failed CPUs 15.78 */ 15.79 if ((boot_apicid_to_cpu(apicid) == -1) && 15.80 - (phys_cpu_present_map & (1 << bit))) 15.81 + (!check_apicid_present(bit))) 15.82 printk("CPU #%d not responding - cannot use it.\n", 15.83 apicid); 15.84 } 15.85 @@ -923,7 +926,10 @@ void __init smp_boot_cpus(void) 15.86 if ( nr_ioapics ) setup_IO_APIC(); 15.87 15.88 /* Set up all local APIC timers in the system. */ 15.89 - setup_APIC_clocks(); 15.90 + { 15.91 + extern void setup_APIC_clocks(void); 15.92 + setup_APIC_clocks(); 15.93 + } 15.94 15.95 /* Synchronize the TSC with the AP(s). 
*/ 15.96 if ( cpucount ) synchronize_tsc_bp(); 15.97 @@ -932,8 +938,6 @@ void __init smp_boot_cpus(void) 15.98 ; 15.99 } 15.100 15.101 -#endif /* CONFIG_SMP */ 15.102 - 15.103 /* 15.104 * Local variables: 15.105 * mode: C
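For reference, the open-coded bit arithmetic on phys_cpu_present_map corresponds to the physid_mask_t helpers pulled in from the Linux 2.6.11 mach-default headers roughly as follows (illustrative comparison drawn from the hunks above, not additional patch content):

    /*
     * old (unsigned long bitmask)               new (physid_mask_t helpers)
     * ---------------------------               ---------------------------
     * map |= 1 << hard_smp_processor_id();      physid_set(hard_smp_processor_id(), map);
     * if (map & (1 << bit)) ...                 if (check_apicid_present(bit)) ...
     * map = 1;   (boot CPU only)                map = physid_mask_of_physid(0);
     */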
16.1 --- a/xen/arch/x86/x86_32/asm-offsets.c Mon May 09 14:34:59 2005 +0000 16.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c Mon May 09 17:50:11 2005 +0000 16.3 @@ -90,7 +90,7 @@ void __dummy__(void) 16.4 OFFSET(MULTICALL_result, multicall_entry_t, args[5]); 16.5 BLANK(); 16.6 16.7 - DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE)); 16.8 + DEFINE(FIXMAP_apic_base, __fix_to_virt(FIX_APIC_BASE)); 16.9 BLANK(); 16.10 16.11 DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
17.1 --- a/xen/arch/x86/x86_32/mm.c Mon May 09 14:34:59 2005 +0000 17.2 +++ b/xen/arch/x86/x86_32/mm.c Mon May 09 17:50:11 2005 +0000 17.3 @@ -83,7 +83,7 @@ void __set_fixmap( 17.4 { 17.5 if ( unlikely(idx >= __end_of_fixed_addresses) ) 17.6 BUG(); 17.7 - map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags); 17.8 + map_pages(idle_pg_table, __fix_to_virt(idx), p, PAGE_SIZE, flags); 17.9 } 17.10 17.11
18.1 --- a/xen/arch/x86/x86_32/traps.c Mon May 09 14:34:59 2005 +0000 18.2 +++ b/xen/arch/x86/x86_32/traps.c Mon May 09 17:50:11 2005 +0000 18.3 @@ -273,12 +273,8 @@ long set_fast_trap(struct exec_domain *p 18.4 return 0; 18.5 } 18.6 18.7 - /* 18.8 - * We only fast-trap vectors 0x20-0x2f, and vector 0x80. 18.9 - * The former range is used by Windows and MS-DOS. 18.10 - * Vector 0x80 is used by Linux and the BSD variants. 18.11 - */ 18.12 - if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 18.13 + /* We only fast-trap vector 0x80 (used by Linux and the BSD variants). */ 18.14 + if ( idx != 0x80 ) 18.15 return -1; 18.16 18.17 ti = &p->arch.guest_context.trap_ctxt[idx];
19.1 --- a/xen/arch/x86/x86_64/mm.c Mon May 09 14:34:59 2005 +0000 19.2 +++ b/xen/arch/x86/x86_64/mm.c Mon May 09 17:50:11 2005 +0000 19.3 @@ -125,7 +125,7 @@ void __set_fixmap( 19.4 { 19.5 if ( unlikely(idx >= __end_of_fixed_addresses) ) 19.6 BUG(); 19.7 - map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags); 19.8 + map_pages(idle_pg_table, __fix_to_virt(idx), p, PAGE_SIZE, flags); 19.9 } 19.10 19.11
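Both __set_fixmap() variants switch from fix_to_virt() to __fix_to_virt(). A sketch of the distinction, assuming the Linux 2.6.11 fixmap header this code is being aligned with (the inline below is an assumption, not part of this changeset):

    /* fix_to_virt() is intended for compile-time-constant indices; an
     * out-of-range constant becomes a link-time error because the called
     * function is never defined: */
    static inline unsigned long fix_to_virt(const unsigned int idx)
    {
            if (idx >= __end_of_fixed_addresses)
                    __this_fixmap_does_not_exist();
            return __fix_to_virt(idx);
    }
    /* __set_fixmap() takes a runtime index and already BUG()s on bad values,
     * so it now uses the raw __fix_to_virt() address computation directly. */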
20.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 20.2 +++ b/xen/common/bitmap.c Mon May 09 17:50:11 2005 +0000 20.3 @@ -0,0 +1,365 @@ 20.4 +/* 20.5 + * lib/bitmap.c 20.6 + * Helper functions for bitmap.h. 20.7 + * 20.8 + * This source code is licensed under the GNU General Public License, 20.9 + * Version 2. See the file COPYING for more details. 20.10 + */ 20.11 +#include <xen/config.h> 20.12 +#include <xen/types.h> 20.13 +#include <xen/errno.h> 20.14 +#include <xen/bitmap.h> 20.15 +#include <xen/bitops.h> 20.16 +#include <asm/uaccess.h> 20.17 + 20.18 +/* 20.19 + * bitmaps provide an array of bits, implemented using an an 20.20 + * array of unsigned longs. The number of valid bits in a 20.21 + * given bitmap does _not_ need to be an exact multiple of 20.22 + * BITS_PER_LONG. 20.23 + * 20.24 + * The possible unused bits in the last, partially used word 20.25 + * of a bitmap are 'don't care'. The implementation makes 20.26 + * no particular effort to keep them zero. It ensures that 20.27 + * their value will not affect the results of any operation. 20.28 + * The bitmap operations that return Boolean (bitmap_empty, 20.29 + * for example) or scalar (bitmap_weight, for example) results 20.30 + * carefully filter out these unused bits from impacting their 20.31 + * results. 20.32 + * 20.33 + * These operations actually hold to a slightly stronger rule: 20.34 + * if you don't input any bitmaps to these ops that have some 20.35 + * unused bits set, then they won't output any set unused bits 20.36 + * in output bitmaps. 20.37 + * 20.38 + * The byte ordering of bitmaps is more natural on little 20.39 + * endian architectures. See the big-endian headers 20.40 + * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h 20.41 + * for the best explanations of this ordering. 
20.42 + */ 20.43 + 20.44 +int __bitmap_empty(const unsigned long *bitmap, int bits) 20.45 +{ 20.46 + int k, lim = bits/BITS_PER_LONG; 20.47 + for (k = 0; k < lim; ++k) 20.48 + if (bitmap[k]) 20.49 + return 0; 20.50 + 20.51 + if (bits % BITS_PER_LONG) 20.52 + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) 20.53 + return 0; 20.54 + 20.55 + return 1; 20.56 +} 20.57 +EXPORT_SYMBOL(__bitmap_empty); 20.58 + 20.59 +int __bitmap_full(const unsigned long *bitmap, int bits) 20.60 +{ 20.61 + int k, lim = bits/BITS_PER_LONG; 20.62 + for (k = 0; k < lim; ++k) 20.63 + if (~bitmap[k]) 20.64 + return 0; 20.65 + 20.66 + if (bits % BITS_PER_LONG) 20.67 + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) 20.68 + return 0; 20.69 + 20.70 + return 1; 20.71 +} 20.72 +EXPORT_SYMBOL(__bitmap_full); 20.73 + 20.74 +int __bitmap_equal(const unsigned long *bitmap1, 20.75 + const unsigned long *bitmap2, int bits) 20.76 +{ 20.77 + int k, lim = bits/BITS_PER_LONG; 20.78 + for (k = 0; k < lim; ++k) 20.79 + if (bitmap1[k] != bitmap2[k]) 20.80 + return 0; 20.81 + 20.82 + if (bits % BITS_PER_LONG) 20.83 + if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 20.84 + return 0; 20.85 + 20.86 + return 1; 20.87 +} 20.88 +EXPORT_SYMBOL(__bitmap_equal); 20.89 + 20.90 +void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) 20.91 +{ 20.92 + int k, lim = bits/BITS_PER_LONG; 20.93 + for (k = 0; k < lim; ++k) 20.94 + dst[k] = ~src[k]; 20.95 + 20.96 + if (bits % BITS_PER_LONG) 20.97 + dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); 20.98 +} 20.99 +EXPORT_SYMBOL(__bitmap_complement); 20.100 + 20.101 +/* 20.102 + * __bitmap_shift_right - logical right shift of the bits in a bitmap 20.103 + * @dst - destination bitmap 20.104 + * @src - source bitmap 20.105 + * @nbits - shift by this many bits 20.106 + * @bits - bitmap size, in bits 20.107 + * 20.108 + * Shifting right (dividing) means moving bits in the MS -> LS bit 20.109 + * direction. Zeros are fed into the vacated MS positions and the 20.110 + * LS bits shifted off the bottom are lost. 20.111 + */ 20.112 +void __bitmap_shift_right(unsigned long *dst, 20.113 + const unsigned long *src, int shift, int bits) 20.114 +{ 20.115 + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; 20.116 + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; 20.117 + unsigned long mask = (1UL << left) - 1; 20.118 + for (k = 0; off + k < lim; ++k) { 20.119 + unsigned long upper, lower; 20.120 + 20.121 + /* 20.122 + * If shift is not word aligned, take lower rem bits of 20.123 + * word above and make them the top rem bits of result. 20.124 + */ 20.125 + if (!rem || off + k + 1 >= lim) 20.126 + upper = 0; 20.127 + else { 20.128 + upper = src[off + k + 1]; 20.129 + if (off + k + 1 == lim - 1 && left) 20.130 + upper &= mask; 20.131 + } 20.132 + lower = src[off + k]; 20.133 + if (left && off + k == lim - 1) 20.134 + lower &= mask; 20.135 + dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; 20.136 + if (left && k == lim - 1) 20.137 + dst[k] &= mask; 20.138 + } 20.139 + if (off) 20.140 + memset(&dst[lim - off], 0, off*sizeof(unsigned long)); 20.141 +} 20.142 +EXPORT_SYMBOL(__bitmap_shift_right); 20.143 + 20.144 + 20.145 +/* 20.146 + * __bitmap_shift_left - logical left shift of the bits in a bitmap 20.147 + * @dst - destination bitmap 20.148 + * @src - source bitmap 20.149 + * @nbits - shift by this many bits 20.150 + * @bits - bitmap size, in bits 20.151 + * 20.152 + * Shifting left (multiplying) means moving bits in the LS -> MS 20.153 + * direction. 
Zeros are fed into the vacated LS bit positions 20.154 + * and those MS bits shifted off the top are lost. 20.155 + */ 20.156 + 20.157 +void __bitmap_shift_left(unsigned long *dst, 20.158 + const unsigned long *src, int shift, int bits) 20.159 +{ 20.160 + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; 20.161 + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; 20.162 + for (k = lim - off - 1; k >= 0; --k) { 20.163 + unsigned long upper, lower; 20.164 + 20.165 + /* 20.166 + * If shift is not word aligned, take upper rem bits of 20.167 + * word below and make them the bottom rem bits of result. 20.168 + */ 20.169 + if (rem && k > 0) 20.170 + lower = src[k - 1]; 20.171 + else 20.172 + lower = 0; 20.173 + upper = src[k]; 20.174 + if (left && k == lim - 1) 20.175 + upper &= (1UL << left) - 1; 20.176 + dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; 20.177 + if (left && k + off == lim - 1) 20.178 + dst[k + off] &= (1UL << left) - 1; 20.179 + } 20.180 + if (off) 20.181 + memset(dst, 0, off*sizeof(unsigned long)); 20.182 +} 20.183 +EXPORT_SYMBOL(__bitmap_shift_left); 20.184 + 20.185 +void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 20.186 + const unsigned long *bitmap2, int bits) 20.187 +{ 20.188 + int k; 20.189 + int nr = BITS_TO_LONGS(bits); 20.190 + 20.191 + for (k = 0; k < nr; k++) 20.192 + dst[k] = bitmap1[k] & bitmap2[k]; 20.193 +} 20.194 +EXPORT_SYMBOL(__bitmap_and); 20.195 + 20.196 +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, 20.197 + const unsigned long *bitmap2, int bits) 20.198 +{ 20.199 + int k; 20.200 + int nr = BITS_TO_LONGS(bits); 20.201 + 20.202 + for (k = 0; k < nr; k++) 20.203 + dst[k] = bitmap1[k] | bitmap2[k]; 20.204 +} 20.205 +EXPORT_SYMBOL(__bitmap_or); 20.206 + 20.207 +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, 20.208 + const unsigned long *bitmap2, int bits) 20.209 +{ 20.210 + int k; 20.211 + int nr = BITS_TO_LONGS(bits); 20.212 + 20.213 + for (k = 0; k < nr; k++) 20.214 + dst[k] = bitmap1[k] ^ bitmap2[k]; 20.215 +} 20.216 +EXPORT_SYMBOL(__bitmap_xor); 20.217 + 20.218 +void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, 20.219 + const unsigned long *bitmap2, int bits) 20.220 +{ 20.221 + int k; 20.222 + int nr = BITS_TO_LONGS(bits); 20.223 + 20.224 + for (k = 0; k < nr; k++) 20.225 + dst[k] = bitmap1[k] & ~bitmap2[k]; 20.226 +} 20.227 +EXPORT_SYMBOL(__bitmap_andnot); 20.228 + 20.229 +int __bitmap_intersects(const unsigned long *bitmap1, 20.230 + const unsigned long *bitmap2, int bits) 20.231 +{ 20.232 + int k, lim = bits/BITS_PER_LONG; 20.233 + for (k = 0; k < lim; ++k) 20.234 + if (bitmap1[k] & bitmap2[k]) 20.235 + return 1; 20.236 + 20.237 + if (bits % BITS_PER_LONG) 20.238 + if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 20.239 + return 1; 20.240 + return 0; 20.241 +} 20.242 +EXPORT_SYMBOL(__bitmap_intersects); 20.243 + 20.244 +int __bitmap_subset(const unsigned long *bitmap1, 20.245 + const unsigned long *bitmap2, int bits) 20.246 +{ 20.247 + int k, lim = bits/BITS_PER_LONG; 20.248 + for (k = 0; k < lim; ++k) 20.249 + if (bitmap1[k] & ~bitmap2[k]) 20.250 + return 0; 20.251 + 20.252 + if (bits % BITS_PER_LONG) 20.253 + if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 20.254 + return 0; 20.255 + return 1; 20.256 +} 20.257 +EXPORT_SYMBOL(__bitmap_subset); 20.258 + 20.259 +#if BITS_PER_LONG == 32 20.260 +int __bitmap_weight(const unsigned long *bitmap, int bits) 20.261 +{ 20.262 + int k, w = 0, lim = bits/BITS_PER_LONG; 
20.263 + 20.264 + for (k = 0; k < lim; k++) 20.265 + w += hweight32(bitmap[k]); 20.266 + 20.267 + if (bits % BITS_PER_LONG) 20.268 + w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); 20.269 + 20.270 + return w; 20.271 +} 20.272 +#else 20.273 +int __bitmap_weight(const unsigned long *bitmap, int bits) 20.274 +{ 20.275 + int k, w = 0, lim = bits/BITS_PER_LONG; 20.276 + 20.277 + for (k = 0; k < lim; k++) 20.278 + w += hweight64(bitmap[k]); 20.279 + 20.280 + if (bits % BITS_PER_LONG) 20.281 + w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); 20.282 + 20.283 + return w; 20.284 +} 20.285 +#endif 20.286 +EXPORT_SYMBOL(__bitmap_weight); 20.287 + 20.288 +/** 20.289 + * bitmap_find_free_region - find a contiguous aligned mem region 20.290 + * @bitmap: an array of unsigned longs corresponding to the bitmap 20.291 + * @bits: number of bits in the bitmap 20.292 + * @order: region size to find (size is actually 1<<order) 20.293 + * 20.294 + * This is used to allocate a memory region from a bitmap. The idea is 20.295 + * that the region has to be 1<<order sized and 1<<order aligned (this 20.296 + * makes the search algorithm much faster). 20.297 + * 20.298 + * The region is marked as set bits in the bitmap if a free one is 20.299 + * found. 20.300 + * 20.301 + * Returns either beginning of region or negative error 20.302 + */ 20.303 +int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) 20.304 +{ 20.305 + unsigned long mask; 20.306 + int pages = 1 << order; 20.307 + int i; 20.308 + 20.309 + if(pages > BITS_PER_LONG) 20.310 + return -EINVAL; 20.311 + 20.312 + /* make a mask of the order */ 20.313 + mask = (1ul << (pages - 1)); 20.314 + mask += mask - 1; 20.315 + 20.316 + /* run up the bitmap pages bits at a time */ 20.317 + for (i = 0; i < bits; i += pages) { 20.318 + int index = i/BITS_PER_LONG; 20.319 + int offset = i - (index * BITS_PER_LONG); 20.320 + if((bitmap[index] & (mask << offset)) == 0) { 20.321 + /* set region in bimap */ 20.322 + bitmap[index] |= (mask << offset); 20.323 + return i; 20.324 + } 20.325 + } 20.326 + return -ENOMEM; 20.327 +} 20.328 +EXPORT_SYMBOL(bitmap_find_free_region); 20.329 + 20.330 +/** 20.331 + * bitmap_release_region - release allocated bitmap region 20.332 + * @bitmap: a pointer to the bitmap 20.333 + * @pos: the beginning of the region 20.334 + * @order: the order of the bits to release (number is 1<<order) 20.335 + * 20.336 + * This is the complement to __bitmap_find_free_region and releases 20.337 + * the found region (by clearing it in the bitmap). 20.338 + */ 20.339 +void bitmap_release_region(unsigned long *bitmap, int pos, int order) 20.340 +{ 20.341 + int pages = 1 << order; 20.342 + unsigned long mask = (1ul << (pages - 1)); 20.343 + int index = pos/BITS_PER_LONG; 20.344 + int offset = pos - (index * BITS_PER_LONG); 20.345 + mask += mask - 1; 20.346 + bitmap[index] &= ~(mask << offset); 20.347 +} 20.348 +EXPORT_SYMBOL(bitmap_release_region); 20.349 + 20.350 +int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) 20.351 +{ 20.352 + int pages = 1 << order; 20.353 + unsigned long mask = (1ul << (pages - 1)); 20.354 + int index = pos/BITS_PER_LONG; 20.355 + int offset = pos - (index * BITS_PER_LONG); 20.356 + 20.357 + /* We don't do regions of pages > BITS_PER_LONG. The 20.358 + * algorithm would be a simple look for multiple zeros in the 20.359 + * array, but there's no driver today that needs this. If you 20.360 + * trip this BUG(), you get to code it... 
*/ 20.361 + BUG_ON(pages > BITS_PER_LONG); 20.362 + mask += mask - 1; 20.363 + if (bitmap[index] & (mask << offset)) 20.364 + return -EBUSY; 20.365 + bitmap[index] |= (mask << offset); 20.366 + return 0; 20.367 +} 20.368 +EXPORT_SYMBOL(bitmap_allocate_region);
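A minimal usage sketch for the new region helpers (illustrative only; the 64-bit map and the order-2 request are made-up values):

    static unsigned long map[BITS_TO_LONGS(64)];            /* all bits clear */

    static void region_example(void)
    {
            int pos = bitmap_find_free_region(map, 64, 2);  /* 1<<2 = 4 aligned bits */

            if (pos >= 0) {
                    /* bits pos..pos+3 are now set and owned by the caller */
                    bitmap_release_region(map, pos, 2);     /* clear them again */
            }
    }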
21.1 --- a/xen/common/xmalloc.c Mon May 09 14:34:59 2005 +0000 21.2 +++ b/xen/common/xmalloc.c Mon May 09 17:50:11 2005 +0000 21.3 @@ -42,7 +42,7 @@ struct xmalloc_hdr 21.4 /* Total including this hdr. */ 21.5 size_t size; 21.6 struct list_head freelist; 21.7 -} __attribute__((__aligned__(SMP_CACHE_BYTES))); 21.8 +} __cacheline_aligned; 21.9 21.10 static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block) 21.11 {
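Only the spelling of the alignment attribute changes here; the assumption (marked as such) is the usual Linux-style definition elsewhere in the tree:

    /* Assumed definition, along the lines of:
     *   #define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
     * so struct xmalloc_hdr keeps its cache-line alignment; only the name now
     * matches Linux 2.6.11. */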
22.1 --- a/xen/drivers/acpi/tables.c Mon May 09 14:34:59 2005 +0000 22.2 +++ b/xen/drivers/acpi/tables.c Mon May 09 17:50:11 2005 +0000 22.3 @@ -58,6 +58,7 @@ static char *acpi_table_signatures[ACPI_ 22.4 [ACPI_SSDT] = "SSDT", 22.5 [ACPI_SPMI] = "SPMI", 22.6 [ACPI_HPET] = "HPET", 22.7 + [ACPI_MCFG] = "MCFG", 22.8 }; 22.9 22.10 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; 22.11 @@ -100,7 +101,7 @@ acpi_table_print ( 22.12 else 22.13 name = header->signature; 22.14 22.15 - printk(KERN_INFO PREFIX "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n", 22.16 + printk(KERN_DEBUG PREFIX "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n", 22.17 name, header->revision, header->oem_id, 22.18 header->oem_table_id, header->oem_revision, 22.19 header->asl_compiler_id, header->asl_compiler_revision, 22.20 @@ -130,7 +131,7 @@ acpi_table_print_madt_entry ( 22.21 { 22.22 struct acpi_table_ioapic *p = 22.23 (struct acpi_table_ioapic*) header; 22.24 - printk(KERN_INFO PREFIX "IOAPIC (id[0x%02x] address[0x%08x] global_irq_base[0x%x])\n", 22.25 + printk(KERN_INFO PREFIX "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", 22.26 p->id, p->address, p->global_irq_base); 22.27 } 22.28 break; 22.29 @@ -184,8 +185,8 @@ acpi_table_print_madt_entry ( 22.30 { 22.31 struct acpi_table_iosapic *p = 22.32 (struct acpi_table_iosapic*) header; 22.33 - printk(KERN_INFO PREFIX "IOSAPIC (id[0x%x] global_irq_base[0x%x] address[%p])\n", 22.34 - p->id, p->global_irq_base, (void *) (unsigned long) p->address); 22.35 + printk(KERN_INFO PREFIX "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", 22.36 + p->id, (void *) (unsigned long) p->address, p->global_irq_base); 22.37 } 22.38 break; 22.39 22.40 @@ -285,7 +286,7 @@ acpi_get_table_header_early ( 22.41 *header = (void *) __acpi_map_table(fadt->V1_dsdt, 22.42 sizeof(struct acpi_table_header)); 22.43 } else 22.44 - *header = 0; 22.45 + *header = NULL; 22.46 22.47 if (!*header) { 22.48 printk(KERN_WARNING PREFIX "Unable to map DSDT\n"); 22.49 @@ -302,13 +303,14 @@ acpi_table_parse_madt_family ( 22.50 enum acpi_table_id id, 22.51 unsigned long madt_size, 22.52 int entry_id, 22.53 - acpi_madt_entry_handler handler) 22.54 + acpi_madt_entry_handler handler, 22.55 + unsigned int max_entries) 22.56 { 22.57 void *madt = NULL; 22.58 - acpi_table_entry_header *entry = NULL; 22.59 - unsigned long count = 0; 22.60 - unsigned long madt_end = 0; 22.61 - unsigned int i = 0; 22.62 + acpi_table_entry_header *entry; 22.63 + unsigned int count = 0; 22.64 + unsigned long madt_end; 22.65 + unsigned int i; 22.66 22.67 if (!handler) 22.68 return -EINVAL; 22.69 @@ -341,14 +343,20 @@ acpi_table_parse_madt_family ( 22.70 entry = (acpi_table_entry_header *) 22.71 ((unsigned long) madt + madt_size); 22.72 22.73 - while (((unsigned long) entry) < madt_end) { 22.74 - if (entry->type == entry_id) { 22.75 - count++; 22.76 - handler(entry); 22.77 - } 22.78 + while (((unsigned long) entry) + sizeof(acpi_table_entry_header) < madt_end) { 22.79 + if (entry->type == entry_id && 22.80 + (!max_entries || count++ < max_entries)) 22.81 + if (handler(entry, madt_end)) 22.82 + return -EINVAL; 22.83 + 22.84 entry = (acpi_table_entry_header *) 22.85 ((unsigned long) entry + entry->length); 22.86 } 22.87 + if (max_entries && count > max_entries) { 22.88 + printk(KERN_WARNING PREFIX "[%s:0x%02x] ignored %i entries of " 22.89 + "%i found\n", acpi_table_signatures[id], entry_id, 22.90 + count - max_entries, count); 22.91 + } 22.92 22.93 return count; 22.94 } 22.95 @@ -357,10 +365,11 @@ 
acpi_table_parse_madt_family ( 22.96 int __init 22.97 acpi_table_parse_madt ( 22.98 enum acpi_madt_entry_id id, 22.99 - acpi_madt_entry_handler handler) 22.100 + acpi_madt_entry_handler handler, 22.101 + unsigned int max_entries) 22.102 { 22.103 return acpi_table_parse_madt_family(ACPI_APIC, sizeof(struct acpi_table_madt), 22.104 - id, handler); 22.105 + id, handler, max_entries); 22.106 } 22.107 22.108 22.109 @@ -378,8 +387,13 @@ acpi_table_parse ( 22.110 for (i = 0; i < sdt_count; i++) { 22.111 if (sdt_entry[i].id != id) 22.112 continue; 22.113 - handler(sdt_entry[i].pa, sdt_entry[i].size); 22.114 count++; 22.115 + if (count == 1) 22.116 + handler(sdt_entry[i].pa, sdt_entry[i].size); 22.117 + 22.118 + else 22.119 + printk(KERN_WARNING PREFIX "%d duplicate %s table ignored.\n", 22.120 + count, acpi_table_signatures[id]); 22.121 } 22.122 22.123 return count; 22.124 @@ -543,6 +557,14 @@ acpi_table_get_sdt ( 22.125 return 0; 22.126 } 22.127 22.128 +/* 22.129 + * acpi_table_init() 22.130 + * 22.131 + * find RSDP, find and checksum SDT/XSDT. 22.132 + * checksum all tables, print SDT/XSDT 22.133 + * 22.134 + * result: sdt_entry[] is initialized 22.135 + */ 22.136 22.137 int __init 22.138 acpi_table_init (void) 22.139 @@ -565,7 +587,7 @@ acpi_table_init (void) 22.140 return -ENODEV; 22.141 } 22.142 22.143 - printk(KERN_INFO PREFIX "RSDP (v%3.3d %6.6s ) @ 0x%p\n", 22.144 + printk(KERN_DEBUG PREFIX "RSDP (v%3.3d %6.6s ) @ 0x%p\n", 22.145 rsdp->revision, rsdp->oem_id, (void *) rsdp_phys); 22.146 22.147 if (rsdp->revision < 2) 22.148 @@ -585,4 +607,3 @@ acpi_table_init (void) 22.149 22.150 return 0; 22.151 } 22.152 -
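With the widened acpi_table_parse_madt() signature, a caller supplies a handler that also receives the end of the MADT and can cap the number of accepted entries. A hedged sketch of such a caller (the enum value ACPI_MADT_LAPIC and the MAX_APICS cap follow the Linux 2.6.11 convention and are assumptions here):

    static int __init
    lapic_entry_handler(acpi_table_entry_header *header, const unsigned long end)
    {
            if (!header || (unsigned long)header + header->length > end)
                    return -EINVAL;         /* entry runs past the MADT */

            acpi_table_print_madt_entry(header);
            return 0;                       /* non-zero aborts the walk */
    }

    /* Accept at most MAX_APICS local-APIC entries; anything beyond the cap
     * is counted and warned about by acpi_table_parse_madt_family(). */
    count = acpi_table_parse_madt(ACPI_MADT_LAPIC, lapic_entry_handler, MAX_APICS);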
23.1 --- a/xen/include/acpi/acconfig.h Mon May 09 14:34:59 2005 +0000 23.2 +++ b/xen/include/acpi/acconfig.h Mon May 09 17:50:11 2005 +0000 23.3 @@ -5,7 +5,7 @@ 23.4 *****************************************************************************/ 23.5 23.6 /* 23.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 23.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 23.9 * All rights reserved. 23.10 * 23.11 * Redistribution and use in source and binary forms, with or without 23.12 @@ -64,11 +64,21 @@ 23.13 23.14 /* Version string */ 23.15 23.16 -#define ACPI_CA_VERSION 0x20040116 23.17 +#define ACPI_CA_VERSION 0x20050211 23.18 + 23.19 +/* 23.20 + * OS name, used for the _OS object. The _OS object is essentially obsolete, 23.21 + * but there is a large base of ASL/AML code in existing machines that check 23.22 + * for the string below. The use of this string usually guarantees that 23.23 + * the ASL will execute down the most tested code path. Also, there is some 23.24 + * code that will not execute the _OSI method unless _OS matches the string 23.25 + * below. Therefore, change this string at your own risk. 23.26 + */ 23.27 +#define ACPI_OS_NAME "Microsoft Windows NT" 23.28 23.29 /* Maximum objects in the various object caches */ 23.30 23.31 -#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects for stacks */ 23.32 +#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects */ 23.33 #define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */ 23.34 #define ACPI_MAX_EXTPARSE_CACHE_DEPTH 64 /* Parse tree objects */ 23.35 #define ACPI_MAX_OBJECT_CACHE_DEPTH 64 /* Interpreter operand objects */ 23.36 @@ -89,7 +99,7 @@ 23.37 23.38 /* Version of ACPI supported */ 23.39 23.40 -#define ACPI_CA_SUPPORT_LEVEL 2 23.41 +#define ACPI_CA_SUPPORT_LEVEL 3 23.42 23.43 /* String size constants */ 23.44 23.45 @@ -152,10 +162,11 @@ 23.46 23.47 /* Constants used in searching for the RSDP in low memory */ 23.48 23.49 -#define ACPI_LO_RSDP_WINDOW_BASE 0 /* Physical Address */ 23.50 -#define ACPI_HI_RSDP_WINDOW_BASE 0xE0000 /* Physical Address */ 23.51 -#define ACPI_LO_RSDP_WINDOW_SIZE 0x400 23.52 -#define ACPI_HI_RSDP_WINDOW_SIZE 0x20000 23.53 +#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */ 23.54 +#define ACPI_EBDA_PTR_LENGTH 2 23.55 +#define ACPI_EBDA_WINDOW_SIZE 1024 23.56 +#define ACPI_HI_RSDP_WINDOW_BASE 0x000E0000 /* Physical Address */ 23.57 +#define ACPI_HI_RSDP_WINDOW_SIZE 0x00020000 23.58 #define ACPI_RSDP_SCAN_STEP 16 23.59 23.60 /* Operation regions */ 23.61 @@ -185,6 +196,10 @@ 23.62 23.63 #define ACPI_SMBUS_BUFFER_SIZE 34 23.64 23.65 +/* Number of strings associated with the _OSI reserved method */ 23.66 + 23.67 +#define ACPI_NUM_OSI_STRINGS 9 23.68 + 23.69 23.70 /****************************************************************************** 23.71 *
24.1 --- a/xen/include/acpi/acexcep.h Mon May 09 14:34:59 2005 +0000 24.2 +++ b/xen/include/acpi/acexcep.h Mon May 09 17:50:11 2005 +0000 24.3 @@ -5,7 +5,7 @@ 24.4 *****************************************************************************/ 24.5 24.6 /* 24.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 24.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 24.9 * All rights reserved. 24.10 * 24.11 * Redistribution and use in source and binary forms, with or without 24.12 @@ -95,8 +95,9 @@ 24.13 #define AE_LOGICAL_ADDRESS (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL) 24.14 #define AE_ABORT_METHOD (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL) 24.15 #define AE_SAME_HANDLER (acpi_status) (0x001D | AE_CODE_ENVIRONMENTAL) 24.16 +#define AE_WAKE_ONLY_GPE (acpi_status) (0x001E | AE_CODE_ENVIRONMENTAL) 24.17 24.18 -#define AE_CODE_ENV_MAX 0x001D 24.19 +#define AE_CODE_ENV_MAX 0x001E 24.20 24.21 /* 24.22 * Programmer exceptions 24.23 @@ -165,7 +166,7 @@ 24.24 #define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x0020 | AE_CODE_AML) 24.25 #define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x0021 | AE_CODE_AML) 24.26 24.27 -#define AE_CODE_AML_MAX 0x0020 24.28 +#define AE_CODE_AML_MAX 0x0021 24.29 24.30 /* 24.31 * Internal exceptions used for control 24.32 @@ -222,7 +223,8 @@ char const *acpi_gbl_exception_names_e 24.33 "AE_NO_GLOBAL_LOCK", 24.34 "AE_LOGICAL_ADDRESS", 24.35 "AE_ABORT_METHOD", 24.36 - "AE_SAME_HANDLER" 24.37 + "AE_SAME_HANDLER", 24.38 + "AE_WAKE_ONLY_GPE" 24.39 }; 24.40 24.41 char const *acpi_gbl_exception_names_pgm[] =
25.1 --- a/xen/include/acpi/acglobal.h Mon May 09 14:34:59 2005 +0000 25.2 +++ b/xen/include/acpi/acglobal.h Mon May 09 17:50:11 2005 +0000 25.3 @@ -5,7 +5,7 @@ 25.4 *****************************************************************************/ 25.5 25.6 /* 25.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 25.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 25.9 * All rights reserved. 25.10 * 25.11 * Redistribution and use in source and binary forms, with or without 25.12 @@ -46,17 +46,26 @@ 25.13 25.14 25.15 /* 25.16 - * Ensure that the globals are actually defined only once. 25.17 + * Ensure that the globals are actually defined and initialized only once. 25.18 * 25.19 - * The use of these defines allows a single list of globals (here) in order 25.20 + * The use of these macros allows a single list of globals (here) in order 25.21 * to simplify maintenance of the code. 25.22 */ 25.23 #ifdef DEFINE_ACPI_GLOBALS 25.24 #define ACPI_EXTERN 25.25 +#define ACPI_INIT_GLOBAL(a,b) a=b 25.26 #else 25.27 #define ACPI_EXTERN extern 25.28 +#define ACPI_INIT_GLOBAL(a,b) a 25.29 #endif 25.30 25.31 +/* 25.32 + * Keep local copies of these FADT-based registers. NOTE: These globals 25.33 + * are first in this file for alignment reasons on 64-bit systems. 25.34 + */ 25.35 +ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable; 25.36 +ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable; 25.37 + 25.38 25.39 /***************************************************************************** 25.40 * 25.41 @@ -76,6 +85,46 @@ extern u32 25.42 25.43 /***************************************************************************** 25.44 * 25.45 + * Runtime configuration (static defaults that can be overriden at runtime) 25.46 + * 25.47 + ****************************************************************************/ 25.48 + 25.49 +/* 25.50 + * Enable "slack" in the AML interpreter? Default is FALSE, and the 25.51 + * interpreter strictly follows the ACPI specification. Setting to TRUE 25.52 + * allows the interpreter to forgive certain bad AML constructs. Currently: 25.53 + * 1) Allow "implicit return" of last value in a control method 25.54 + * 2) Allow access beyond end of operation region 25.55 + * 3) Allow access to uninitialized locals/args (auto-init to integer 0) 25.56 + * 4) Allow ANY object type to be a source operand for the Store() operator 25.57 + */ 25.58 +ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_enable_interpreter_slack, FALSE); 25.59 + 25.60 +/* 25.61 + * Automatically serialize ALL control methods? Default is FALSE, meaning 25.62 + * to use the Serialized/not_serialized method flags on a per method basis. 25.63 + * Only change this if the ASL code is poorly written and cannot handle 25.64 + * reentrancy even though methods are marked "not_serialized". 25.65 + */ 25.66 +ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_all_methods_serialized, FALSE); 25.67 + 25.68 +/* 25.69 + * Create the predefined _OSI method in the namespace? Default is TRUE 25.70 + * because ACPI CA is fully compatible with other ACPI implementations. 25.71 + * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior. 25.72 + */ 25.73 +ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_create_osi_method, TRUE); 25.74 + 25.75 +/* 25.76 + * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and 25.77 + * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only 25.78 + * be enabled just before going to sleep. 
25.79 + */ 25.80 +ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_leave_wake_gpes_disabled, TRUE); 25.81 + 25.82 + 25.83 +/***************************************************************************** 25.84 + * 25.85 * ACPI Table globals 25.86 * 25.87 ****************************************************************************/ 25.88 @@ -87,7 +136,6 @@ extern u32 25.89 * 25.90 * These tables are single-table only; meaning that there can be at most one 25.91 * of each in the system. Each global points to the actual table. 25.92 - * 25.93 */ 25.94 ACPI_EXTERN u32 acpi_gbl_table_flags; 25.95 ACPI_EXTERN u32 acpi_gbl_rsdt_table_count; 25.96 @@ -97,6 +145,11 @@ ACPI_EXTERN FADT_DESCRIPTOR * 25.97 ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT; 25.98 ACPI_EXTERN FACS_DESCRIPTOR *acpi_gbl_FACS; 25.99 ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS; 25.100 +/* 25.101 + * Since there may be multiple SSDTs and PSDTS, a single pointer is not 25.102 + * sufficient; Therefore, there isn't one! 25.103 + */ 25.104 + 25.105 25.106 /* 25.107 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths 25.108 @@ -107,17 +160,6 @@ ACPI_EXTERN u8 25.109 ACPI_EXTERN u8 acpi_gbl_integer_byte_width; 25.110 ACPI_EXTERN u8 acpi_gbl_integer_nybble_width; 25.111 25.112 -/* Keep local copies of these FADT-based registers */ 25.113 - 25.114 -ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable; 25.115 -ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable; 25.116 - 25.117 -/* 25.118 - * Since there may be multiple SSDTs and PSDTS, a single pointer is not 25.119 - * sufficient; Therefore, there isn't one! 25.120 - */ 25.121 - 25.122 - 25.123 /* 25.124 * ACPI Table info arrays 25.125 */ 25.126 @@ -142,6 +184,7 @@ ACPI_EXTERN struct acpi_mutex_info 25.127 ACPI_EXTERN struct acpi_memory_list acpi_gbl_memory_lists[ACPI_NUM_MEM_LISTS]; 25.128 ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify; 25.129 ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify; 25.130 +ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler; 25.131 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; 25.132 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; 25.133 ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; 25.134 @@ -161,13 +204,16 @@ ACPI_EXTERN u8 25.135 ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present; 25.136 ACPI_EXTERN u8 acpi_gbl_global_lock_present; 25.137 ACPI_EXTERN u8 acpi_gbl_events_initialized; 25.138 +ACPI_EXTERN u8 acpi_gbl_system_awake_and_running; 25.139 25.140 extern u8 acpi_gbl_shutdown; 25.141 extern u32 acpi_gbl_startup_flags; 25.142 extern const u8 acpi_gbl_decode_to8bit[8]; 25.143 -extern const char *acpi_gbl_db_sleep_states[ACPI_S_STATE_COUNT]; 25.144 +extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT]; 25.145 +extern const char *acpi_gbl_highest_dstate_names[4]; 25.146 extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; 25.147 extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; 25.148 +extern const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS]; 25.149 25.150 25.151 /***************************************************************************** 25.152 @@ -178,7 +224,7 @@ extern const char 25.153 25.154 #define NUM_NS_TYPES ACPI_TYPE_INVALID+1 25.155 25.156 -#if defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) 25.157 +#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) 25.158 #define NUM_PREDEFINED_NAMES 10 25.159 #else 25.160 #define 
NUM_PREDEFINED_NAMES 9 25.161 @@ -186,6 +232,7 @@ extern const char 25.162 25.163 ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct; 25.164 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node; 25.165 +ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device; 25.166 25.167 extern const u8 acpi_gbl_ns_properties[NUM_NS_TYPES]; 25.168 extern const struct acpi_predefined_names acpi_gbl_pre_defined_names [NUM_PREDEFINED_NAMES];
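The new ACPI_INIT_GLOBAL() spelling lets one declaration serve both as the initialized definition and as the extern declaration; its expansion, taken from the macros in the hunk above:

    /* in the single file that defines DEFINE_ACPI_GLOBALS: */
    u8 acpi_gbl_create_osi_method = TRUE;

    /* everywhere else (ACPI_EXTERN expands to 'extern', no initializer): */
    extern u8 acpi_gbl_create_osi_method;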
26.1 --- a/xen/include/acpi/achware.h Mon May 09 14:34:59 2005 +0000 26.2 +++ b/xen/include/acpi/achware.h Mon May 09 17:50:11 2005 +0000 26.3 @@ -5,7 +5,7 @@ 26.4 *****************************************************************************/ 26.5 26.6 /* 26.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 26.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 26.9 * All rights reserved. 26.10 * 26.11 * Redistribution and use in source and binary forms, with or without 26.12 @@ -114,15 +114,7 @@ acpi_hw_clear_acpi_status ( 26.13 /* GPE support */ 26.14 26.15 acpi_status 26.16 -acpi_hw_enable_gpe ( 26.17 - struct acpi_gpe_event_info *gpe_event_info); 26.18 - 26.19 -void 26.20 -acpi_hw_enable_gpe_for_wakeup ( 26.21 - struct acpi_gpe_event_info *gpe_event_info); 26.22 - 26.23 -acpi_status 26.24 -acpi_hw_disable_gpe ( 26.25 +acpi_hw_write_gpe_enable_reg ( 26.26 struct acpi_gpe_event_info *gpe_event_info); 26.27 26.28 acpi_status 26.29 @@ -130,10 +122,6 @@ acpi_hw_disable_gpe_block ( 26.30 struct acpi_gpe_xrupt_info *gpe_xrupt_info, 26.31 struct acpi_gpe_block_info *gpe_block); 26.32 26.33 -void 26.34 -acpi_hw_disable_gpe_for_wakeup ( 26.35 - struct acpi_gpe_event_info *gpe_event_info); 26.36 - 26.37 acpi_status 26.38 acpi_hw_clear_gpe ( 26.39 struct acpi_gpe_event_info *gpe_event_info); 26.40 @@ -143,22 +131,39 @@ acpi_hw_clear_gpe_block ( 26.41 struct acpi_gpe_xrupt_info *gpe_xrupt_info, 26.42 struct acpi_gpe_block_info *gpe_block); 26.43 26.44 +#ifdef ACPI_FUTURE_USAGE 26.45 acpi_status 26.46 acpi_hw_get_gpe_status ( 26.47 struct acpi_gpe_event_info *gpe_event_info, 26.48 acpi_event_status *event_status); 26.49 +#endif 26.50 + 26.51 +acpi_status 26.52 +acpi_hw_disable_all_gpes ( 26.53 + u32 flags); 26.54 + 26.55 +acpi_status 26.56 +acpi_hw_enable_all_runtime_gpes ( 26.57 + u32 flags); 26.58 26.59 acpi_status 26.60 -acpi_hw_disable_non_wakeup_gpes ( 26.61 - void); 26.62 +acpi_hw_enable_all_wakeup_gpes ( 26.63 + u32 flags); 26.64 26.65 acpi_status 26.66 -acpi_hw_enable_non_wakeup_gpes ( 26.67 - void); 26.68 +acpi_hw_enable_runtime_gpe_block ( 26.69 + struct acpi_gpe_xrupt_info *gpe_xrupt_info, 26.70 + struct acpi_gpe_block_info *gpe_block); 26.71 + 26.72 +acpi_status 26.73 +acpi_hw_enable_wakeup_gpe_block ( 26.74 + struct acpi_gpe_xrupt_info *gpe_xrupt_info, 26.75 + struct acpi_gpe_block_info *gpe_block); 26.76 26.77 26.78 /* ACPI Timer prototypes */ 26.79 26.80 +#ifdef ACPI_FUTURE_USAGE 26.81 acpi_status 26.82 acpi_get_timer_resolution ( 26.83 u32 *resolution); 26.84 @@ -172,6 +177,6 @@ acpi_get_timer_duration ( 26.85 u32 start_ticks, 26.86 u32 end_ticks, 26.87 u32 *time_elapsed); 26.88 - 26.89 +#endif /* ACPI_FUTURE_USAGE */ 26.90 26.91 #endif /* __ACHWARE_H__ */
27.1 --- a/xen/include/acpi/aclocal.h Mon May 09 14:34:59 2005 +0000 27.2 +++ b/xen/include/acpi/aclocal.h Mon May 09 17:50:11 2005 +0000 27.3 @@ -5,7 +5,7 @@ 27.4 *****************************************************************************/ 27.5 27.6 /* 27.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 27.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 27.9 * All rights reserved. 27.10 * 27.11 * Redistribution and use in source and binary forms, with or without 27.12 @@ -53,7 +53,7 @@ typedef u32 27.13 27.14 /* Total number of aml opcodes defined */ 27.15 27.16 -#define AML_NUM_OPCODES 0x7E 27.17 +#define AML_NUM_OPCODES 0x7F 27.18 27.19 27.20 /***************************************************************************** 27.21 @@ -189,8 +189,6 @@ struct acpi_namespace_node 27.22 u8 type; /* Type associated with this name */ 27.23 u16 owner_id; 27.24 union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */ 27.25 - 27.26 - 27.27 union acpi_operand_object *object; /* Pointer to attached ACPI object (optional) */ 27.28 struct acpi_namespace_node *child; /* First child */ 27.29 struct acpi_namespace_node *peer; /* Next peer*/ 27.30 @@ -211,10 +209,8 @@ struct acpi_namespace_node 27.31 #define ANOBJ_METHOD_LOCAL 0x10 27.32 #define ANOBJ_METHOD_NO_RETVAL 0x20 27.33 #define ANOBJ_METHOD_SOME_NO_RETVAL 0x40 27.34 - 27.35 #define ANOBJ_IS_BIT_OFFSET 0x80 27.36 27.37 - 27.38 /* 27.39 * ACPI Table Descriptor. One per ACPI table 27.40 */ 27.41 @@ -309,16 +305,31 @@ struct acpi_create_field_info 27.42 * 27.43 ****************************************************************************/ 27.44 27.45 -/* Information about a GPE, one per each GPE in an array */ 27.46 +/* Dispatch info for each GPE -- either a method or handler, cannot be both */ 27.47 27.48 -struct acpi_gpe_event_info 27.49 +struct acpi_handler_info 27.50 +{ 27.51 + acpi_event_handler address; /* Address of handler, if any */ 27.52 + void *context; /* Context to be passed to handler */ 27.53 + struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 27.54 +}; 27.55 + 27.56 +union acpi_gpe_dispatch_info 27.57 { 27.58 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 27.59 - acpi_gpe_handler handler; /* Address of handler, if any */ 27.60 - void *context; /* Context to be passed to handler */ 27.61 + struct acpi_handler_info *handler; 27.62 +}; 27.63 + 27.64 +/* 27.65 + * Information about a GPE, one per each GPE in an array. 27.66 + * NOTE: Important to keep this struct as small as possible. 
27.67 + */ 27.68 +struct acpi_gpe_event_info 27.69 +{ 27.70 + union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */ 27.71 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ 27.72 - u8 flags; /* Level or Edge */ 27.73 - u8 bit_mask; /* This GPE within the register */ 27.74 + u8 flags; /* Misc info about this GPE */ 27.75 + u8 register_bit; /* This GPE bit within the register */ 27.76 }; 27.77 27.78 /* Information about a GPE register pair, one per each status/enable pair in an array */ 27.79 @@ -327,9 +338,8 @@ struct acpi_gpe_register_info 27.80 { 27.81 struct acpi_generic_address status_address; /* Address of status reg */ 27.82 struct acpi_generic_address enable_address; /* Address of enable reg */ 27.83 - u8 status; /* Current value of status reg */ 27.84 - u8 enable; /* Current value of enable reg */ 27.85 - u8 wake_enable; /* Mask of bits to keep enabled when sleeping */ 27.86 + u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ 27.87 + u8 enable_for_run; /* GPEs to keep enabled when running */ 27.88 u8 base_gpe_number; /* Base GPE number for this register */ 27.89 }; 27.90 27.91 @@ -339,6 +349,7 @@ struct acpi_gpe_register_info 27.92 */ 27.93 struct acpi_gpe_block_info 27.94 { 27.95 + struct acpi_namespace_node *node; 27.96 struct acpi_gpe_block_info *previous; 27.97 struct acpi_gpe_block_info *next; 27.98 struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */ 27.99 @@ -360,6 +371,13 @@ struct acpi_gpe_xrupt_info 27.100 }; 27.101 27.102 27.103 +struct acpi_gpe_walk_info 27.104 +{ 27.105 + struct acpi_namespace_node *gpe_device; 27.106 + struct acpi_gpe_block_info *gpe_block; 27.107 +}; 27.108 + 27.109 + 27.110 typedef acpi_status (*ACPI_GPE_CALLBACK) ( 27.111 struct acpi_gpe_xrupt_info *gpe_xrupt_info, 27.112 struct acpi_gpe_block_info *gpe_block); 27.113 @@ -495,7 +513,7 @@ struct acpi_thread_state 27.114 struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */ 27.115 union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */ 27.116 u32 thread_id; /* Running thread ID */ 27.117 - u16 current_sync_level; /* Mutex Sync (nested acquire) level */ 27.118 + u8 current_sync_level; /* Mutex Sync (nested acquire) level */ 27.119 }; 27.120 27.121 27.122 @@ -544,7 +562,7 @@ union acpi_generic_state 27.123 struct acpi_scope_state scope; 27.124 struct acpi_pscope_state parse_scope; 27.125 struct acpi_pkg_state pkg; 27.126 - struct acpi_thread_state acpi_thread; 27.127 + struct acpi_thread_state thread; 27.128 struct acpi_result_values results; 27.129 struct acpi_notify_info notify; 27.130 }; 27.131 @@ -844,7 +862,6 @@ struct acpi_bit_register_info 27.132 /* 27.133 * Large resource descriptor types 27.134 */ 27.135 - 27.136 #define ACPI_RDESC_TYPE_MEMORY_24 0x81 27.137 #define ACPI_RDESC_TYPE_GENERAL_REGISTER 0x82 27.138 #define ACPI_RDESC_TYPE_LARGE_VENDOR 0x84 27.139 @@ -854,6 +871,7 @@ struct acpi_bit_register_info 27.140 #define ACPI_RDESC_TYPE_WORD_ADDRESS_SPACE 0x88 27.141 #define ACPI_RDESC_TYPE_EXTENDED_XRUPT 0x89 27.142 #define ACPI_RDESC_TYPE_QWORD_ADDRESS_SPACE 0x8A 27.143 +#define ACPI_RDESC_TYPE_EXTENDED_ADDRESS_SPACE 0x8B 27.144 27.145 27.146 /*****************************************************************************
28.1 --- a/xen/include/acpi/acmacros.h Mon May 09 14:34:59 2005 +0000 28.2 +++ b/xen/include/acpi/acmacros.h Mon May 09 17:50:11 2005 +0000 28.3 @@ -5,7 +5,7 @@ 28.4 *****************************************************************************/ 28.5 28.6 /* 28.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 28.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 28.9 * All rights reserved. 28.10 * 28.11 * Redistribution and use in source and binary forms, with or without 28.12 @@ -53,6 +53,10 @@ 28.13 #define ACPI_LOBYTE(l) ((u8)(u16)(l)) 28.14 #define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) 28.15 28.16 +#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) 28.17 +#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) 28.18 +#define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) 28.19 + 28.20 28.21 #if ACPI_MACHINE_WIDTH == 16 28.22 28.23 @@ -97,7 +101,7 @@ 28.24 * printf() format helpers 28.25 */ 28.26 28.27 -/* Split 64-bit integer into two 32-bit values. use with %8,8_x%8.8X */ 28.28 +/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */ 28.29 28.30 #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i),ACPI_LODWORD(i) 28.31 28.32 @@ -361,24 +365,6 @@ 28.33 28.34 #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) 28.35 28.36 -/* Macros for GAS addressing */ 28.37 - 28.38 -#if ACPI_MACHINE_WIDTH != 16 28.39 - 28.40 -#define ACPI_PCI_DEVICE(a) (u16) ((ACPI_HIDWORD ((a))) & 0x0000FFFF) 28.41 -#define ACPI_PCI_FUNCTION(a) (u16) ((ACPI_LODWORD ((a))) >> 16) 28.42 -#define ACPI_PCI_REGISTER(a) (u16) ((ACPI_LODWORD ((a))) & 0x0000FFFF) 28.43 - 28.44 -#else 28.45 - 28.46 -/* No support for GAS and PCI IDs in 16-bit mode */ 28.47 - 28.48 -#define ACPI_PCI_FUNCTION(a) (u16) ((a) & 0xFFFF0000) 28.49 -#define ACPI_PCI_DEVICE(a) (u16) ((a) & 0x0000FFFF) 28.50 -#define ACPI_PCI_REGISTER(a) (u16) ((a) & 0x0000FFFF) 28.51 - 28.52 -#endif 28.53 - 28.54 28.55 /* Bitfields within ACPI registers */ 28.56 28.57 @@ -502,19 +488,19 @@ 28.58 * The first parameter should be the procedure name as a quoted string. This is declared 28.59 * as a local string ("_proc_name) so that it can be also used by the function exit macros below. 
28.60 */ 28.61 -#define ACPI_FUNCTION_NAME(a) struct acpi_debug_print_info _dbg; \ 28.62 - _dbg.component_id = _COMPONENT; \ 28.63 - _dbg.proc_name = a; \ 28.64 - _dbg.module_name = _THIS_MODULE; 28.65 +#define ACPI_FUNCTION_NAME(a) struct acpi_debug_print_info _debug_info; \ 28.66 + _debug_info.component_id = _COMPONENT; \ 28.67 + _debug_info.proc_name = a; \ 28.68 + _debug_info.module_name = _THIS_MODULE; 28.69 28.70 #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ 28.71 - acpi_ut_trace(__LINE__,&_dbg) 28.72 + acpi_ut_trace(__LINE__,&_debug_info) 28.73 #define ACPI_FUNCTION_TRACE_PTR(a,b) ACPI_FUNCTION_NAME(a) \ 28.74 - acpi_ut_trace_ptr(__LINE__,&_dbg,(void *)b) 28.75 + acpi_ut_trace_ptr(__LINE__,&_debug_info,(void *)b) 28.76 #define ACPI_FUNCTION_TRACE_U32(a,b) ACPI_FUNCTION_NAME(a) \ 28.77 - acpi_ut_trace_u32(__LINE__,&_dbg,(u32)b) 28.78 + acpi_ut_trace_u32(__LINE__,&_debug_info,(u32)b) 28.79 #define ACPI_FUNCTION_TRACE_STR(a,b) ACPI_FUNCTION_NAME(a) \ 28.80 - acpi_ut_trace_str(__LINE__,&_dbg,(char *)b) 28.81 + acpi_ut_trace_str(__LINE__,&_debug_info,(char *)b) 28.82 28.83 #define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr() 28.84 28.85 @@ -531,10 +517,10 @@ 28.86 #define ACPI_DO_WHILE0(a) a 28.87 #endif 28.88 28.89 -#define return_VOID ACPI_DO_WHILE0 ({acpi_ut_exit(__LINE__,&_dbg);return;}) 28.90 -#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({acpi_ut_status_exit(__LINE__,&_dbg,(s));return((s));}) 28.91 -#define return_VALUE(s) ACPI_DO_WHILE0 ({acpi_ut_value_exit(__LINE__,&_dbg,(acpi_integer)(s));return((s));}) 28.92 -#define return_PTR(s) ACPI_DO_WHILE0 ({acpi_ut_ptr_exit(__LINE__,&_dbg,(u8 *)(s));return((s));}) 28.93 +#define return_VOID ACPI_DO_WHILE0 ({acpi_ut_exit(__LINE__,&_debug_info);return;}) 28.94 +#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({acpi_ut_status_exit(__LINE__,&_debug_info,(s));return((s));}) 28.95 +#define return_VALUE(s) ACPI_DO_WHILE0 ({acpi_ut_value_exit(__LINE__,&_debug_info,(acpi_integer)(s));return((s));}) 28.96 +#define return_PTR(s) ACPI_DO_WHILE0 ({acpi_ut_ptr_exit(__LINE__,&_debug_info,(u8 *)(s));return((s));}) 28.97 28.98 /* Conditional execution */ 28.99 28.100 @@ -548,12 +534,16 @@ 28.101 28.102 /* Stack and buffer dumping */ 28.103 28.104 -#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand(a) 28.105 +#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a),0) 28.106 #define ACPI_DUMP_OPERANDS(a,b,c,d,e) acpi_ex_dump_operands(a,b,c,d,e,_THIS_MODULE,__LINE__) 28.107 28.108 28.109 #define ACPI_DUMP_ENTRY(a,b) acpi_ns_dump_entry (a,b) 28.110 + 28.111 +#ifdef ACPI_FUTURE_USAGE 28.112 #define ACPI_DUMP_TABLES(a,b) acpi_ns_dump_tables(a,b) 28.113 +#endif 28.114 + 28.115 #define ACPI_DUMP_PATHNAME(a,b,c,d) acpi_ns_dump_pathname(a,b,c,d) 28.116 #define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) 28.117 #define ACPI_DUMP_BUFFER(a,b) acpi_ut_dump_buffer((u8 *)a,b,DB_BYTE_DISPLAY,_COMPONENT) 28.118 @@ -606,7 +596,11 @@ 28.119 #define ACPI_DUMP_STACK_ENTRY(a) 28.120 #define ACPI_DUMP_OPERANDS(a,b,c,d,e) 28.121 #define ACPI_DUMP_ENTRY(a,b) 28.122 + 28.123 +#ifdef ACPI_FUTURE_USAGE 28.124 #define ACPI_DUMP_TABLES(a,b) 28.125 +#endif 28.126 + 28.127 #define ACPI_DUMP_PATHNAME(a,b,c,d) 28.128 #define ACPI_DUMP_RESOURCE_LIST(a) 28.129 #define ACPI_DUMP_BUFFER(a,b) 28.130 @@ -681,7 +675,4 @@ 28.131 28.132 #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ 28.133 28.134 - 28.135 -#define ACPI_GET_STACK_POINTER _asm {mov eax, ebx} 28.136 - 28.137 #endif /* ACMACROS_H */
29.1 --- a/xen/include/acpi/acobject.h Mon May 09 14:34:59 2005 +0000 29.2 +++ b/xen/include/acpi/acobject.h Mon May 09 17:50:11 2005 +0000 29.3 @@ -6,7 +6,7 @@ 29.4 *****************************************************************************/ 29.5 29.6 /* 29.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 29.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 29.9 * All rights reserved. 29.10 * 29.11 * Redistribution and use in source and binary forms, with or without 29.12 @@ -94,9 +94,7 @@ 29.13 u32 bit_length; /* Length of field in bits */\ 29.14 u32 base_byte_offset; /* Byte offset within containing object */\ 29.15 u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ 29.16 - u8 datum_valid_bits; /* Valid bit in first "Field datum" */\ 29.17 - u8 end_field_valid_bits; /* Valid bits in the last "field datum" */\ 29.18 - u8 end_buffer_valid_bits; /* Valid bits in the last "buffer datum" */\ 29.19 + u8 access_bit_width; /* Read/Write size in bits (8-64) */\ 29.20 u32 value; /* Value to store into the Bank or Index register */\ 29.21 struct acpi_namespace_node *node; /* Link back to parent node */ 29.22 29.23 @@ -135,7 +133,10 @@ struct acpi_object_integer 29.24 acpi_integer value; 29.25 }; 29.26 29.27 - 29.28 +/* 29.29 + * Note: The String and Buffer object must be identical through the Pointer 29.30 + * element. There is code that depends on this. 29.31 + */ 29.32 struct acpi_object_string /* Null terminated, ASCII characters only */ 29.33 { 29.34 ACPI_OBJECT_COMMON_HEADER 29.35 @@ -180,7 +181,11 @@ struct acpi_object_event 29.36 }; 29.37 29.38 29.39 -#define INFINITE_CONCURRENCY 0xFF 29.40 +#define ACPI_INFINITE_CONCURRENCY 0xFF 29.41 + 29.42 +typedef 29.43 +acpi_status (*ACPI_INTERNAL_METHOD) ( 29.44 + struct acpi_walk_state *walk_state); 29.45 29.46 struct acpi_object_method 29.47 { 29.48 @@ -190,6 +195,7 @@ struct acpi_object_method 29.49 u32 aml_length; 29.50 void *semaphore; 29.51 u8 *aml_start; 29.52 + ACPI_INTERNAL_METHOD implementation; 29.53 u8 concurrency; 29.54 u8 thread_count; 29.55 acpi_owner_id owning_id; 29.56 @@ -199,13 +205,14 @@ struct acpi_object_method 29.57 struct acpi_object_mutex 29.58 { 29.59 ACPI_OBJECT_COMMON_HEADER 29.60 - u16 sync_level; 29.61 - u16 acquisition_depth; 29.62 - struct acpi_thread_state *owner_thread; 29.63 - void *semaphore; 29.64 + u8 sync_level; /* 0-15, specified in Mutex() call */ 29.65 + u16 acquisition_depth; /* Allow multiple Acquires, same thread */ 29.66 + struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ 29.67 + void *semaphore; /* Actual OS synchronization object */ 29.68 union acpi_operand_object *prev; /* Link for list of acquired mutexes */ 29.69 union acpi_operand_object *next; /* Link for list of acquired mutexes */ 29.70 - struct acpi_namespace_node *node; /* containing object */ 29.71 + struct acpi_namespace_node *node; /* Containing namespace node */ 29.72 + u8 original_sync_level; /* Owner's original sync level (0-15) */ 29.73 }; 29.74 29.75 29.76 @@ -215,7 +222,7 @@ struct acpi_object_region 29.77 29.78 u8 space_id; 29.79 union acpi_operand_object *handler; /* Handler for region access */ 29.80 - struct acpi_namespace_node *node; /* containing object */ 29.81 + struct acpi_namespace_node *node; /* Containing namespace node */ 29.82 union acpi_operand_object *next; 29.83 u32 length; 29.84 acpi_physical_address address;
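The interesting addition in this acobject.h hunk is the ACPI_INTERNAL_METHOD function pointer stored in the method object: a control method can now be backed by a native C routine instead of its AML (the acpi_ut_osi_implementation declaration added to acutils.h later in this changeset looks like the intended consumer). The sketch below only mirrors that dispatch idea; the trimmed types and the run_method helper are illustrative stand-ins, not ACPI CA code.

#include <stdio.h>

typedef int acpi_status;                 /* stand-in for the real typedef */
#define AE_OK 0

struct acpi_walk_state;                  /* opaque in this sketch */

typedef acpi_status (*ACPI_INTERNAL_METHOD)(struct acpi_walk_state *walk_state);

struct method_object {                   /* drastically reduced acpi_object_method */
    unsigned char       *aml_start;
    unsigned int         aml_length;
    ACPI_INTERNAL_METHOD implementation; /* NULL means: interpret the AML */
};

static acpi_status native_osi(struct acpi_walk_state *ws)
{
    (void)ws;
    printf("_OSI handled by a native C implementation\n");
    return AE_OK;
}

static acpi_status run_method(struct method_object *m, struct acpi_walk_state *ws)
{
    if (m->implementation)               /* native implementation takes precedence */
        return m->implementation(ws);
    printf("interpret %u bytes of AML\n", m->aml_length);
    return AE_OK;
}

int main(void)
{
    struct method_object osi = { 0, 0, native_osi };
    return run_method(&osi, 0);
}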
30.1 --- a/xen/include/acpi/acoutput.h Mon May 09 14:34:59 2005 +0000 30.2 +++ b/xen/include/acpi/acoutput.h Mon May 09 17:50:11 2005 +0000 30.3 @@ -5,7 +5,7 @@ 30.4 *****************************************************************************/ 30.5 30.6 /* 30.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 30.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 30.9 * All rights reserved. 30.10 * 30.11 * Redistribution and use in source and binary forms, with or without 30.12 @@ -136,7 +136,7 @@ 30.13 /* 30.14 * Debug level macros that are used in the DEBUG_PRINT macros 30.15 */ 30.16 -#define ACPI_DEBUG_LEVEL(dl) (u32) dl,__LINE__,&_dbg 30.17 +#define ACPI_DEBUG_LEVEL(dl) (u32) dl,__LINE__,&_debug_info 30.18 30.19 /* Exception level -- used in the global "debug_level" */ 30.20
31.1 --- a/xen/include/acpi/acpi.h Mon May 09 14:34:59 2005 +0000 31.2 +++ b/xen/include/acpi/acpi.h Mon May 09 17:50:11 2005 +0000 31.3 @@ -5,7 +5,7 @@ 31.4 *****************************************************************************/ 31.5 31.6 /* 31.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 31.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 31.9 * All rights reserved. 31.10 * 31.11 * Redistribution and use in source and binary forms, with or without
32.1 --- a/xen/include/acpi/acpi_bus.h Mon May 09 14:34:59 2005 +0000 32.2 +++ b/xen/include/acpi/acpi_bus.h Mon May 09 17:50:11 2005 +0000 32.3 @@ -26,15 +26,9 @@ 32.4 #ifndef __ACPI_BUS_H__ 32.5 #define __ACPI_BUS_H__ 32.6 32.7 -#if 0 32.8 -#include <xen/version.h> 32.9 -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,4)) 32.10 -#include <xen/device.h> 32.11 -#define CONFIG_LDM 32.12 -#endif 32.13 -#endif /* 0 */ 32.14 +#include <acpi/acpi.h> 32.15 32.16 -#include <acpi/acpi.h> 32.17 +#define PREFIX "ACPI: " 32.18 32.19 /* TBD: Make dynamic */ 32.20 #define ACPI_MAX_HANDLES 10 32.21 @@ -66,10 +60,10 @@ acpi_evaluate_reference ( 32.22 32.23 #ifdef CONFIG_ACPI_BUS 32.24 32.25 -/*#include <xen/proc_fs.h>*/ 32.26 +#include <linux/proc_fs.h> 32.27 32.28 #define ACPI_BUS_FILE_ROOT "acpi" 32.29 -/*extern struct proc_dir_entry *acpi_root_dir;*/ 32.30 +extern struct proc_dir_entry *acpi_root_dir; 32.31 extern FADT_DESCRIPTOR acpi_fadt; 32.32 32.33 enum acpi_bus_removal_type { 32.34 @@ -108,6 +102,9 @@ typedef int (*acpi_op_suspend) (struct a 32.35 typedef int (*acpi_op_resume) (struct acpi_device *device, int state); 32.36 typedef int (*acpi_op_scan) (struct acpi_device *device); 32.37 typedef int (*acpi_op_bind) (struct acpi_device *device); 32.38 +typedef int (*acpi_op_unbind) (struct acpi_device *device); 32.39 +typedef int (*acpi_op_match) (struct acpi_device *device, 32.40 + struct acpi_driver *driver); 32.41 32.42 struct acpi_device_ops { 32.43 acpi_op_add add; 32.44 @@ -119,13 +116,15 @@ struct acpi_device_ops { 32.45 acpi_op_resume resume; 32.46 acpi_op_scan scan; 32.47 acpi_op_bind bind; 32.48 + acpi_op_unbind unbind; 32.49 + acpi_op_match match; 32.50 }; 32.51 32.52 struct acpi_driver { 32.53 struct list_head node; 32.54 char name[80]; 32.55 char class[80]; 32.56 - int references; 32.57 + atomic_t references; 32.58 char *ids; /* Supported Hardware IDs */ 32.59 struct acpi_device_ops ops; 32.60 }; 32.61 @@ -161,7 +160,8 @@ struct acpi_device_flags { 32.62 u32 suprise_removal_ok:1; 32.63 u32 power_manageable:1; 32.64 u32 performance_manageable:1; 32.65 - u32 reserved:21; 32.66 + u32 wake_capable:1; /* Wakeup(_PRW) supported? */ 32.67 + u32 reserved:20; 32.68 }; 32.69 32.70 32.71 @@ -207,10 +207,8 @@ struct acpi_device_power_flags { 32.72 u32 explicit_get:1; /* _PSC present? */ 32.73 u32 power_resources:1; /* Power resources */ 32.74 u32 inrush_current:1; /* Serialize Dx->D0 */ 32.75 - u32 wake_capable:1; /* Wakeup supported? */ 32.76 - u32 wake_enabled:1; /* Enabled for wakeup */ 32.77 u32 power_removed:1; /* Optimize Dx->D0 */ 32.78 - u32 reserved:26; 32.79 + u32 reserved:28; 32.80 }; 32.81 32.82 struct acpi_device_power_state { 32.83 @@ -254,6 +252,25 @@ struct acpi_device_perf { 32.84 struct acpi_device_perf_state *states; 32.85 }; 32.86 32.87 +/* Wakeup Management */ 32.88 +struct acpi_device_wakeup_flags { 32.89 + u8 valid:1; /* Can successfully enable wakeup? 
*/ 32.90 + u8 run_wake:1; /* Run-Wake GPE devices */ 32.91 +}; 32.92 + 32.93 +struct acpi_device_wakeup_state { 32.94 + u8 enabled:1; 32.95 + u8 active:1; 32.96 +}; 32.97 + 32.98 +struct acpi_device_wakeup { 32.99 + acpi_handle gpe_device; 32.100 + acpi_integer gpe_number;; 32.101 + acpi_integer sleep_state; 32.102 + struct acpi_handle_list resources; 32.103 + struct acpi_device_wakeup_state state; 32.104 + struct acpi_device_wakeup_flags flags; 32.105 +}; 32.106 32.107 /* Device */ 32.108 32.109 @@ -262,18 +279,19 @@ struct acpi_device { 32.110 struct acpi_device *parent; 32.111 struct list_head children; 32.112 struct list_head node; 32.113 + struct list_head wakeup_list; 32.114 + struct list_head g_list; 32.115 struct acpi_device_status status; 32.116 struct acpi_device_flags flags; 32.117 struct acpi_device_pnp pnp; 32.118 struct acpi_device_power power; 32.119 + struct acpi_device_wakeup wakeup; 32.120 struct acpi_device_perf performance; 32.121 struct acpi_device_dir dir; 32.122 struct acpi_device_ops ops; 32.123 struct acpi_driver *driver; 32.124 void *driver_data; 32.125 -#ifdef CONFIG_LDM 32.126 - struct device dev; 32.127 -#endif 32.128 + struct kobject kobj; 32.129 }; 32.130 32.131 #define acpi_driver_data(d) ((d)->driver_data) 32.132 @@ -292,12 +310,14 @@ struct acpi_bus_event { 32.133 u32 data; 32.134 }; 32.135 32.136 +extern struct subsystem acpi_subsys; 32.137 32.138 /* 32.139 * External Functions 32.140 */ 32.141 32.142 -int acpi_bus_get_device(acpi_handle, struct acpi_device **device); 32.143 +int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); 32.144 +void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context); 32.145 int acpi_bus_get_status (struct acpi_device *device); 32.146 int acpi_bus_get_power (acpi_handle handle, int *state); 32.147 int acpi_bus_set_power (acpi_handle handle, int state); 32.148 @@ -305,11 +325,16 @@ int acpi_bus_generate_event (struct acpi 32.149 int acpi_bus_receive_event (struct acpi_bus_event *event); 32.150 int acpi_bus_register_driver (struct acpi_driver *driver); 32.151 int acpi_bus_unregister_driver (struct acpi_driver *driver); 32.152 -int acpi_bus_scan (struct acpi_device *device); 32.153 -int acpi_init (void); 32.154 -void acpi_exit (void); 32.155 +int acpi_bus_scan (struct acpi_device *start); 32.156 +int acpi_bus_trim(struct acpi_device *start, int rmdevice); 32.157 +int acpi_bus_add (struct acpi_device **child, struct acpi_device *parent, 32.158 + acpi_handle handle, int type); 32.159 32.160 32.161 +int acpi_match_ids (struct acpi_device *device, char *ids); 32.162 +int acpi_create_dir(struct acpi_device *); 32.163 +void acpi_remove_dir(struct acpi_device *); 32.164 + 32.165 #endif /*CONFIG_ACPI_BUS*/ 32.166 32.167 #endif /*__ACPI_BUS_H__*/
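Two of the more visible acpi_bus.h changes are the extra driver operations (unbind and match) and the new wakeup bookkeeping hanging off struct acpi_device. Below is a reduced, stand-alone sketch of the enlarged ops table; the struct layouts are deliberately trimmed stand-ins and the exact-string comparison is only a toy matching policy, not what the real bus core does.

#include <string.h>
#include <stdio.h>

struct acpi_device { const char *hid; };
struct acpi_driver;

typedef int (*acpi_op_add)(struct acpi_device *);
typedef int (*acpi_op_unbind)(struct acpi_device *);
typedef int (*acpi_op_match)(struct acpi_device *, struct acpi_driver *);

struct acpi_device_ops {
    acpi_op_add    add;
    acpi_op_unbind unbind;   /* new in the 2.6.11-derived header */
    acpi_op_match  match;    /* new: the driver decides whether it fits */
};

struct acpi_driver {
    const char *name;
    const char *ids;         /* supported hardware IDs */
    struct acpi_device_ops ops;
};

static int demo_match(struct acpi_device *dev, struct acpi_driver *drv)
{
    /* trivial exact-HID comparison, 0 on match */
    return strcmp(dev->hid, drv->ids) == 0 ? 0 : -1;
}

int main(void)
{
    struct acpi_driver drv = { "demo", "PNP0C0B", { 0, 0, demo_match } };
    struct acpi_device fan = { "PNP0C0B" };

    printf("match result: %d\n", drv.ops.match(&fan, &drv));
    return 0;
}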
33.1 --- a/xen/include/acpi/acpi_drivers.h Mon May 09 14:34:59 2005 +0000 33.2 +++ b/xen/include/acpi/acpi_drivers.h Mon May 09 17:50:11 2005 +0000 33.3 @@ -1,5 +1,5 @@ 33.4 /* 33.5 - * acpi_drivers.h ($Revision: 32 $) 33.6 + * acpi_drivers.h ($Revision: 31 $) 33.7 * 33.8 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 33.9 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 33.10 @@ -27,122 +27,22 @@ 33.11 #define __ACPI_DRIVERS_H__ 33.12 33.13 #include <xen/acpi.h> 33.14 -#include "acpi_bus.h" 33.15 +#include <acpi/acpi_bus.h> 33.16 33.17 33.18 #define ACPI_MAX_STRING 80 33.19 33.20 - 33.21 -/* -------------------------------------------------------------------------- 33.22 - ACPI Bus 33.23 - -------------------------------------------------------------------------- */ 33.24 - 33.25 #define ACPI_BUS_COMPONENT 0x00010000 33.26 -#define ACPI_BUS_CLASS "system_bus" 33.27 -#define ACPI_BUS_HID "ACPI_BUS" 33.28 -#define ACPI_BUS_DRIVER_NAME "ACPI Bus Driver" 33.29 -#define ACPI_BUS_DEVICE_NAME "System Bus" 33.30 - 33.31 - 33.32 -/* -------------------------------------------------------------------------- 33.33 - AC Adapter 33.34 - -------------------------------------------------------------------------- */ 33.35 +#define ACPI_SYSTEM_COMPONENT 0x02000000 33.36 33.37 -#define ACPI_AC_COMPONENT 0x00020000 33.38 -#define ACPI_AC_CLASS "ac_adapter" 33.39 -#define ACPI_AC_HID "ACPI0003" 33.40 -#define ACPI_AC_DRIVER_NAME "ACPI AC Adapter Driver" 33.41 -#define ACPI_AC_DEVICE_NAME "AC Adapter" 33.42 -#define ACPI_AC_FILE_STATE "state" 33.43 -#define ACPI_AC_NOTIFY_STATUS 0x80 33.44 -#define ACPI_AC_STATUS_OFFLINE 0x00 33.45 -#define ACPI_AC_STATUS_ONLINE 0x01 33.46 -#define ACPI_AC_STATUS_UNKNOWN 0xFF 33.47 - 33.48 - 33.49 -/* -------------------------------------------------------------------------- 33.50 - Battery 33.51 - -------------------------------------------------------------------------- */ 33.52 - 33.53 -#define ACPI_BATTERY_COMPONENT 0x00040000 33.54 -#define ACPI_BATTERY_CLASS "battery" 33.55 -#define ACPI_BATTERY_HID "PNP0C0A" 33.56 -#define ACPI_BATTERY_DRIVER_NAME "ACPI Battery Driver" 33.57 -#define ACPI_BATTERY_DEVICE_NAME "Battery" 33.58 -#define ACPI_BATTERY_FILE_INFO "info" 33.59 -#define ACPI_BATTERY_FILE_STATUS "state" 33.60 -#define ACPI_BATTERY_FILE_ALARM "alarm" 33.61 -#define ACPI_BATTERY_NOTIFY_STATUS 0x80 33.62 -#define ACPI_BATTERY_NOTIFY_INFO 0x81 33.63 -#define ACPI_BATTERY_UNITS_WATTS "mW" 33.64 -#define ACPI_BATTERY_UNITS_AMPS "mA" 33.65 - 33.66 - 33.67 -/* -------------------------------------------------------------------------- 33.68 - Button 33.69 - -------------------------------------------------------------------------- */ 33.70 +/* _HID definitions */ 33.71 33.72 -#define ACPI_BUTTON_COMPONENT 0x00080000 33.73 -#define ACPI_BUTTON_DRIVER_NAME "ACPI Button Driver" 33.74 -#define ACPI_BUTTON_CLASS "button" 33.75 -#define ACPI_BUTTON_FILE_INFO "info" 33.76 -#define ACPI_BUTTON_FILE_STATE "state" 33.77 -#define ACPI_BUTTON_TYPE_UNKNOWN 0x00 33.78 -#define ACPI_BUTTON_NOTIFY_STATUS 0x80 33.79 - 33.80 -#define ACPI_BUTTON_SUBCLASS_POWER "power" 33.81 -#define ACPI_BUTTON_HID_POWER "PNP0C0C" 33.82 +#define ACPI_POWER_HID "ACPI_PWR" 33.83 +#define ACPI_PROCESSOR_HID "ACPI_CPU" 33.84 +#define ACPI_SYSTEM_HID "ACPI_SYS" 33.85 +#define ACPI_THERMAL_HID "ACPI_THM" 33.86 #define ACPI_BUTTON_HID_POWERF "ACPI_FPB" 33.87 -#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button (CM)" 33.88 -#define ACPI_BUTTON_DEVICE_NAME_POWERF "Power 
Button (FF)" 33.89 -#define ACPI_BUTTON_TYPE_POWER 0x01 33.90 -#define ACPI_BUTTON_TYPE_POWERF 0x02 33.91 - 33.92 -#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" 33.93 -#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" 33.94 #define ACPI_BUTTON_HID_SLEEPF "ACPI_FSB" 33.95 -#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button (CM)" 33.96 -#define ACPI_BUTTON_DEVICE_NAME_SLEEPF "Sleep Button (FF)" 33.97 -#define ACPI_BUTTON_TYPE_SLEEP 0x03 33.98 -#define ACPI_BUTTON_TYPE_SLEEPF 0x04 33.99 - 33.100 -#define ACPI_BUTTON_SUBCLASS_LID "lid" 33.101 -#define ACPI_BUTTON_HID_LID "PNP0C0D" 33.102 -#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" 33.103 -#define ACPI_BUTTON_TYPE_LID 0x05 33.104 - 33.105 - 33.106 -/* -------------------------------------------------------------------------- 33.107 - Embedded Controller 33.108 - -------------------------------------------------------------------------- */ 33.109 - 33.110 -#define ACPI_EC_COMPONENT 0x00100000 33.111 -#define ACPI_EC_CLASS "embedded_controller" 33.112 -#define ACPI_EC_HID "PNP0C09" 33.113 -#define ACPI_EC_DRIVER_NAME "ACPI Embedded Controller Driver" 33.114 -#define ACPI_EC_DEVICE_NAME "Embedded Controller" 33.115 -#define ACPI_EC_FILE_INFO "info" 33.116 - 33.117 -#ifdef CONFIG_ACPI_EC 33.118 - 33.119 -int acpi_ec_ecdt_probe (void); 33.120 -int acpi_ec_init (void); 33.121 -void acpi_ec_exit (void); 33.122 - 33.123 -#endif 33.124 - 33.125 - 33.126 -/* -------------------------------------------------------------------------- 33.127 - Fan 33.128 - -------------------------------------------------------------------------- */ 33.129 - 33.130 -#define ACPI_FAN_COMPONENT 0x00200000 33.131 -#define ACPI_FAN_CLASS "fan" 33.132 -#define ACPI_FAN_HID "PNP0C0B" 33.133 -#define ACPI_FAN_DRIVER_NAME "ACPI Fan Driver" 33.134 -#define ACPI_FAN_DEVICE_NAME "Fan" 33.135 -#define ACPI_FAN_FILE_STATE "state" 33.136 -#define ACPI_FAN_NOTIFY_STATUS 0x80 33.137 33.138 33.139 /* -------------------------------------------------------------------------- 33.140 @@ -153,41 +53,28 @@ void acpi_ec_exit (void); 33.141 33.142 #define ACPI_PCI_COMPONENT 0x00400000 33.143 33.144 -/* ACPI PCI Root Bridge (pci_root.c) */ 33.145 - 33.146 -#define ACPI_PCI_ROOT_CLASS "pci_bridge" 33.147 -#define ACPI_PCI_ROOT_HID "PNP0A03" 33.148 -#define ACPI_PCI_ROOT_DRIVER_NAME "ACPI PCI Root Bridge Driver" 33.149 -#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge" 33.150 - 33.151 -int acpi_pci_root_init (void); 33.152 -void acpi_pci_root_exit (void); 33.153 - 33.154 /* ACPI PCI Interrupt Link (pci_link.c) */ 33.155 33.156 -#define ACPI_PCI_LINK_CLASS "pci_irq_routing" 33.157 -#define ACPI_PCI_LINK_HID "PNP0C0F" 33.158 -#define ACPI_PCI_LINK_DRIVER_NAME "ACPI PCI Interrupt Link Driver" 33.159 -#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link" 33.160 -#define ACPI_PCI_LINK_FILE_INFO "info" 33.161 -#define ACPI_PCI_LINK_FILE_STATUS "state" 33.162 - 33.163 -int acpi_pci_link_check (void); 33.164 +int acpi_irq_penalty_init (void); 33.165 int acpi_pci_link_get_irq (acpi_handle handle, int index, int* edge_level, int* active_high_low); 33.166 -int acpi_pci_link_init (void); 33.167 -void acpi_pci_link_exit (void); 33.168 33.169 /* ACPI PCI Interrupt Routing (pci_irq.c) */ 33.170 33.171 int acpi_pci_irq_add_prt (acpi_handle handle, int segment, int bus); 33.172 +void acpi_pci_irq_del_prt (int segment, int bus); 33.173 33.174 /* ACPI PCI Device Binding (pci_bind.c) */ 33.175 33.176 struct pci_bus; 33.177 33.178 int acpi_pci_bind (struct acpi_device *device); 33.179 +int acpi_pci_unbind (struct 
acpi_device *device); 33.180 int acpi_pci_bind_root (struct acpi_device *device, struct acpi_pci_id *id, struct pci_bus *bus); 33.181 33.182 +/* Arch-defined function to add a bus to the system */ 33.183 + 33.184 +struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, int bus); 33.185 + 33.186 #endif /*CONFIG_ACPI_PCI*/ 33.187 33.188 33.189 @@ -195,43 +82,25 @@ int acpi_pci_bind_root (struct acpi_devi 33.190 Power Resource 33.191 -------------------------------------------------------------------------- */ 33.192 33.193 -#define ACPI_POWER_COMPONENT 0x00800000 33.194 -#define ACPI_POWER_CLASS "power_resource" 33.195 -#define ACPI_POWER_HID "ACPI_PWR" 33.196 -#define ACPI_POWER_DRIVER_NAME "ACPI Power Resource Driver" 33.197 -#define ACPI_POWER_DEVICE_NAME "Power Resource" 33.198 -#define ACPI_POWER_FILE_INFO "info" 33.199 -#define ACPI_POWER_FILE_STATUS "state" 33.200 -#define ACPI_POWER_RESOURCE_STATE_OFF 0x00 33.201 -#define ACPI_POWER_RESOURCE_STATE_ON 0x01 33.202 -#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 33.203 - 33.204 #ifdef CONFIG_ACPI_POWER 33.205 - 33.206 +int acpi_enable_wakeup_device_power (struct acpi_device *dev); 33.207 +int acpi_disable_wakeup_device_power (struct acpi_device *dev); 33.208 int acpi_power_get_inferred_state (struct acpi_device *device); 33.209 int acpi_power_transition (struct acpi_device *device, int state); 33.210 -int acpi_power_init (void); 33.211 -void acpi_power_exit (void); 33.212 - 33.213 #endif 33.214 33.215 33.216 /* -------------------------------------------------------------------------- 33.217 + Embedded Controller 33.218 + -------------------------------------------------------------------------- */ 33.219 +#ifdef CONFIG_ACPI_EC 33.220 +int acpi_ec_ecdt_probe (void); 33.221 +#endif 33.222 + 33.223 +/* -------------------------------------------------------------------------- 33.224 Processor 33.225 -------------------------------------------------------------------------- */ 33.226 33.227 -#define ACPI_PROCESSOR_COMPONENT 0x01000000 33.228 -#define ACPI_PROCESSOR_CLASS "processor" 33.229 -#define ACPI_PROCESSOR_HID "ACPI_CPU" 33.230 -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" 33.231 -#define ACPI_PROCESSOR_DEVICE_NAME "Processor" 33.232 -#define ACPI_PROCESSOR_FILE_INFO "info" 33.233 -#define ACPI_PROCESSOR_FILE_POWER "power" 33.234 -#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" 33.235 -#define ACPI_PROCESSOR_FILE_THROTTLING "throttling" 33.236 -#define ACPI_PROCESSOR_FILE_LIMIT "limit" 33.237 -#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 33.238 -#define ACPI_PROCESSOR_NOTIFY_POWER 0x81 33.239 #define ACPI_PROCESSOR_LIMIT_NONE 0x00 33.240 #define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01 33.241 #define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02 33.242 @@ -239,109 +108,4 @@ void acpi_power_exit (void); 33.243 int acpi_processor_set_thermal_limit(acpi_handle handle, int type); 33.244 33.245 33.246 -/* -------------------------------------------------------------------------- 33.247 - System 33.248 - -------------------------------------------------------------------------- */ 33.249 - 33.250 -#define ACPI_SYSTEM_COMPONENT 0x02000000 33.251 -#define ACPI_SYSTEM_CLASS "system" 33.252 -#define ACPI_SYSTEM_HID "ACPI_SYS" 33.253 -#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver" 33.254 -#define ACPI_SYSTEM_DEVICE_NAME "System" 33.255 -#define ACPI_SYSTEM_FILE_INFO "info" 33.256 -#define ACPI_SYSTEM_FILE_EVENT "event" 33.257 -#define ACPI_SYSTEM_FILE_ALARM "alarm" 33.258 -#define ACPI_SYSTEM_FILE_DSDT "dsdt" 
33.259 -#define ACPI_SYSTEM_FILE_FADT "fadt" 33.260 -#define ACPI_SYSTEM_FILE_SLEEP "sleep" 33.261 -#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" 33.262 -#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level" 33.263 - 33.264 -#ifdef CONFIG_ACPI_SYSTEM 33.265 - 33.266 -int acpi_system_init (void); 33.267 -void acpi_system_exit (void); 33.268 - 33.269 -#endif 33.270 - 33.271 - 33.272 -/* -------------------------------------------------------------------------- 33.273 - Thermal Zone 33.274 - -------------------------------------------------------------------------- */ 33.275 - 33.276 -#define ACPI_THERMAL_COMPONENT 0x04000000 33.277 -#define ACPI_THERMAL_CLASS "thermal_zone" 33.278 -#define ACPI_THERMAL_HID "ACPI_THM" 33.279 -#define ACPI_THERMAL_DRIVER_NAME "ACPI Thermal Zone Driver" 33.280 -#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" 33.281 -#define ACPI_THERMAL_FILE_STATE "state" 33.282 -#define ACPI_THERMAL_FILE_TEMPERATURE "temperature" 33.283 -#define ACPI_THERMAL_FILE_TRIP_POINTS "trip_points" 33.284 -#define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode" 33.285 -#define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency" 33.286 -#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80 33.287 -#define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81 33.288 -#define ACPI_THERMAL_NOTIFY_DEVICES 0x82 33.289 -#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 33.290 -#define ACPI_THERMAL_NOTIFY_HOT 0xF1 33.291 -#define ACPI_THERMAL_MODE_ACTIVE 0x00 33.292 -#define ACPI_THERMAL_MODE_PASSIVE 0x01 33.293 -#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff" 33.294 - 33.295 - 33.296 -/* -------------------------------------------------------------------------- 33.297 - Debug Support 33.298 - -------------------------------------------------------------------------- */ 33.299 - 33.300 -#define ACPI_DEBUG_RESTORE 0 33.301 -#define ACPI_DEBUG_LOW 1 33.302 -#define ACPI_DEBUG_MEDIUM 2 33.303 -#define ACPI_DEBUG_HIGH 3 33.304 -#define ACPI_DEBUG_DRIVERS 4 33.305 - 33.306 -/*extern u32 acpi_dbg_level;*/ 33.307 -/*extern u32 acpi_dbg_layer;*/ 33.308 - 33.309 -static inline void 33.310 -acpi_set_debug ( 33.311 - u32 flag) 33.312 -{ 33.313 - static u32 layer_save; 33.314 - static u32 level_save; 33.315 - 33.316 - switch (flag) { 33.317 - case ACPI_DEBUG_RESTORE: 33.318 - acpi_dbg_layer = layer_save; 33.319 - acpi_dbg_level = level_save; 33.320 - break; 33.321 - case ACPI_DEBUG_LOW: 33.322 - case ACPI_DEBUG_MEDIUM: 33.323 - case ACPI_DEBUG_HIGH: 33.324 - case ACPI_DEBUG_DRIVERS: 33.325 - layer_save = acpi_dbg_layer; 33.326 - level_save = acpi_dbg_level; 33.327 - break; 33.328 - } 33.329 - 33.330 - switch (flag) { 33.331 - case ACPI_DEBUG_LOW: 33.332 - acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; 33.333 - acpi_dbg_level = ACPI_DEBUG_DEFAULT; 33.334 - break; 33.335 - case ACPI_DEBUG_MEDIUM: 33.336 - acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS; 33.337 - acpi_dbg_level = ACPI_LV_FUNCTIONS | ACPI_LV_ALL_EXCEPTIONS; 33.338 - break; 33.339 - case ACPI_DEBUG_HIGH: 33.340 - acpi_dbg_layer = 0xFFFFFFFF; 33.341 - acpi_dbg_level = 0xFFFFFFFF; 33.342 - break; 33.343 - case ACPI_DEBUG_DRIVERS: 33.344 - acpi_dbg_layer = ACPI_ALL_DRIVERS; 33.345 - acpi_dbg_level = 0xFFFFFFFF; 33.346 - break; 33.347 - } 33.348 -} 33.349 - 33.350 - 33.351 #endif /*__ACPI_DRIVERS_H__*/
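After this cleanup the header keeps little more than the synthetic _HID strings; everything driver-specific (classes, proc file names, notify values, the old acpi_set_debug helper) now lives with the individual drivers. A trivial stand-alone use of the surviving constants, assuming nothing beyond string comparison:

#include <stdio.h>
#include <string.h>

#define ACPI_POWER_HID      "ACPI_PWR"
#define ACPI_PROCESSOR_HID  "ACPI_CPU"
#define ACPI_SYSTEM_HID     "ACPI_SYS"
#define ACPI_THERMAL_HID    "ACPI_THM"

static const char *classify(const char *hid)
{
    if (!strcmp(hid, ACPI_PROCESSOR_HID)) return "processor";
    if (!strcmp(hid, ACPI_THERMAL_HID))   return "thermal zone";
    if (!strcmp(hid, ACPI_POWER_HID))     return "power resource";
    if (!strcmp(hid, ACPI_SYSTEM_HID))    return "system";
    return "unknown";
}

int main(void)
{
    printf("%s\n", classify("ACPI_CPU"));   /* prints: processor */
    return 0;
}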
34.1 --- a/xen/include/acpi/acpiosxf.h Mon May 09 14:34:59 2005 +0000 34.2 +++ b/xen/include/acpi/acpiosxf.h Mon May 09 17:50:11 2005 +0000 34.3 @@ -9,7 +9,7 @@ 34.4 34.5 34.6 /* 34.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 34.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 34.9 * All rights reserved. 34.10 * 34.11 * Redistribution and use in source and binary forms, with or without 34.12 @@ -169,17 +169,19 @@ acpi_status 34.13 acpi_os_map_memory ( 34.14 acpi_physical_address physical_address, 34.15 acpi_size size, 34.16 - void **logical_address); 34.17 + void __iomem **logical_address); 34.18 34.19 void 34.20 acpi_os_unmap_memory ( 34.21 - void *logical_address, 34.22 + void __iomem *logical_address, 34.23 acpi_size size); 34.24 34.25 +#ifdef ACPI_FUTURE_USAGE 34.26 acpi_status 34.27 acpi_os_get_physical_address ( 34.28 void *logical_address, 34.29 acpi_physical_address *physical_address); 34.30 +#endif 34.31 34.32 34.33 /* 34.34 @@ -188,14 +190,14 @@ acpi_os_get_physical_address ( 34.35 34.36 acpi_status 34.37 acpi_os_install_interrupt_handler ( 34.38 - u32 interrupt_number, 34.39 - OSD_HANDLER service_routine, 34.40 + u32 gsi, 34.41 + acpi_osd_handler service_routine, 34.42 void *context); 34.43 34.44 acpi_status 34.45 acpi_os_remove_interrupt_handler ( 34.46 - u32 interrupt_number, 34.47 - OSD_HANDLER service_routine); 34.48 + u32 gsi, 34.49 + acpi_osd_handler service_routine); 34.50 34.51 34.52 /* 34.53 @@ -209,13 +211,16 @@ acpi_os_get_thread_id ( 34.54 acpi_status 34.55 acpi_os_queue_for_execution ( 34.56 u32 priority, 34.57 - OSD_EXECUTION_CALLBACK function, 34.58 + acpi_osd_exec_callback function, 34.59 + void *context); 34.60 + 34.61 +void 34.62 +acpi_os_wait_events_complete ( 34.63 void *context); 34.64 34.65 void 34.66 acpi_os_sleep ( 34.67 - u32 seconds, 34.68 - u32 milliseconds); 34.69 + acpi_integer milliseconds); 34.70 34.71 void 34.72 acpi_os_stall ( 34.73 @@ -258,25 +263,28 @@ acpi_os_write_memory ( 34.74 34.75 /* 34.76 * Platform and hardware-independent PCI configuration space access 34.77 + * Note: Can't use "Register" as a parameter, changed to "Reg" -- 34.78 + * certain compilers complain. 34.79 */ 34.80 34.81 acpi_status 34.82 acpi_os_read_pci_configuration ( 34.83 struct acpi_pci_id *pci_id, 34.84 - u32 register, 34.85 + u32 reg, 34.86 void *value, 34.87 u32 width); 34.88 34.89 acpi_status 34.90 acpi_os_write_pci_configuration ( 34.91 struct acpi_pci_id *pci_id, 34.92 - u32 register, 34.93 + u32 reg, 34.94 acpi_integer value, 34.95 u32 width); 34.96 34.97 /* 34.98 * Interim function needed for PCI IRQ routing 34.99 */ 34.100 + 34.101 void 34.102 acpi_os_derive_pci_id( 34.103 acpi_handle rhandle, 34.104 @@ -292,12 +300,14 @@ acpi_os_readable ( 34.105 void *pointer, 34.106 acpi_size length); 34.107 34.108 +#ifdef ACPI_FUTURE_USAGE 34.109 u8 34.110 acpi_os_writable ( 34.111 void *pointer, 34.112 acpi_size length); 34.113 +#endif 34.114 34.115 -u32 34.116 +u64 34.117 acpi_os_get_timer ( 34.118 void); 34.119 34.120 @@ -329,9 +339,11 @@ acpi_os_redirect_output ( 34.121 * Debug input 34.122 */ 34.123 34.124 +#ifdef ACPI_FUTURE_USAGE 34.125 u32 34.126 acpi_os_get_line ( 34.127 char *buffer); 34.128 +#endif 34.129 34.130 34.131 /*
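The OS-services layer in acpiosxf.h picks up the Linux 2.6 conventions: __iomem annotations on the mapping calls, the OSD_HANDLER/OSD_EXECUTION_CALLBACK typedefs renamed to acpi_osd_handler/acpi_osd_exec_callback, interrupt routines keyed by GSI, and acpi_os_sleep taking a single millisecond count. The snippet below only mocks the new handler calling convention; install_handler and the ACPI_INTERRUPT_HANDLED value are assumptions made so the example is self-contained.

#include <stdio.h>

typedef unsigned int u32;
#define ACPI_INTERRUPT_HANDLED 1u          /* assumed return value */

typedef u32 (*acpi_osd_handler)(void *context);

static u32 sci_handler(void *context)      /* matches the renamed typedef */
{
    printf("SCI fired, context=%p\n", context);
    return ACPI_INTERRUPT_HANDLED;
}

/* mock of acpi_os_install_interrupt_handler(gsi, routine, context) */
static void install_handler(u32 gsi, acpi_osd_handler routine, void *context)
{
    printf("wiring GSI %u\n", gsi);
    routine(context);                      /* invoke once, just to show the shape */
}

int main(void)
{
    int token = 0;
    install_handler(9, sci_handler, &token);
    return 0;
}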
35.1 --- a/xen/include/acpi/acpixf.h Mon May 09 14:34:59 2005 +0000 35.2 +++ b/xen/include/acpi/acpixf.h Mon May 09 17:50:11 2005 +0000 35.3 @@ -6,7 +6,7 @@ 35.4 *****************************************************************************/ 35.5 35.6 /* 35.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 35.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 35.9 * All rights reserved. 35.10 * 35.11 * Redistribution and use in source and binary forms, with or without 35.12 @@ -70,9 +70,11 @@ acpi_status 35.13 acpi_terminate ( 35.14 void); 35.15 35.16 +#ifdef ACPI_FUTURE_USAGE 35.17 acpi_status 35.18 acpi_subsystem_status ( 35.19 void); 35.20 +#endif 35.21 35.22 acpi_status 35.23 acpi_enable ( 35.24 @@ -82,9 +84,11 @@ acpi_status 35.25 acpi_disable ( 35.26 void); 35.27 35.28 +#ifdef ACPI_FUTURE_USAGE 35.29 acpi_status 35.30 acpi_get_system_info ( 35.31 struct acpi_buffer *ret_buffer); 35.32 +#endif 35.33 35.34 const char * 35.35 acpi_format_exception ( 35.36 @@ -94,10 +98,12 @@ acpi_status 35.37 acpi_purge_cached_objects ( 35.38 void); 35.39 35.40 +#ifdef ACPI_FUTURE_USAGE 35.41 acpi_status 35.42 acpi_install_initialization_handler ( 35.43 acpi_init_handler handler, 35.44 u32 function); 35.45 +#endif 35.46 35.47 /* 35.48 * ACPI Memory manager 35.49 @@ -129,6 +135,7 @@ acpi_status 35.50 acpi_load_tables ( 35.51 void); 35.52 35.53 +#ifdef ACPI_FUTURE_USAGE 35.54 acpi_status 35.55 acpi_load_table ( 35.56 struct acpi_table_header *table_ptr); 35.57 @@ -142,6 +149,7 @@ acpi_get_table_header ( 35.58 acpi_table_type table_type, 35.59 u32 instance, 35.60 struct acpi_table_header *out_table_header); 35.61 +#endif /* ACPI_FUTURE_USAGE */ 35.62 35.63 acpi_status 35.64 acpi_get_table ( 35.65 @@ -218,6 +226,7 @@ acpi_evaluate_object ( 35.66 struct acpi_object_list *parameter_objects, 35.67 struct acpi_buffer *return_object_buffer); 35.68 35.69 +#ifdef ACPI_FUTURE_USAGE 35.70 acpi_status 35.71 acpi_evaluate_object_typed ( 35.72 acpi_handle object, 35.73 @@ -225,6 +234,7 @@ acpi_evaluate_object_typed ( 35.74 struct acpi_object_list *external_params, 35.75 struct acpi_buffer *return_buffer, 35.76 acpi_object_type return_type); 35.77 +#endif 35.78 35.79 acpi_status 35.80 acpi_get_object_info ( 35.81 @@ -296,9 +306,20 @@ acpi_install_gpe_handler ( 35.82 acpi_handle gpe_device, 35.83 u32 gpe_number, 35.84 u32 type, 35.85 - acpi_gpe_handler handler, 35.86 + acpi_event_handler address, 35.87 void *context); 35.88 35.89 +#ifdef ACPI_FUTURE_USAGE 35.90 +acpi_status 35.91 +acpi_install_exception_handler ( 35.92 + acpi_exception_handler handler); 35.93 +#endif 35.94 + 35.95 + 35.96 +/* 35.97 + * Event interfaces 35.98 + */ 35.99 + 35.100 acpi_status 35.101 acpi_acquire_global_lock ( 35.102 u16 timeout, 35.103 @@ -312,7 +333,7 @@ acpi_status 35.104 acpi_remove_gpe_handler ( 35.105 acpi_handle gpe_device, 35.106 u32 gpe_number, 35.107 - acpi_gpe_handler handler); 35.108 + acpi_event_handler address); 35.109 35.110 acpi_status 35.111 acpi_enable_event ( 35.112 @@ -328,10 +349,18 @@ acpi_status 35.113 acpi_clear_event ( 35.114 u32 event); 35.115 35.116 +#ifdef ACPI_FUTURE_USAGE 35.117 acpi_status 35.118 acpi_get_event_status ( 35.119 u32 event, 35.120 acpi_event_status *event_status); 35.121 +#endif /* ACPI_FUTURE_USAGE */ 35.122 + 35.123 +acpi_status 35.124 +acpi_set_gpe_type ( 35.125 + acpi_handle gpe_device, 35.126 + u32 gpe_number, 35.127 + u8 type); 35.128 35.129 acpi_status 35.130 acpi_enable_gpe ( 35.131 @@ -351,12 +380,14 @@ acpi_clear_gpe ( 35.132 u32 gpe_number, 35.133 u32 flags); 35.134 35.135 +#ifdef 
ACPI_FUTURE_USAGE 35.136 acpi_status 35.137 acpi_get_gpe_status ( 35.138 acpi_handle gpe_device, 35.139 u32 gpe_number, 35.140 u32 flags, 35.141 acpi_event_status *event_status); 35.142 +#endif /* ACPI_FUTURE_USAGE */ 35.143 35.144 acpi_status 35.145 acpi_install_gpe_block ( 35.146 @@ -385,10 +416,12 @@ acpi_get_current_resources( 35.147 acpi_handle device_handle, 35.148 struct acpi_buffer *ret_buffer); 35.149 35.150 +#ifdef ACPI_FUTURE_USAGE 35.151 acpi_status 35.152 acpi_get_possible_resources( 35.153 acpi_handle device_handle, 35.154 struct acpi_buffer *ret_buffer); 35.155 +#endif 35.156 35.157 acpi_status 35.158 acpi_walk_resources ( 35.159 @@ -432,9 +465,11 @@ acpi_status 35.160 acpi_set_firmware_waking_vector ( 35.161 acpi_physical_address physical_address); 35.162 35.163 +#ifdef ACPI_FUTURE_USAGE 35.164 acpi_status 35.165 acpi_get_firmware_waking_vector ( 35.166 acpi_physical_address *physical_address); 35.167 +#endif 35.168 35.169 acpi_status 35.170 acpi_get_sleep_type_data ( 35.171 @@ -446,11 +481,11 @@ acpi_status 35.172 acpi_enter_sleep_state_prep ( 35.173 u8 sleep_state); 35.174 35.175 -acpi_status 35.176 +acpi_status asmlinkage 35.177 acpi_enter_sleep_state ( 35.178 u8 sleep_state); 35.179 35.180 -acpi_status 35.181 +acpi_status asmlinkage 35.182 acpi_enter_sleep_state_s4bios ( 35.183 void); 35.184
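A large share of this acpixf.h diff is simply #ifdef ACPI_FUTURE_USAGE guards: public entry points with no in-tree caller are compiled out unless that macro is defined. A minimal stand-alone illustration of the pattern (the guarded function is a placeholder, not the real API):

#include <stdio.h>

/* #define ACPI_FUTURE_USAGE */          /* define to pull in the extra interfaces */

#ifdef ACPI_FUTURE_USAGE
static int demo_future_interface(void)
{
    return 42;                           /* stand-in body */
}
#endif

int main(void)
{
#ifdef ACPI_FUTURE_USAGE
    printf("future-usage interface says %d\n", demo_future_interface());
#else
    printf("future-usage interfaces are compiled out\n");
#endif
    return 0;
}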
36.1 --- a/xen/include/acpi/acstruct.h Mon May 09 14:34:59 2005 +0000 36.2 +++ b/xen/include/acpi/acstruct.h Mon May 09 17:50:11 2005 +0000 36.3 @@ -5,7 +5,7 @@ 36.4 *****************************************************************************/ 36.5 36.6 /* 36.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 36.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 36.9 * All rights reserved. 36.10 * 36.11 * Redistribution and use in source and binary forms, with or without 36.12 @@ -69,13 +69,14 @@ 36.13 struct acpi_walk_state 36.14 { 36.15 u8 data_type; /* To differentiate various internal objs MUST BE FIRST!*/\ 36.16 + u8 walk_type; 36.17 acpi_owner_id owner_id; /* Owner of objects created during the walk */ 36.18 u8 last_predicate; /* Result of last predicate */ 36.19 + u8 reserved; /* For alignment */ 36.20 u8 current_result; /* */ 36.21 u8 next_op_info; /* Info about next_op */ 36.22 u8 num_operands; /* Stack pointer for Operands[] array */ 36.23 u8 return_used; 36.24 - u8 walk_type; 36.25 u16 opcode; /* Current AML opcode */ 36.26 u8 scope_depth; 36.27 u8 reserved1; 36.28 @@ -91,7 +92,8 @@ struct acpi_walk_state 36.29 struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */ 36.30 union acpi_operand_object **caller_return_desc; 36.31 union acpi_generic_state *control_state; /* List of control states (nested IFs) */ 36.32 - struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */ 36.33 + struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */ 36.34 + struct acpi_gpe_event_info *gpe_event_info; /* Info for GPE (_Lxx/_Exx methods only */ 36.35 struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */ 36.36 struct acpi_namespace_node *method_call_node; /* Called method Node*/ 36.37 union acpi_parse_object *method_call_op; /* method_call Op if running a method */ 36.38 @@ -112,7 +114,7 @@ struct acpi_walk_state 36.39 union acpi_parse_object *next_op; /* next op to be processed */ 36.40 acpi_parse_downwards descending_callback; 36.41 acpi_parse_upwards ascending_callback; 36.42 - struct acpi_thread_state *acpi_thread; 36.43 + struct acpi_thread_state *thread; 36.44 struct acpi_walk_state *next; /* Next walk_state in list */ 36.45 }; 36.46 36.47 @@ -200,4 +202,21 @@ union acpi_aml_operands 36.48 }; 36.49 36.50 36.51 +/* Internal method parameter list */ 36.52 + 36.53 +struct acpi_parameter_info 36.54 +{ 36.55 + struct acpi_namespace_node *node; 36.56 + union acpi_operand_object **parameters; 36.57 + union acpi_operand_object *return_object; 36.58 + u8 parameter_type; 36.59 + u8 return_object_type; 36.60 +}; 36.61 + 36.62 +/* Types for parameter_type above */ 36.63 + 36.64 +#define ACPI_PARAM_ARGS 0 36.65 +#define ACPI_PARAM_GPE 1 36.66 + 36.67 + 36.68 #endif
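The new struct acpi_parameter_info at the end of acstruct.h bundles everything the evaluator needs (target node, argument list, and the returned object) into one structure, with parameter_type selecting between an ordinary argument list and GPE information. Below is a reduced stand-alone model of it; the operand and namespace types are left opaque on purpose.

#include <stdio.h>

typedef unsigned char u8;

#define ACPI_PARAM_ARGS 0   /* ordinary argument list */
#define ACPI_PARAM_GPE  1   /* GPE block info instead of args */

struct acpi_namespace_node;              /* opaque in this sketch */
union  acpi_operand_object;              /* opaque in this sketch */

struct acpi_parameter_info {
    struct acpi_namespace_node  *node;
    union  acpi_operand_object **parameters;
    union  acpi_operand_object  *return_object;
    u8 parameter_type;
    u8 return_object_type;
};

int main(void)
{
    struct acpi_parameter_info info = { 0 };

    info.parameter_type = ACPI_PARAM_ARGS;   /* a normal method call */
    printf("parameter_type=%u return_object=%p\n",
           (unsigned)info.parameter_type, (void *)info.return_object);
    return 0;
}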
37.1 --- a/xen/include/acpi/actbl.h Mon May 09 14:34:59 2005 +0000 37.2 +++ b/xen/include/acpi/actbl.h Mon May 09 17:50:11 2005 +0000 37.3 @@ -5,7 +5,7 @@ 37.4 *****************************************************************************/ 37.5 37.6 /* 37.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 37.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 37.9 * All rights reserved. 37.10 * 37.11 * Redistribution and use in source and binary forms, with or without 37.12 @@ -288,19 +288,6 @@ struct smart_battery_table 37.13 }; 37.14 37.15 37.16 -/* 37.17 - * High performance timer 37.18 - */ 37.19 -struct hpet_table 37.20 -{ 37.21 - ACPI_TABLE_HEADER_DEF 37.22 - u32 hardware_id; 37.23 - u32 base_address [3]; 37.24 - u8 hpet_number; 37.25 - u16 clock_tick; 37.26 - u8 attributes; 37.27 -}; 37.28 - 37.29 #pragma pack() 37.30 37.31 37.32 @@ -343,5 +330,23 @@ struct acpi_table_support 37.33 #include "actbl1.h" /* Acpi 1.0 table definitions */ 37.34 #include "actbl2.h" /* Acpi 2.0 table definitions */ 37.35 37.36 +extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1, 37.37 + * needed for certain workarounds */ 37.38 + 37.39 +#pragma pack(1) 37.40 +/* 37.41 + * High performance timer 37.42 + */ 37.43 +struct hpet_table 37.44 +{ 37.45 + ACPI_TABLE_HEADER_DEF 37.46 + u32 hardware_id; 37.47 + struct acpi_generic_address base_address; 37.48 + u8 hpet_number; 37.49 + u16 clock_tick; 37.50 + u8 attributes; 37.51 +}; 37.52 + 37.53 +#pragma pack() 37.54 37.55 #endif /* __ACTBL_H__ */
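struct hpet_table moves below the ACPI 2.0 definitions so its base address can be a proper Generic Address Structure, and the struct is now byte-packed. A stand-alone, packed model of that layout is sketched below; the ACPI table header is omitted and the example values are invented.

#include <stdio.h>

typedef unsigned char      u8;
typedef unsigned short     u16;
typedef unsigned int       u32;
typedef unsigned long long u64;

#pragma pack(1)
struct acpi_generic_address {
    u8  address_space_id;     /* 0 = system memory */
    u8  register_bit_width;
    u8  register_bit_offset;
    u8  access_width;         /* was "reserved"; minimum access size (ACPI 3.0) */
    u64 address;
};

struct hpet_table_body {      /* trimmed: ACPI_TABLE_HEADER_DEF omitted */
    u32 hardware_id;
    struct acpi_generic_address base_address;
    u8  hpet_number;
    u16 clock_tick;
    u8  attributes;
};
#pragma pack()

int main(void)
{
    struct hpet_table_body hpet = {
        .hardware_id  = 0x12345678u,                  /* made-up vendor/device id */
        .base_address = { 0, 64, 0, 0, 0xFED00000ull },
        .hpet_number  = 0,
        .clock_tick   = 0x0080,
        .attributes   = 0,
    };

    printf("HPET MMIO base = %#llx (space %u), packed size = %zu bytes\n",
           hpet.base_address.address,
           (unsigned)hpet.base_address.address_space_id,
           sizeof(hpet));
    return 0;
}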
38.1 --- a/xen/include/acpi/actbl1.h Mon May 09 14:34:59 2005 +0000 38.2 +++ b/xen/include/acpi/actbl1.h Mon May 09 17:50:11 2005 +0000 38.3 @@ -5,7 +5,7 @@ 38.4 *****************************************************************************/ 38.5 38.6 /* 38.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 38.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 38.9 * All rights reserved. 38.10 * 38.11 * Redistribution and use in source and binary forms, with or without
39.1 --- a/xen/include/acpi/actbl2.h Mon May 09 14:34:59 2005 +0000 39.2 +++ b/xen/include/acpi/actbl2.h Mon May 09 17:50:11 2005 +0000 39.3 @@ -5,7 +5,7 @@ 39.4 *****************************************************************************/ 39.5 39.6 /* 39.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 39.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 39.9 * All rights reserved. 39.10 * 39.11 * Redistribution and use in source and binary forms, with or without 39.12 @@ -62,6 +62,7 @@ 39.13 #define BAF_8042_KEYBOARD_CONTROLLER 0x0002 39.14 39.15 #define FADT2_REVISION_ID 3 39.16 +#define FADT2_MINUS_REVISION_ID 2 39.17 39.18 39.19 #pragma pack(1) 39.20 @@ -114,53 +115,56 @@ struct acpi_generic_address 39.21 u8 address_space_id; /* Address space where struct or register exists. */ 39.22 u8 register_bit_width; /* Size in bits of given register */ 39.23 u8 register_bit_offset; /* Bit offset within the register */ 39.24 - u8 reserved; /* Must be 0 */ 39.25 + u8 access_width; /* Minimum Access size (ACPI 3.0) */ 39.26 u64 address; /* 64-bit address of struct or register */ 39.27 }; 39.28 39.29 39.30 +#define FADT_REV2_COMMON \ 39.31 + u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \ 39.32 + u32 V1_dsdt; /* 32-bit physical address of DSDT */ \ 39.33 + u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \ 39.34 + u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \ 39.35 + u16 sci_int; /* System vector of SCI interrupt */ \ 39.36 + u32 smi_cmd; /* Port address of SMI command port */ \ 39.37 + u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \ 39.38 + u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \ 39.39 + u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \ 39.40 + u8 pstate_cnt; /* Processor performance state control*/ \ 39.41 + u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ \ 39.42 + u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ \ 39.43 + u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \ 39.44 + u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \ 39.45 + u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \ 39.46 + u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \ 39.47 + u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \ 39.48 + u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \ 39.49 + u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ \ 39.50 + u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ \ 39.51 + u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \ 39.52 + u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \ 39.53 + u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \ 39.54 + u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \ 39.55 + u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \ 39.56 + u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \ 39.57 + u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \ 39.58 + u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \ 39.59 + u16 flush_size; /* Number of flush strides that need to be read */ \ 39.60 + u16 flush_stride; /* Processor's memory cache line width, in bytes */ \ 39.61 + u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \ 39.62 + u8 duty_width; /* Processor's duty cycle value bit width in P_CNT 
register.*/ \ 39.63 + u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \ 39.64 + u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \ 39.65 + u8 century; /* Index to century in RTC CMOS RAM */ \ 39.66 + u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ 39.67 + 39.68 /* 39.69 * ACPI 2.0 Fixed ACPI Description Table (FADT) 39.70 */ 39.71 struct fadt_descriptor_rev2 39.72 { 39.73 ACPI_TABLE_HEADER_DEF /* ACPI common table header */ 39.74 - u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ 39.75 - u32 V1_dsdt; /* 32-bit physical address of DSDT */ 39.76 - u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ 39.77 - u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ 39.78 - u16 sci_int; /* System vector of SCI interrupt */ 39.79 - u32 smi_cmd; /* Port address of SMI command port */ 39.80 - u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ 39.81 - u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ 39.82 - u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ 39.83 - u8 pstate_cnt; /* Processor performance state control*/ 39.84 - u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ 39.85 - u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ 39.86 - u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ 39.87 - u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ 39.88 - u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ 39.89 - u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ 39.90 - u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ 39.91 - u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ 39.92 - u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ 39.93 - u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ 39.94 - u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ 39.95 - u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ 39.96 - u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ 39.97 - u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ 39.98 - u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ 39.99 - u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ 39.100 - u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ 39.101 - u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ 39.102 - u16 flush_size; /* Number of flush strides that need to be read */ 39.103 - u16 flush_stride; /* Processor's memory cache line width, in bytes */ 39.104 - u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ 39.105 - u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ 39.106 - u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ 39.107 - u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ 39.108 - u8 century; /* Index to century in RTC CMOS RAM */ 39.109 - u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. 
See Table 5-10 for description*/ 39.110 + FADT_REV2_COMMON 39.111 u8 reserved2; /* Reserved */ 39.112 u32 wb_invd : 1; /* The wbinvd instruction works properly */ 39.113 u32 wb_invd_flush : 1; /* The wbinvd flushes but does not invalidate */ 39.114 @@ -195,6 +199,20 @@ struct fadt_descriptor_rev2 39.115 }; 39.116 39.117 39.118 +/* "Downrevved" ACPI 2.0 FADT descriptor */ 39.119 + 39.120 +struct fadt_descriptor_rev2_minus 39.121 +{ 39.122 + ACPI_TABLE_HEADER_DEF /* ACPI common table header */ 39.123 + FADT_REV2_COMMON 39.124 + u8 reserved2; /* Reserved */ 39.125 + u32 flags; 39.126 + struct acpi_generic_address reset_register; /* Reset register address in GAS format */ 39.127 + u8 reset_value; /* Value to write to the reset_register port to reset the system. */ 39.128 + u8 reserved7[3]; /* These three bytes must be zero */ 39.129 +}; 39.130 + 39.131 + 39.132 /* Embedded Controller */ 39.133 39.134 struct ec_boot_resources
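The long run of rev-1/rev-2 fields is factored into the FADT_REV2_COMMON macro so the regular ACPI 2.0 descriptor and the new "downrevved" fadt_descriptor_rev2_minus (for firmware reporting FADT2_MINUS_REVISION_ID) share one field list. The sketch below shrinks the macro to a handful of fields purely to show the pattern; it is not the real layout.

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

#define DEMO_FADT_REV2_COMMON \
    u32 V1_firmware_ctrl;   /* 32-bit physical address of FACS */ \
    u32 V1_dsdt;            /* 32-bit physical address of DSDT */ \
    u32 smi_cmd;            /* SMI command port */ \
    u8  acpi_enable;        /* value written to smi_cmd to enable ACPI */

struct demo_fadt_rev2 {             /* full ACPI 2.0 shape (reduced) */
    DEMO_FADT_REV2_COMMON
    u32 flags;
    u8  reset_value;
    u32 x_placeholder[3];           /* stands in for the extended 64-bit blocks */
};

struct demo_fadt_rev2_minus {       /* "downrevved": stops after the reset data */
    DEMO_FADT_REV2_COMMON
    u32 flags;
    u8  reset_value;
};

int main(void)
{
    printf("rev2=%zu bytes, rev2_minus=%zu bytes, one shared field list\n",
           sizeof(struct demo_fadt_rev2), sizeof(struct demo_fadt_rev2_minus));
    return 0;
}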
40.1 --- a/xen/include/acpi/actypes.h Mon May 09 14:34:59 2005 +0000 40.2 +++ b/xen/include/acpi/actypes.h Mon May 09 17:50:11 2005 +0000 40.3 @@ -5,7 +5,7 @@ 40.4 *****************************************************************************/ 40.5 40.6 /* 40.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 40.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 40.9 * All rights reserved. 40.10 * 40.11 * Redistribution and use in source and binary forms, with or without 40.12 @@ -303,7 +303,7 @@ struct uint32_struct 40.13 typedef u32 acpi_integer; 40.14 #define ACPI_INTEGER_MAX ACPI_UINT32_MAX 40.15 #define ACPI_INTEGER_BIT_SIZE 32 40.16 -#define ACPI_MAX_DECIMAL_DIGITS 10 40.17 +#define ACPI_MAX_DECIMAL_DIGITS 10 /* 2^32 = 4,294,967,296 */ 40.18 40.19 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 32-bit divide */ 40.20 40.21 @@ -315,13 +315,18 @@ typedef u32 40.22 typedef u64 acpi_integer; 40.23 #define ACPI_INTEGER_MAX ACPI_UINT64_MAX 40.24 #define ACPI_INTEGER_BIT_SIZE 64 40.25 -#define ACPI_MAX_DECIMAL_DIGITS 19 40.26 +#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ 40.27 + 40.28 40.29 #if ACPI_MACHINE_WIDTH == 64 40.30 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ 40.31 #endif 40.32 #endif 40.33 40.34 +#define ACPI_MAX64_DECIMAL_DIGITS 20 40.35 +#define ACPI_MAX32_DECIMAL_DIGITS 10 40.36 +#define ACPI_MAX16_DECIMAL_DIGITS 5 40.37 +#define ACPI_MAX8_DECIMAL_DIGITS 3 40.38 40.39 /* 40.40 * Constants with special meanings 40.41 @@ -349,7 +354,6 @@ typedef u64 40.42 /* 40.43 * Power state values 40.44 */ 40.45 - 40.46 #define ACPI_STATE_UNKNOWN (u8) 0xFF 40.47 40.48 #define ACPI_STATE_S0 (u8) 0 40.49 @@ -393,7 +397,6 @@ typedef u64 40.50 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 6 40.51 #define ACPI_NOTIFY_POWER_FAULT (u8) 7 40.52 40.53 - 40.54 /* 40.55 * Table types. These values are passed to the table related APIs 40.56 */ 40.57 @@ -409,14 +412,13 @@ typedef u32 40.58 #define ACPI_TABLE_MAX 6 40.59 #define NUM_ACPI_TABLE_TYPES (ACPI_TABLE_MAX+1) 40.60 40.61 - 40.62 /* 40.63 * Types associated with ACPI names and objects. The first group of 40.64 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition 40.65 * of the ACPI object_type() operator (See the ACPI Spec). Therefore, 40.66 * only add to the first group if the spec changes. 40.67 * 40.68 - * Types must be kept in sync with the global acpi_ns_properties 40.69 + * NOTE: Types must be kept in sync with the global acpi_ns_properties 40.70 * and acpi_ns_type_names arrays. 
40.71 */ 40.72 typedef u32 acpi_object_type; 40.73 @@ -453,26 +455,27 @@ typedef u32 40.74 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 40.75 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ 40.76 #define ACPI_TYPE_LOCAL_ALIAS 0x15 40.77 -#define ACPI_TYPE_LOCAL_NOTIFY 0x16 40.78 -#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x17 40.79 -#define ACPI_TYPE_LOCAL_RESOURCE 0x18 40.80 -#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x19 40.81 -#define ACPI_TYPE_LOCAL_SCOPE 0x1A /* 1 Name, multiple object_list Nodes */ 40.82 +#define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 40.83 +#define ACPI_TYPE_LOCAL_NOTIFY 0x17 40.84 +#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 40.85 +#define ACPI_TYPE_LOCAL_RESOURCE 0x19 40.86 +#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A 40.87 +#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ 40.88 40.89 -#define ACPI_TYPE_NS_NODE_MAX 0x1A /* Last typecode used within a NS Node */ 40.90 +#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ 40.91 40.92 /* 40.93 * These are special object types that never appear in 40.94 * a Namespace node, only in an union acpi_operand_object 40.95 */ 40.96 -#define ACPI_TYPE_LOCAL_EXTRA 0x1B 40.97 -#define ACPI_TYPE_LOCAL_DATA 0x1C 40.98 +#define ACPI_TYPE_LOCAL_EXTRA 0x1C 40.99 +#define ACPI_TYPE_LOCAL_DATA 0x1D 40.100 40.101 -#define ACPI_TYPE_LOCAL_MAX 0x1C 40.102 +#define ACPI_TYPE_LOCAL_MAX 0x1D 40.103 40.104 /* All types above here are invalid */ 40.105 40.106 -#define ACPI_TYPE_INVALID 0x1D 40.107 +#define ACPI_TYPE_INVALID 0x1E 40.108 #define ACPI_TYPE_NOT_FOUND 0xFF 40.109 40.110 40.111 @@ -514,9 +517,8 @@ typedef u32 40.112 #define ACPI_WRITE 1 40.113 #define ACPI_IO_MASK 1 40.114 40.115 - 40.116 /* 40.117 - * Acpi Event Types: Fixed & General Purpose 40.118 + * Event Types: Fixed & General Purpose 40.119 */ 40.120 typedef u32 acpi_event_type; 40.121 40.122 @@ -531,25 +533,8 @@ typedef u32 40.123 #define ACPI_EVENT_MAX 4 40.124 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 40.125 40.126 -#define ACPI_GPE_INVALID 0xFF 40.127 -#define ACPI_GPE_MAX 0xFF 40.128 -#define ACPI_NUM_GPE 256 40.129 - 40.130 -#define ACPI_EVENT_LEVEL_TRIGGERED 1 40.131 -#define ACPI_EVENT_EDGE_TRIGGERED 2 40.132 - 40.133 /* 40.134 - * Flags for GPE and Lock interfaces 40.135 - */ 40.136 -#define ACPI_EVENT_WAKE_ENABLE 0x2 40.137 -#define ACPI_EVENT_WAKE_DISABLE 0x2 40.138 - 40.139 -#define ACPI_NOT_ISR 0x1 40.140 -#define ACPI_ISR 0x0 40.141 - 40.142 - 40.143 -/* 40.144 - * acpi_event Status: 40.145 + * Event Status - Per event 40.146 * ------------- 40.147 * The encoding of acpi_event_status is illustrated below. 
40.148 * Note that a set bit (1) indicates the property is TRUE 40.149 @@ -570,12 +555,74 @@ typedef u32 40.150 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 40.151 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 40.152 40.153 +/* 40.154 + * General Purpose Events (GPE) 40.155 + */ 40.156 +#define ACPI_GPE_INVALID 0xFF 40.157 +#define ACPI_GPE_MAX 0xFF 40.158 +#define ACPI_NUM_GPE 256 40.159 + 40.160 +#define ACPI_GPE_ENABLE 0 40.161 +#define ACPI_GPE_DISABLE 1 40.162 + 40.163 + 40.164 +/* 40.165 + * GPE info flags - Per GPE 40.166 + * +-+-+-+---+---+-+ 40.167 + * |7|6|5|4:3|2:1|0| 40.168 + * +-+-+-+---+---+-+ 40.169 + * | | | | | | 40.170 + * | | | | | +--- Interrupt type: Edge or Level Triggered 40.171 + * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime 40.172 + * | | | +--- Type of dispatch -- to method, handler, or none 40.173 + * | | +--- Enabled for runtime? 40.174 + * | +--- Enabled for wake? 40.175 + * +--- System state when GPE ocurred (running/waking) 40.176 + */ 40.177 +#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 40.178 +#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 40.179 +#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 40.180 + 40.181 +#define ACPI_GPE_TYPE_MASK (u8) 0x06 40.182 +#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 40.183 +#define ACPI_GPE_TYPE_WAKE (u8) 0x02 40.184 +#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ 40.185 + 40.186 +#define ACPI_GPE_DISPATCH_MASK (u8) 0x18 40.187 +#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 40.188 +#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 40.189 +#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ 40.190 + 40.191 +#define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 40.192 +#define ACPI_GPE_RUN_ENABLED (u8) 0x20 40.193 +#define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ 40.194 + 40.195 +#define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 40.196 +#define ACPI_GPE_WAKE_ENABLED (u8) 0x40 40.197 +#define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ 40.198 + 40.199 +#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ 40.200 + 40.201 +#define ACPI_GPE_SYSTEM_MASK (u8) 0x80 40.202 +#define ACPI_GPE_SYSTEM_RUNNING (u8) 0x80 40.203 +#define ACPI_GPE_SYSTEM_WAKING (u8) 0x00 40.204 + 40.205 +/* 40.206 + * Flags for GPE and Lock interfaces 40.207 + */ 40.208 +#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ 40.209 +#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ 40.210 + 40.211 +#define ACPI_NOT_ISR 0x1 40.212 +#define ACPI_ISR 0x0 40.213 + 40.214 40.215 /* Notify types */ 40.216 40.217 -#define ACPI_SYSTEM_NOTIFY 0 40.218 -#define ACPI_DEVICE_NOTIFY 1 40.219 -#define ACPI_MAX_NOTIFY_HANDLER_TYPE 1 40.220 +#define ACPI_SYSTEM_NOTIFY 0x1 40.221 +#define ACPI_DEVICE_NOTIFY 0x2 40.222 +#define ACPI_ALL_NOTIFY 0x3 40.223 +#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 40.224 40.225 #define ACPI_MAX_SYS_NOTIFY 0x7f 40.226 40.227 @@ -756,11 +803,11 @@ struct acpi_system_info 40.228 */ 40.229 40.230 typedef u32 40.231 -(ACPI_SYSTEM_XFACE *OSD_HANDLER) ( 40.232 +(ACPI_SYSTEM_XFACE *acpi_osd_handler) ( 40.233 void *context); 40.234 40.235 typedef void 40.236 -(ACPI_SYSTEM_XFACE *OSD_EXECUTION_CALLBACK) ( 40.237 +(ACPI_SYSTEM_XFACE *acpi_osd_exec_callback) ( 40.238 void *context); 40.239 40.240 /* 40.241 @@ -771,10 +818,6 @@ u32 (*acpi_event_handler) ( 40.242 void *context); 40.243 40.244 typedef 40.245 -void (*acpi_gpe_handler) ( 40.246 - void *context); 40.247 - 40.248 -typedef 40.249 void (*acpi_notify_handler) ( 40.250 acpi_handle device, 40.251 u32 value, 40.252 @@ -793,8 +836,16 @@ acpi_status 
(*acpi_init_handler) ( 40.253 40.254 #define ACPI_INIT_DEVICE_INI 1 40.255 40.256 +typedef 40.257 +acpi_status (*acpi_exception_handler) ( 40.258 + acpi_status aml_status, 40.259 + acpi_name name, 40.260 + u16 opcode, 40.261 + u32 aml_offset, 40.262 + void *context); 40.263 40.264 -/* Address Spaces (Operation Regions */ 40.265 + 40.266 +/* Address Spaces (For Operation Regions) */ 40.267 40.268 typedef 40.269 acpi_status (*acpi_adr_space_handler) ( 40.270 @@ -861,6 +912,7 @@ struct acpi_compatible_id_list 40.271 #define ACPI_VALID_HID 0x0004 40.272 #define ACPI_VALID_UID 0x0008 40.273 #define ACPI_VALID_CID 0x0010 40.274 +#define ACPI_VALID_SXDS 0x0020 40.275 40.276 40.277 #define ACPI_COMMON_OBJ_INFO \ 40.278 @@ -880,11 +932,12 @@ struct acpi_device_info 40.279 { 40.280 ACPI_COMMON_OBJ_INFO; 40.281 40.282 - u32 valid; /* Indicates which fields are valid */ 40.283 + u32 valid; /* Indicates which fields below are valid */ 40.284 u32 current_status; /* _STA value */ 40.285 acpi_integer address; /* _ADR value if any */ 40.286 struct acpi_device_id hardware_id; /* _HID value if any */ 40.287 struct acpi_device_id unique_id; /* _UID value if any */ 40.288 + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ 40.289 struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */ 40.290 }; 40.291
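actypes.h collapses the per-GPE bookkeeping into a single flags byte, with the bit layout documented in the new comment (trigger type, wake/run type, dispatch target, enable state). The mask values below are copied from the hunk and exercised in a small stand-alone harness:

#include <stdio.h>

typedef unsigned char u8;

#define ACPI_GPE_XRUPT_TYPE_MASK   (u8) 0x01
#define ACPI_GPE_LEVEL_TRIGGERED   (u8) 0x01
#define ACPI_GPE_EDGE_TRIGGERED    (u8) 0x00

#define ACPI_GPE_TYPE_MASK         (u8) 0x06
#define ACPI_GPE_TYPE_RUNTIME      (u8) 0x04

#define ACPI_GPE_DISPATCH_MASK     (u8) 0x18
#define ACPI_GPE_DISPATCH_METHOD   (u8) 0x10

#define ACPI_GPE_RUN_ENABLE_MASK   (u8) 0x20
#define ACPI_GPE_RUN_ENABLED       (u8) 0x20

int main(void)
{
    /* a runtime GPE dispatched to a _Lxx/_Exx method, currently enabled */
    u8 flags = ACPI_GPE_LEVEL_TRIGGERED | ACPI_GPE_TYPE_RUNTIME |
               ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_RUN_ENABLED;

    printf("level-triggered:    %s\n",
           (flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED ? "yes" : "no");
    printf("dispatch-to-method: %s\n",
           (flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD ? "yes" : "no");
    printf("runtime-enabled:    %s\n",
           (flags & ACPI_GPE_RUN_ENABLE_MASK) ? "yes" : "no");
    return 0;
}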
41.1 --- a/xen/include/acpi/acutils.h Mon May 09 14:34:59 2005 +0000 41.2 +++ b/xen/include/acpi/acutils.h Mon May 09 17:50:11 2005 +0000 41.3 @@ -5,7 +5,7 @@ 41.4 *****************************************************************************/ 41.5 41.6 /* 41.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 41.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 41.9 * All rights reserved. 41.10 * 41.11 * Redistribution and use in source and binary forms, with or without 41.12 @@ -52,7 +52,6 @@ acpi_status (*acpi_pkg_callback) ( 41.13 union acpi_generic_state *state, 41.14 void *context); 41.15 41.16 - 41.17 acpi_status 41.18 acpi_ut_walk_package_tree ( 41.19 union acpi_operand_object *source_object, 41.20 @@ -60,7 +59,6 @@ acpi_ut_walk_package_tree ( 41.21 acpi_pkg_callback walk_callback, 41.22 void *context); 41.23 41.24 - 41.25 struct acpi_pkg_info 41.26 { 41.27 u8 *free_space; 41.28 @@ -180,6 +178,12 @@ acpi_ut_strncpy ( 41.29 acpi_size count); 41.30 41.31 int 41.32 +acpi_ut_memcmp ( 41.33 + const char *buffer1, 41.34 + const char *buffer2, 41.35 + acpi_size count); 41.36 + 41.37 +int 41.38 acpi_ut_strncmp ( 41.39 const char *string1, 41.40 const char *string2, 41.41 @@ -473,9 +477,14 @@ acpi_ut_delete_internal_object_list ( 41.42 #define METHOD_NAME__PRT "_PRT" 41.43 #define METHOD_NAME__CRS "_CRS" 41.44 #define METHOD_NAME__PRS "_PRS" 41.45 +#define METHOD_NAME__PRW "_PRW" 41.46 41.47 41.48 acpi_status 41.49 +acpi_ut_osi_implementation ( 41.50 + struct acpi_walk_state *walk_state); 41.51 + 41.52 +acpi_status 41.53 acpi_ut_evaluate_object ( 41.54 struct acpi_namespace_node *prefix_node, 41.55 char *path, 41.56 @@ -508,6 +517,10 @@ acpi_ut_execute_UID ( 41.57 struct acpi_namespace_node *device_node, 41.58 struct acpi_device_id *uid); 41.59 41.60 +acpi_status 41.61 +acpi_ut_execute_sxds ( 41.62 + struct acpi_namespace_node *device_node, 41.63 + u8 *highest); 41.64 41.65 /* 41.66 * ut_mutex - mutual exclusion interfaces 41.67 @@ -570,6 +583,10 @@ union acpi_operand_object * 41.68 acpi_ut_create_buffer_object ( 41.69 acpi_size buffer_size); 41.70 41.71 +union acpi_operand_object * 41.72 +acpi_ut_create_string_object ( 41.73 + acpi_size string_size); 41.74 + 41.75 41.76 /* 41.77 * ut_ref_cnt - Object reference count management 41.78 @@ -649,12 +666,14 @@ acpi_ut_create_update_state_and_push ( 41.79 u16 action, 41.80 union acpi_generic_state **state_list); 41.81 41.82 +#ifdef ACPI_FUTURE_USAGE 41.83 acpi_status 41.84 acpi_ut_create_pkg_state_and_push ( 41.85 void *internal_object, 41.86 void *external_object, 41.87 u16 index, 41.88 union acpi_generic_state **state_list); 41.89 +#endif 41.90 41.91 union acpi_generic_state * 41.92 acpi_ut_create_control_state ( 41.93 @@ -664,6 +683,7 @@ void 41.94 acpi_ut_delete_generic_state ( 41.95 union acpi_generic_state *state); 41.96 41.97 +#ifdef ACPI_ENABLE_OBJECT_CACHE 41.98 void 41.99 acpi_ut_delete_generic_state_cache ( 41.100 void); 41.101 @@ -671,6 +691,7 @@ acpi_ut_delete_generic_state_cache ( 41.102 void 41.103 acpi_ut_delete_object_cache ( 41.104 void); 41.105 +#endif 41.106 41.107 /* 41.108 * utmisc 41.109 @@ -683,14 +704,14 @@ acpi_ut_print_string ( 41.110 41.111 acpi_status 41.112 acpi_ut_divide ( 41.113 - acpi_integer *in_dividend, 41.114 - acpi_integer *in_divisor, 41.115 + acpi_integer in_dividend, 41.116 + acpi_integer in_divisor, 41.117 acpi_integer *out_quotient, 41.118 acpi_integer *out_remainder); 41.119 41.120 acpi_status 41.121 acpi_ut_short_divide ( 41.122 - acpi_integer *in_dividend, 41.123 + acpi_integer in_dividend, 41.124 u32 
divisor, 41.125 acpi_integer *out_quotient, 41.126 u32 *out_remainder); 41.127 @@ -709,9 +730,15 @@ acpi_ut_strtoul64 ( 41.128 u32 base, 41.129 acpi_integer *ret_integer); 41.130 41.131 +/* Values for Base above (16=Hex, 10=Decimal) */ 41.132 + 41.133 +#define ACPI_ANY_BASE 0 41.134 + 41.135 +#ifdef ACPI_FUTURE_USAGE 41.136 char * 41.137 acpi_ut_strupr ( 41.138 char *src_string); 41.139 +#endif 41.140 41.141 u8 * 41.142 acpi_ut_get_resource_end_tag ( 41.143 @@ -753,9 +780,11 @@ acpi_ut_release_to_cache ( 41.144 u32 list_id, 41.145 void *object); 41.146 41.147 +#ifdef ACPI_ENABLE_OBJECT_CACHE 41.148 void 41.149 acpi_ut_delete_generic_cache ( 41.150 u32 list_id); 41.151 +#endif 41.152 41.153 acpi_status 41.154 acpi_ut_validate_buffer ( 41.155 @@ -830,9 +859,11 @@ acpi_ut_remove_allocation ( 41.156 char *module, 41.157 u32 line); 41.158 41.159 +#ifdef ACPI_FUTURE_USAGE 41.160 void 41.161 acpi_ut_dump_allocation_info ( 41.162 void); 41.163 +#endif 41.164 41.165 void 41.166 acpi_ut_dump_allocations (
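Annotation: the acutils.h hunk changes acpi_ut_divide and acpi_ut_short_divide to take the dividend and divisor by value instead of by pointer, returning the quotient and remainder through out-pointers. A hypothetical standalone helper with the same shape (the error handling and types are simplified stand-ins, not the ACPICA implementation):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t acpi_integer;

/* Returns 0 on success, -1 on divide-by-zero; results go through the
 * out-pointers, mirroring the by-value prototype introduced above. */
static int ut_divide(acpi_integer in_dividend, acpi_integer in_divisor,
                     acpi_integer *out_quotient, acpi_integer *out_remainder)
{
    if (in_divisor == 0)
        return -1;
    if (out_quotient)
        *out_quotient = in_dividend / in_divisor;
    if (out_remainder)
        *out_remainder = in_dividend % in_divisor;
    return 0;
}

int main(void)
{
    acpi_integer q, r;
    if (ut_divide(1000003, 97, &q, &r) == 0)
        printf("1000003 / 97 = %llu remainder %llu\n",
               (unsigned long long)q, (unsigned long long)r);
    return 0;
}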
42.1 --- a/xen/include/acpi/platform/acenv.h Mon May 09 14:34:59 2005 +0000 42.2 +++ b/xen/include/acpi/platform/acenv.h Mon May 09 17:50:11 2005 +0000 42.3 @@ -5,7 +5,7 @@ 42.4 *****************************************************************************/ 42.5 42.6 /* 42.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 42.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 42.9 * All rights reserved. 42.10 * 42.11 * Redistribution and use in source and binary forms, with or without 42.12 @@ -57,6 +57,7 @@ 42.13 #define ACPI_DISASSEMBLER 42.14 #define ACPI_NO_METHOD_EXECUTION 42.15 #define ACPI_USE_SYSTEM_CLIBRARY 42.16 +#define ACPI_ENABLE_OBJECT_CACHE 42.17 #endif 42.18 42.19 #ifdef _ACPI_EXEC_APP 42.20 @@ -67,6 +68,7 @@ 42.21 #define ACPI_DEBUGGER 42.22 #define ACPI_DISASSEMBLER 42.23 #define ACPI_USE_SYSTEM_CLIBRARY 42.24 +#define ACPI_ENABLE_OBJECT_CACHE 42.25 #endif 42.26 42.27 #ifdef _ACPI_ASL_COMPILER 42.28 @@ -75,6 +77,7 @@ 42.29 #define ACPI_DISASSEMBLER 42.30 #define ACPI_CONSTANT_EVAL_ONLY 42.31 #define ACPI_USE_SYSTEM_CLIBRARY 42.32 +#define ACPI_ENABLE_OBJECT_CACHE 42.33 #endif 42.34 42.35 /* 42.36 @@ -152,12 +155,8 @@ 42.37 #define COMPILER_DEPENDENT_INT64 long long 42.38 #define COMPILER_DEPENDENT_UINT64 unsigned long long 42.39 42.40 - 42.41 -/* Name of host operating system (returned by the _OS_ namespace object) */ 42.42 - 42.43 -#define ACPI_OS_NAME "Intel ACPI/CA Core Subsystem" 42.44 - 42.45 -/* This macro is used to tag functions as "printf-like" because 42.46 +/* 42.47 + * This macro is used to tag functions as "printf-like" because 42.48 * some compilers can catch printf format string problems. MSVC 42.49 * doesn't, so this is proprocessed away. 42.50 */ 42.51 @@ -227,7 +226,11 @@ 42.52 */ 42.53 42.54 #define ACPI_STRSTR(s1,s2) strstr((s1), (s2)) 42.55 + 42.56 +#ifdef ACPI_FUTURE_USAGE 42.57 #define ACPI_STRUPR(s) (void) acpi_ut_strupr ((s)) 42.58 +#endif 42.59 + 42.60 #define ACPI_STRLEN(s) (acpi_size) strlen((s)) 42.61 #define ACPI_STRCPY(d,s) (void) strcpy((d), (s)) 42.62 #define ACPI_STRNCPY(d,s,n) (void) strncpy((d), (s), (acpi_size)(n)) 42.63 @@ -236,6 +239,7 @@ 42.64 #define ACPI_STRCAT(d,s) (void) strcat((d), (s)) 42.65 #define ACPI_STRNCAT(d,s,n) strncat((d), (s), (acpi_size)(n)) 42.66 #define ACPI_STRTOUL(d,s,n) strtoul((d), (s), (acpi_size)(n)) 42.67 +#define ACPI_MEMCMP(s1,s2,n) memcmp((s1), (s2), (acpi_size)(n)) 42.68 #define ACPI_MEMCPY(d,s,n) (void) memcpy((d), (s), (acpi_size)(n)) 42.69 #define ACPI_MEMSET(d,s,n) (void) memset((d), (s), (acpi_size)(n)) 42.70 42.71 @@ -290,7 +294,11 @@ typedef char *va_list; 42.72 42.73 42.74 #define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2)) 42.75 + 42.76 +#ifdef ACPI_FUTURE_USAGE 42.77 #define ACPI_STRUPR(s) (void) acpi_ut_strupr ((s)) 42.78 +#endif 42.79 + 42.80 #define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s)) 42.81 #define ACPI_STRCPY(d,s) (void) acpi_ut_strcpy ((d), (s)) 42.82 #define ACPI_STRNCPY(d,s,n) (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n)) 42.83 @@ -299,6 +307,7 @@ typedef char *va_list; 42.84 #define ACPI_STRCAT(d,s) (void) acpi_ut_strcat ((d), (s)) 42.85 #define ACPI_STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (acpi_size)(n)) 42.86 #define ACPI_STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s), (acpi_size)(n)) 42.87 +#define ACPI_MEMCMP(s1,s2,n) acpi_ut_memcmp((s1), (s2), (acpi_size)(n)) 42.88 #define ACPI_MEMCPY(d,s,n) (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n)) 42.89 #define ACPI_MEMSET(d,v,n) (void) acpi_ut_memset ((d), (v), (acpi_size)(n)) 42.90 #define ACPI_TOUPPER acpi_ut_to_upper
43.1 --- a/xen/include/acpi/platform/acgcc.h Mon May 09 14:34:59 2005 +0000 43.2 +++ b/xen/include/acpi/platform/acgcc.h Mon May 09 17:50:11 2005 +0000 43.3 @@ -5,7 +5,7 @@ 43.4 *****************************************************************************/ 43.5 43.6 /* 43.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 43.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 43.9 * All rights reserved. 43.10 * 43.11 * Redistribution and use in source and binary forms, with or without 43.12 @@ -54,6 +54,6 @@ 43.13 * to to tell the compiler warning in a per-variable manner that a variable 43.14 * is unused. 43.15 */ 43.16 -#define ACPI_UNUSED_VAR __attribute_used__ 43.17 +#define ACPI_UNUSED_VAR __attribute__ ((unused)) 43.18 43.19 #endif /* __ACGCC_H__ */
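Annotation: the acgcc.h change replaces the obsolete __attribute_used__ spelling with the standard GCC "unused" attribute. A small sketch of how the macro suppresses unused-variable warnings; the compiler flags in the comment are just the usual GCC ones, nothing mandated by the patch.

/* Build with: gcc -Wall -Wunused -c example.c */
#define ACPI_UNUSED_VAR __attribute__ ((unused))

static int probe(void)
{
    int debug_only_counter ACPI_UNUSED_VAR = 0;  /* no warning, even though unused */
    return 42;
}

int main(void) { return probe() == 42 ? 0 : 1; }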
44.1 --- a/xen/include/acpi/platform/aclinux.h Mon May 09 14:34:59 2005 +0000 44.2 +++ b/xen/include/acpi/platform/aclinux.h Mon May 09 17:50:11 2005 +0000 44.3 @@ -5,7 +5,7 @@ 44.4 *****************************************************************************/ 44.5 44.6 /* 44.7 - * Copyright (C) 2000 - 2004, R. Byron Moore 44.8 + * Copyright (C) 2000 - 2005, R. Byron Moore 44.9 * All rights reserved. 44.10 * 44.11 * Redistribution and use in source and binary forms, with or without 44.12 @@ -44,8 +44,6 @@ 44.13 #ifndef __ACLINUX_H__ 44.14 #define __ACLINUX_H__ 44.15 44.16 -#define ACPI_OS_NAME "Linux" 44.17 - 44.18 #define ACPI_USE_SYSTEM_CLIBRARY 44.19 #define ACPI_USE_DO_WHILE_0 44.20 44.21 @@ -83,6 +81,8 @@ 44.22 #define ACPI_USE_NATIVE_DIVIDE 44.23 #endif 44.24 44.25 +#define __cdecl 44.26 +#define ACPI_FLUSH_CPU_CACHE() 44.27 #endif /* __KERNEL__ */ 44.28 44.29 /* Linux uses GCC */
45.1 --- a/xen/include/asm-ia64/config.h Mon May 09 14:34:59 2005 +0000 45.2 +++ b/xen/include/asm-ia64/config.h Mon May 09 17:50:11 2005 +0000 45.3 @@ -31,10 +31,6 @@ typedef int pid_t; 45.4 45.5 #define touch_nmi_watchdog() 45.6 // from linux/include/linux/types.h 45.7 -#define BITS_TO_LONGS(bits) \ 45.8 - (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) 45.9 -#define DECLARE_BITMAP(name,bits) \ 45.10 - unsigned long name[BITS_TO_LONGS(bits)] 45.11 #define CLEAR_BITMAP(name,bits) \ 45.12 memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long)) 45.13 45.14 @@ -57,7 +53,6 @@ extern char _end[]; /* standard ELF symb 45.15 //#define __kernel 45.16 //#define __safe 45.17 #define __force 45.18 -#define __iomem 45.19 #define __chk_user_ptr(x) (void)0 45.20 //#define __chk_io_ptr(x) (void)0 45.21 //#define __builtin_warning(x, y...) (1) 45.22 @@ -77,9 +72,6 @@ extern char _end[]; /* standard ELF symb 45.23 //#define CONFIG_NR_CPUS 16 45.24 #define barrier() __asm__ __volatile__("": : :"memory") 45.25 45.26 -// linux/include/spinlock.h 45.27 -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED 45.28 - 45.29 /////////////////////////////////////////////////////////////// 45.30 // xen/include/asm/config.h 45.31 // Natural boundary upon TR size to define xenheap space
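Annotation: the ia64 config.h hunk drops its private BITS_TO_LONGS/DECLARE_BITMAP copies now that common bitmap code (xen/common/bitmap.c, added in this changeset) supplies them. For reference, the removed macros are just a round-up division and an array declaration; a standalone sketch with an assumed 64-bit BITS_PER_LONG (the real value is per-architecture):

#include <stdio.h>

#define BITS_PER_LONG 64   /* assumption for this sketch */
#define BITS_TO_LONGS(bits) \
    (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) \
    unsigned long name[BITS_TO_LONGS(bits)]

DECLARE_BITMAP(cpu_mask, 130);   /* 130 bits round up to 3 longs on a 64-bit build */

int main(void)
{
    printf("BITS_TO_LONGS(130) = %d, array spans %zu bytes\n",
           (int)BITS_TO_LONGS(130), sizeof(cpu_mask));
    return 0;
}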
46.1 --- a/xen/include/asm-x86/apic.h Mon May 09 14:34:59 2005 +0000 46.2 +++ b/xen/include/asm-x86/apic.h Mon May 09 17:50:11 2005 +0000 46.3 @@ -2,19 +2,34 @@ 46.4 #define __ASM_APIC_H 46.5 46.6 #include <xen/config.h> 46.7 -#include <asm/regs.h> 46.8 +#include <asm/fixmap.h> 46.9 #include <asm/apicdef.h> 46.10 #include <asm/system.h> 46.11 46.12 -#ifdef CONFIG_X86_LOCAL_APIC 46.13 +#define Dprintk(x...) 46.14 46.15 -#define APIC_DEBUG 0 46.16 +/* 46.17 + * Debugging macros 46.18 + */ 46.19 +#define APIC_QUIET 0 46.20 +#define APIC_VERBOSE 1 46.21 +#define APIC_DEBUG 2 46.22 + 46.23 +extern int apic_verbosity; 46.24 46.25 -#if APIC_DEBUG 46.26 -#define Dprintk(x...) printk(x) 46.27 -#else 46.28 -#define Dprintk(x...) 46.29 -#endif 46.30 +/* 46.31 + * Define the default level of output to be very little 46.32 + * This can be turned up by using apic=verbose for more 46.33 + * information and apic=debug for _lots_ of information. 46.34 + * apic_verbosity is defined in apic.c 46.35 + */ 46.36 +#define apic_printk(v, s, a...) do { \ 46.37 + if ((v) <= apic_verbosity) \ 46.38 + printk(s, ##a); \ 46.39 + } while (0) 46.40 + 46.41 + 46.42 +#ifdef CONFIG_X86_LOCAL_APIC 46.43 46.44 /* 46.45 * Basic functions accessing APICs. 46.46 @@ -37,9 +52,12 @@ static __inline u32 apic_read(unsigned l 46.47 46.48 static __inline__ void apic_wait_icr_idle(void) 46.49 { 46.50 - do { } while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); 46.51 + while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) 46.52 + cpu_relax(); 46.53 } 46.54 46.55 +int get_physical_broadcast(void); 46.56 + 46.57 #ifdef CONFIG_X86_GOOD_APIC 46.58 # define FORCE_READ_AROUND_WRITE 0 46.59 # define apic_read_around(x) 46.60 @@ -63,21 +81,29 @@ static inline void ack_APIC_irq(void) 46.61 apic_write_around(APIC_EOI, 0); 46.62 } 46.63 46.64 +extern void (*wait_timer_tick)(void); 46.65 + 46.66 extern int get_maxlvt(void); 46.67 extern void clear_local_APIC(void); 46.68 extern void connect_bsp_APIC (void); 46.69 extern void disconnect_bsp_APIC (void); 46.70 extern void disable_local_APIC (void); 46.71 +extern void lapic_shutdown (void); 46.72 extern int verify_local_APIC (void); 46.73 extern void cache_APIC_registers (void); 46.74 extern void sync_Arb_IDs (void); 46.75 extern void init_bsp_APIC (void); 46.76 extern void setup_local_APIC (void); 46.77 extern void init_apic_mappings (void); 46.78 -extern void smp_local_timer_interrupt (struct cpu_user_regs * regs); 46.79 -extern void setup_APIC_clocks (void); 46.80 +extern void smp_local_timer_interrupt (struct cpu_user_regs *regs); 46.81 +extern void setup_boot_APIC_clock (void); 46.82 +extern void setup_secondary_APIC_clock (void); 46.83 extern void setup_apic_nmi_watchdog (void); 46.84 -extern void nmi_watchdog_tick (struct cpu_user_regs * regs); 46.85 +extern int reserve_lapic_nmi(void); 46.86 +extern void release_lapic_nmi(void); 46.87 +extern void disable_timer_nmi_watchdog(void); 46.88 +extern void enable_timer_nmi_watchdog(void); 46.89 +extern void nmi_watchdog_tick (struct cpu_user_regs *regs); 46.90 extern void touch_nmi_watchdog(void); 46.91 extern int APIC_init_uniprocessor (void); 46.92 extern void disable_APIC_timer(void); 46.93 @@ -85,6 +111,7 @@ extern void enable_APIC_timer(void); 46.94 46.95 extern unsigned int watchdog_on; 46.96 extern int check_nmi_watchdog (void); 46.97 +extern void enable_NMI_through_LVT0 (void * dummy); 46.98 46.99 extern unsigned int nmi_watchdog; 46.100 #define NMI_NONE 0 46.101 @@ -92,6 +119,9 @@ extern unsigned int nmi_watchdog; 46.102 #define NMI_LOCAL_APIC 2 
46.103 #define NMI_INVALID 3 46.104 46.105 -#endif /* CONFIG_X86_LOCAL_APIC */ 46.106 +#else /* !CONFIG_X86_LOCAL_APIC */ 46.107 +static inline void lapic_shutdown(void) { } 46.108 + 46.109 +#endif /* !CONFIG_X86_LOCAL_APIC */ 46.110 46.111 #endif /* __ASM_APIC_H */
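Annotation: the reworked apic.h introduces apic_verbosity and the apic_printk() macro so APIC output can be turned up with apic=verbose or apic=debug. A user-space sketch of the same gating pattern; printf stands in for printk, the verbosity constants copy the header, and the GNU-style variadic macro matches the hunk.

#include <stdio.h>

#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

static int apic_verbosity = APIC_QUIET;   /* would normally come from the command line */

#define apic_printk(v, s, a...) do {        \
        if ((v) <= apic_verbosity)          \
            printf(s, ##a);                 \
    } while (0)

int main(void)
{
    apic_printk(APIC_VERBOSE, "mapped APIC to %08lx\n", 0xfee00000UL); /* suppressed */
    apic_verbosity = APIC_DEBUG;
    apic_printk(APIC_DEBUG, "calibrating APIC timer ...\n");           /* printed */
    return 0;
}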
47.1 --- a/xen/include/asm-x86/apicdef.h Mon May 09 14:34:59 2005 +0000 47.2 +++ b/xen/include/asm-x86/apicdef.h Mon May 09 17:50:11 2005 +0000 47.3 @@ -11,14 +11,11 @@ 47.4 #define APIC_DEFAULT_PHYS_BASE 0xfee00000 47.5 47.6 #define APIC_ID 0x20 47.7 -#define APIC_ID_MASK (0x0F<<24) 47.8 -#define GET_APIC_ID(x) (((x)>>24)&0x0F) 47.9 #define APIC_LVR 0x30 47.10 #define APIC_LVR_MASK 0xFF00FF 47.11 #define GET_APIC_VERSION(x) ((x)&0xFF) 47.12 #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) 47.13 #define APIC_INTEGRATED(x) ((x)&0xF0) 47.14 -#define APIC_XAPIC_SUPPORT(x) ((x)>=0x14) 47.15 #define APIC_TASKPRI 0x80 47.16 #define APIC_TPRI_MASK 0xFF 47.17 #define APIC_ARBPRI 0x90 47.18 @@ -33,8 +30,8 @@ 47.19 #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) 47.20 #define APIC_ALL_CPUS 0xFF 47.21 #define APIC_DFR 0xE0 47.22 -#define APIC_DFR_CLUSTER 0x0FFFFFFFul /* Clustered */ 47.23 -#define APIC_DFR_FLAT 0xFFFFFFFFul /* Flat mode */ 47.24 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul 47.25 +#define APIC_DFR_FLAT 0xFFFFFFFFul 47.26 #define APIC_SPIV 0xF0 47.27 #define APIC_SPIV_FOCUS_DISABLED (1<<9) 47.28 #define APIC_SPIV_APIC_ENABLED (1<<8) 47.29 @@ -60,7 +57,6 @@ 47.30 #define APIC_INT_LEVELTRIG 0x08000 47.31 #define APIC_INT_ASSERT 0x04000 47.32 #define APIC_ICR_BUSY 0x01000 47.33 -#define APIC_DEST_PHYSICAL 0x00000 47.34 #define APIC_DEST_LOGICAL 0x00800 47.35 #define APIC_DM_FIXED 0x00000 47.36 #define APIC_DM_LOWEST 0x00100 47.37 @@ -75,6 +71,7 @@ 47.38 #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) 47.39 #define SET_APIC_DEST_FIELD(x) ((x)<<24) 47.40 #define APIC_LVTT 0x320 47.41 +#define APIC_LVTTHMR 0x330 47.42 #define APIC_LVTPC 0x340 47.43 #define APIC_LVT0 0x350 47.44 #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) 47.45 @@ -111,18 +108,272 @@ 47.46 47.47 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) 47.48 47.49 -#ifdef CONFIG_X86_CLUSTERED_APIC 47.50 -#define MAX_IO_APICS 32 47.51 +#ifdef CONFIG_NUMA 47.52 + #define MAX_IO_APICS 32 47.53 #else 47.54 -#define MAX_IO_APICS 8 47.55 + #define MAX_IO_APICS 8 47.56 #endif 47.57 47.58 - 47.59 /* 47.60 - * The broadcast ID is 0xF for old APICs and 0xFF for xAPICs. SAPICs 47.61 - * don't broadcast (yet?), but if they did, they might use 0xFFFF. 47.62 + * the local APIC register structure, memory mapped. Not terribly well 47.63 + * tested, but we might eventually use this one in the future - the 47.64 + * problem why we cannot use it right now is the P5 APIC, it has an 47.65 + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
47.66 */ 47.67 -#define APIC_BROADCAST_ID_XAPIC (0xFF) 47.68 -#define APIC_BROADCAST_ID_APIC (0x0F) 47.69 +#define u32 unsigned int 47.70 + 47.71 +#define lapic ((volatile struct local_apic *)APIC_BASE) 47.72 + 47.73 +#ifndef __ASSEMBLY__ 47.74 +struct local_apic { 47.75 + 47.76 +/*000*/ struct { u32 __reserved[4]; } __reserved_01; 47.77 + 47.78 +/*010*/ struct { u32 __reserved[4]; } __reserved_02; 47.79 + 47.80 +/*020*/ struct { /* APIC ID Register */ 47.81 + u32 __reserved_1 : 24, 47.82 + phys_apic_id : 4, 47.83 + __reserved_2 : 4; 47.84 + u32 __reserved[3]; 47.85 + } id; 47.86 + 47.87 +/*030*/ const 47.88 + struct { /* APIC Version Register */ 47.89 + u32 version : 8, 47.90 + __reserved_1 : 8, 47.91 + max_lvt : 8, 47.92 + __reserved_2 : 8; 47.93 + u32 __reserved[3]; 47.94 + } version; 47.95 + 47.96 +/*040*/ struct { u32 __reserved[4]; } __reserved_03; 47.97 + 47.98 +/*050*/ struct { u32 __reserved[4]; } __reserved_04; 47.99 + 47.100 +/*060*/ struct { u32 __reserved[4]; } __reserved_05; 47.101 + 47.102 +/*070*/ struct { u32 __reserved[4]; } __reserved_06; 47.103 + 47.104 +/*080*/ struct { /* Task Priority Register */ 47.105 + u32 priority : 8, 47.106 + __reserved_1 : 24; 47.107 + u32 __reserved_2[3]; 47.108 + } tpr; 47.109 + 47.110 +/*090*/ const 47.111 + struct { /* Arbitration Priority Register */ 47.112 + u32 priority : 8, 47.113 + __reserved_1 : 24; 47.114 + u32 __reserved_2[3]; 47.115 + } apr; 47.116 + 47.117 +/*0A0*/ const 47.118 + struct { /* Processor Priority Register */ 47.119 + u32 priority : 8, 47.120 + __reserved_1 : 24; 47.121 + u32 __reserved_2[3]; 47.122 + } ppr; 47.123 + 47.124 +/*0B0*/ struct { /* End Of Interrupt Register */ 47.125 + u32 eoi; 47.126 + u32 __reserved[3]; 47.127 + } eoi; 47.128 + 47.129 +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; 47.130 + 47.131 +/*0D0*/ struct { /* Logical Destination Register */ 47.132 + u32 __reserved_1 : 24, 47.133 + logical_dest : 8; 47.134 + u32 __reserved_2[3]; 47.135 + } ldr; 47.136 + 47.137 +/*0E0*/ struct { /* Destination Format Register */ 47.138 + u32 __reserved_1 : 28, 47.139 + model : 4; 47.140 + u32 __reserved_2[3]; 47.141 + } dfr; 47.142 + 47.143 +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ 47.144 + u32 spurious_vector : 8, 47.145 + apic_enabled : 1, 47.146 + focus_cpu : 1, 47.147 + __reserved_2 : 22; 47.148 + u32 __reserved_3[3]; 47.149 + } svr; 47.150 + 47.151 +/*100*/ struct { /* In Service Register */ 47.152 +/*170*/ u32 bitfield; 47.153 + u32 __reserved[3]; 47.154 + } isr [8]; 47.155 + 47.156 +/*180*/ struct { /* Trigger Mode Register */ 47.157 +/*1F0*/ u32 bitfield; 47.158 + u32 __reserved[3]; 47.159 + } tmr [8]; 47.160 + 47.161 +/*200*/ struct { /* Interrupt Request Register */ 47.162 +/*270*/ u32 bitfield; 47.163 + u32 __reserved[3]; 47.164 + } irr [8]; 47.165 + 47.166 +/*280*/ union { /* Error Status Register */ 47.167 + struct { 47.168 + u32 send_cs_error : 1, 47.169 + receive_cs_error : 1, 47.170 + send_accept_error : 1, 47.171 + receive_accept_error : 1, 47.172 + __reserved_1 : 1, 47.173 + send_illegal_vector : 1, 47.174 + receive_illegal_vector : 1, 47.175 + illegal_register_address : 1, 47.176 + __reserved_2 : 24; 47.177 + u32 __reserved_3[3]; 47.178 + } error_bits; 47.179 + struct { 47.180 + u32 errors; 47.181 + u32 __reserved_3[3]; 47.182 + } all_errors; 47.183 + } esr; 47.184 + 47.185 +/*290*/ struct { u32 __reserved[4]; } __reserved_08; 47.186 + 47.187 +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; 47.188 + 47.189 +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; 47.190 
+ 47.191 +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; 47.192 + 47.193 +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; 47.194 + 47.195 +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; 47.196 + 47.197 +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; 47.198 + 47.199 +/*300*/ struct { /* Interrupt Command Register 1 */ 47.200 + u32 vector : 8, 47.201 + delivery_mode : 3, 47.202 + destination_mode : 1, 47.203 + delivery_status : 1, 47.204 + __reserved_1 : 1, 47.205 + level : 1, 47.206 + trigger : 1, 47.207 + __reserved_2 : 2, 47.208 + shorthand : 2, 47.209 + __reserved_3 : 12; 47.210 + u32 __reserved_4[3]; 47.211 + } icr1; 47.212 + 47.213 +/*310*/ struct { /* Interrupt Command Register 2 */ 47.214 + union { 47.215 + u32 __reserved_1 : 24, 47.216 + phys_dest : 4, 47.217 + __reserved_2 : 4; 47.218 + u32 __reserved_3 : 24, 47.219 + logical_dest : 8; 47.220 + } dest; 47.221 + u32 __reserved_4[3]; 47.222 + } icr2; 47.223 + 47.224 +/*320*/ struct { /* LVT - Timer */ 47.225 + u32 vector : 8, 47.226 + __reserved_1 : 4, 47.227 + delivery_status : 1, 47.228 + __reserved_2 : 3, 47.229 + mask : 1, 47.230 + timer_mode : 1, 47.231 + __reserved_3 : 14; 47.232 + u32 __reserved_4[3]; 47.233 + } lvt_timer; 47.234 + 47.235 +/*330*/ struct { /* LVT - Thermal Sensor */ 47.236 + u32 vector : 8, 47.237 + delivery_mode : 3, 47.238 + __reserved_1 : 1, 47.239 + delivery_status : 1, 47.240 + __reserved_2 : 3, 47.241 + mask : 1, 47.242 + __reserved_3 : 15; 47.243 + u32 __reserved_4[3]; 47.244 + } lvt_thermal; 47.245 + 47.246 +/*340*/ struct { /* LVT - Performance Counter */ 47.247 + u32 vector : 8, 47.248 + delivery_mode : 3, 47.249 + __reserved_1 : 1, 47.250 + delivery_status : 1, 47.251 + __reserved_2 : 3, 47.252 + mask : 1, 47.253 + __reserved_3 : 15; 47.254 + u32 __reserved_4[3]; 47.255 + } lvt_pc; 47.256 + 47.257 +/*350*/ struct { /* LVT - LINT0 */ 47.258 + u32 vector : 8, 47.259 + delivery_mode : 3, 47.260 + __reserved_1 : 1, 47.261 + delivery_status : 1, 47.262 + polarity : 1, 47.263 + remote_irr : 1, 47.264 + trigger : 1, 47.265 + mask : 1, 47.266 + __reserved_2 : 15; 47.267 + u32 __reserved_3[3]; 47.268 + } lvt_lint0; 47.269 + 47.270 +/*360*/ struct { /* LVT - LINT1 */ 47.271 + u32 vector : 8, 47.272 + delivery_mode : 3, 47.273 + __reserved_1 : 1, 47.274 + delivery_status : 1, 47.275 + polarity : 1, 47.276 + remote_irr : 1, 47.277 + trigger : 1, 47.278 + mask : 1, 47.279 + __reserved_2 : 15; 47.280 + u32 __reserved_3[3]; 47.281 + } lvt_lint1; 47.282 + 47.283 +/*370*/ struct { /* LVT - Error */ 47.284 + u32 vector : 8, 47.285 + __reserved_1 : 4, 47.286 + delivery_status : 1, 47.287 + __reserved_2 : 3, 47.288 + mask : 1, 47.289 + __reserved_3 : 15; 47.290 + u32 __reserved_4[3]; 47.291 + } lvt_error; 47.292 + 47.293 +/*380*/ struct { /* Timer Initial Count Register */ 47.294 + u32 initial_count; 47.295 + u32 __reserved_2[3]; 47.296 + } timer_icr; 47.297 + 47.298 +/*390*/ const 47.299 + struct { /* Timer Current Count Register */ 47.300 + u32 curr_count; 47.301 + u32 __reserved_2[3]; 47.302 + } timer_ccr; 47.303 + 47.304 +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; 47.305 + 47.306 +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; 47.307 + 47.308 +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; 47.309 + 47.310 +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; 47.311 + 47.312 +/*3E0*/ struct { /* Timer Divide Configuration Register */ 47.313 + u32 divisor : 4, 47.314 + __reserved_1 : 28; 47.315 + u32 __reserved_2[3]; 47.316 + } timer_dcr; 47.317 + 47.318 +/*3F0*/ struct { 
u32 __reserved[4]; } __reserved_20; 47.319 + 47.320 +} __attribute__ ((packed)); 47.321 +#endif /* !__ASSEMBLY__ */ 47.322 + 47.323 +#undef u32 47.324 47.325 #endif
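Annotation: in the new struct local_apic every register occupies a 16-byte slot (one 32-bit field plus three reserved words), so a member's structure offset matches its mnemonic offset (APIC_ID at 0x20, APIC_TASKPRI at 0x80, and so on). A trimmed, hypothetical user-space check of that invariant for the first two named slots; the full layout is in the hunk above.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned int u32;

/* First few slots of the layout, reduced for illustration. */
struct local_apic_prefix {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct {                       /* APIC ID Register */
            u32 __reserved_1 : 24,
                phys_apic_id :  4,
                __reserved_2 :  4;
            u32 __reserved[3];
        } id;
/*030*/ struct {                       /* APIC Version Register */
            u32 version      :  8,
                __reserved_1 :  8,
                max_lvt      :  8,
                __reserved_2 :  8;
            u32 __reserved[3];
        } version;
} __attribute__ ((packed));

int main(void)
{
    assert(offsetof(struct local_apic_prefix, id) == 0x20);
    assert(offsetof(struct local_apic_prefix, version) == 0x30);
    printf("id register at 0x%zx, version register at 0x%zx\n",
           offsetof(struct local_apic_prefix, id),
           offsetof(struct local_apic_prefix, version));
    return 0;
}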
48.1 --- a/xen/include/asm-x86/config.h Mon May 09 14:34:59 2005 +0000 48.2 +++ b/xen/include/asm-x86/config.h Mon May 09 17:50:11 2005 +0000 48.3 @@ -69,6 +69,8 @@ extern unsigned long _end; /* standard E 48.4 48.5 #if defined(__x86_64__) 48.6 48.7 +#define CONFIG_X86_64 1 48.8 + 48.9 #define asmlinkage 48.10 48.11 #define XENHEAP_DEFAULT_MB (16) 48.12 @@ -179,6 +181,8 @@ extern unsigned long _end; /* standard E 48.13 48.14 #elif defined(__i386__) 48.15 48.16 +#define CONFIG_X86_32 1 48.17 + 48.18 #define asmlinkage __attribute__((regparm(0))) 48.19 48.20 /*
49.1 --- a/xen/include/asm-x86/domain.h Mon May 09 14:34:59 2005 +0000 49.2 +++ b/xen/include/asm-x86/domain.h Mon May 09 17:50:11 2005 +0000 49.3 @@ -2,7 +2,9 @@ 49.4 #ifndef __ASM_DOMAIN_H__ 49.5 #define __ASM_DOMAIN_H__ 49.6 49.7 +#include <xen/config.h> 49.8 #include <xen/mm.h> 49.9 +#include <asm/vmx_vmcs.h> 49.10 49.11 struct trap_bounce { 49.12 unsigned long error_code;
50.1 --- a/xen/include/asm-x86/fixmap.h Mon May 09 14:34:59 2005 +0000 50.2 +++ b/xen/include/asm-x86/fixmap.h Mon May 09 17:50:11 2005 +0000 50.3 @@ -13,6 +13,7 @@ 50.4 #define _ASM_FIXMAP_H 50.5 50.6 #include <xen/config.h> 50.7 +#include <xen/lib.h> 50.8 #include <asm/acpi.h> 50.9 #include <asm/apicdef.h> 50.10 #include <asm/page.h> 50.11 @@ -52,6 +53,36 @@ extern void __set_fixmap( 50.12 #define set_fixmap_nocache(idx, phys) \ 50.13 __set_fixmap(idx, phys, PAGE_HYPERVISOR_NOCACHE) 50.14 50.15 -#define fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 50.16 +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 50.17 +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 50.18 + 50.19 +extern void __this_fixmap_does_not_exist(void); 50.20 + 50.21 +/* 50.22 + * 'index to address' translation. If anyone tries to use the idx 50.23 + * directly without translation, we catch the bug with a NULL-deference 50.24 + * kernel oops. Illegal ranges of incoming indices are caught too. 50.25 + */ 50.26 +static always_inline unsigned long fix_to_virt(const unsigned int idx) 50.27 +{ 50.28 + /* 50.29 + * This branch gets completely eliminated after inlining, except when 50.30 + * someone tries to use fixaddr indices in an illegal way (such as mixing 50.31 + * up address types or using out-of-range indices). 50.32 + * 50.33 + * If it doesn't get removed, the linker will complain loudly with a 50.34 + * reasonably clear error message. 50.35 + */ 50.36 + if (idx >= __end_of_fixed_addresses) 50.37 + __this_fixmap_does_not_exist(); 50.38 + 50.39 + return __fix_to_virt(idx); 50.40 +} 50.41 + 50.42 +static inline unsigned long virt_to_fix(const unsigned long vaddr) 50.43 +{ 50.44 + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 50.45 + return __virt_to_fix(vaddr); 50.46 +} 50.47 50.48 #endif
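Annotation: the fixmap change turns fix_to_virt() into an always-inline function whose out-of-range branch calls the undefined __this_fixmap_does_not_exist(), so an illegal constant index fails at link time while a legal one folds down to the same arithmetic as __fix_to_virt(). A standalone sketch of just that arithmetic, under assumed values of FIXADDR_TOP and PAGE_SHIFT (the real values are configuration-dependent):

#include <stdio.h>

#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xFFFFE000UL   /* assumed for the sketch, not Xen's actual value */

#define __fix_to_virt(x)  (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)  ((FIXADDR_TOP - ((x) & ~((1UL << PAGE_SHIFT) - 1))) >> PAGE_SHIFT)

int main(void)
{
    unsigned long va = __fix_to_virt(3);   /* slot 3 sits three pages below the top */
    printf("fixmap slot 3 maps at %#lx\n", va);
    printf("and back to index %lu\n", __virt_to_fix(va + 0x123)); /* in-page offset ignored */
    return 0;
}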
51.1 --- a/xen/include/asm-x86/io_apic.h Mon May 09 14:34:59 2005 +0000 51.2 +++ b/xen/include/asm-x86/io_apic.h Mon May 09 17:50:11 2005 +0000 51.3 @@ -2,7 +2,8 @@ 51.4 #define __ASM_IO_APIC_H 51.5 51.6 #include <xen/config.h> 51.7 -#include <xen/types.h> 51.8 +#include <asm/fixmap.h> 51.9 +#include <asm/types.h> 51.10 #include <asm/mpspec.h> 51.11 51.12 /* 51.13 @@ -13,42 +14,91 @@ 51.14 51.15 #ifdef CONFIG_X86_IO_APIC 51.16 51.17 -#define APIC_MISMATCH_DEBUG 51.18 +#ifdef CONFIG_PCI_MSI 51.19 +static inline int use_pci_vector(void) {return 1;} 51.20 +static inline void disable_edge_ioapic_vector(unsigned int vector) { } 51.21 +static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { } 51.22 +static inline void end_edge_ioapic_vector (unsigned int vector) { } 51.23 +#define startup_level_ioapic startup_level_ioapic_vector 51.24 +#define shutdown_level_ioapic mask_IO_APIC_vector 51.25 +#define enable_level_ioapic unmask_IO_APIC_vector 51.26 +#define disable_level_ioapic mask_IO_APIC_vector 51.27 +#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector 51.28 +#define end_level_ioapic end_level_ioapic_vector 51.29 +#define set_ioapic_affinity set_ioapic_affinity_vector 51.30 + 51.31 +#define startup_edge_ioapic startup_edge_ioapic_vector 51.32 +#define shutdown_edge_ioapic disable_edge_ioapic_vector 51.33 +#define enable_edge_ioapic unmask_IO_APIC_vector 51.34 +#define disable_edge_ioapic disable_edge_ioapic_vector 51.35 +#define ack_edge_ioapic ack_edge_ioapic_vector 51.36 +#define end_edge_ioapic end_edge_ioapic_vector 51.37 +#else 51.38 +static inline int use_pci_vector(void) {return 0;} 51.39 +static inline void disable_edge_ioapic_irq(unsigned int irq) { } 51.40 +static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { } 51.41 +static inline void end_edge_ioapic_irq (unsigned int irq) { } 51.42 +#define startup_level_ioapic startup_level_ioapic_irq 51.43 +#define shutdown_level_ioapic mask_IO_APIC_irq 51.44 +#define enable_level_ioapic unmask_IO_APIC_irq 51.45 +#define disable_level_ioapic mask_IO_APIC_irq 51.46 +#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq 51.47 +#define end_level_ioapic end_level_ioapic_irq 51.48 +#define set_ioapic_affinity set_ioapic_affinity_irq 51.49 + 51.50 +#define startup_edge_ioapic startup_edge_ioapic_irq 51.51 +#define shutdown_edge_ioapic disable_edge_ioapic_irq 51.52 +#define enable_edge_ioapic unmask_IO_APIC_irq 51.53 +#define disable_edge_ioapic disable_edge_ioapic_irq 51.54 +#define ack_edge_ioapic ack_edge_ioapic_irq 51.55 +#define end_edge_ioapic end_edge_ioapic_irq 51.56 +#endif 51.57 51.58 #define IO_APIC_BASE(idx) \ 51.59 - ((volatile int *)(fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ 51.60 + ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ 51.61 + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) 51.62 51.63 /* 51.64 * The structure of the IO-APIC: 51.65 */ 51.66 -struct IO_APIC_reg_00 { 51.67 - __u32 __reserved_2 : 14, 51.68 - LTS : 1, 51.69 - delivery_type : 1, 51.70 - __reserved_1 : 8, 51.71 - ID : 4, 51.72 - __reserved_0 : 4; 51.73 -} __attribute__ ((packed)); 51.74 +union IO_APIC_reg_00 { 51.75 + u32 raw; 51.76 + struct { 51.77 + u32 __reserved_2 : 14, 51.78 + LTS : 1, 51.79 + delivery_type : 1, 51.80 + __reserved_1 : 8, 51.81 + ID : 8; 51.82 + } __attribute__ ((packed)) bits; 51.83 +}; 51.84 51.85 -struct IO_APIC_reg_01 { 51.86 - __u32 version : 8, 51.87 - __reserved_2 : 7, 51.88 - PRQ : 1, 51.89 - entries : 8, 51.90 - __reserved_1 : 8; 51.91 -} __attribute__ ((packed)); 51.92 
+union IO_APIC_reg_01 { 51.93 + u32 raw; 51.94 + struct { 51.95 + u32 version : 8, 51.96 + __reserved_2 : 7, 51.97 + PRQ : 1, 51.98 + entries : 8, 51.99 + __reserved_1 : 8; 51.100 + } __attribute__ ((packed)) bits; 51.101 +}; 51.102 51.103 -struct IO_APIC_reg_02 { 51.104 - __u32 __reserved_2 : 24, 51.105 - arbitration : 4, 51.106 - __reserved_1 : 4; 51.107 -} __attribute__ ((packed)); 51.108 +union IO_APIC_reg_02 { 51.109 + u32 raw; 51.110 + struct { 51.111 + u32 __reserved_2 : 24, 51.112 + arbitration : 4, 51.113 + __reserved_1 : 4; 51.114 + } __attribute__ ((packed)) bits; 51.115 +}; 51.116 51.117 -struct IO_APIC_reg_03 { 51.118 - __u32 boot_DT : 1, 51.119 - __reserved_1 : 31; 51.120 -} __attribute__ ((packed)); 51.121 +union IO_APIC_reg_03 { 51.122 + u32 raw; 51.123 + struct { 51.124 + u32 boot_DT : 1, 51.125 + __reserved_1 : 31; 51.126 + } __attribute__ ((packed)) bits; 51.127 +}; 51.128 51.129 /* 51.130 * # of IO-APICs and # of IRQ routing registers 51.131 @@ -106,7 +156,7 @@ extern struct mpc_config_ioapic mp_ioapi 51.132 extern int mp_irq_entries; 51.133 51.134 /* MP IRQ source entries */ 51.135 -extern struct mpc_config_intsrc *mp_irqs; 51.136 +extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 51.137 51.138 /* non-0 if default (table-less) MP configuration */ 51.139 extern int mpc_default_type; 51.140 @@ -124,47 +174,41 @@ static inline void io_apic_write(unsigne 51.141 } 51.142 51.143 /* 51.144 - * Synchronize the IO-APIC and the CPU by doing 51.145 - * a dummy read from the IO-APIC 51.146 + * Re-write a value: to be used for read-modify-write 51.147 + * cycles where the read already set up the index register. 51.148 + * 51.149 + * Older SiS APIC requires we rewrite the index regiser 51.150 */ 51.151 -static inline void io_apic_sync(unsigned int apic) 51.152 +#define sis_apic_bug 0 /* This may need propagating from domain0. */ 51.153 +static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) 51.154 { 51.155 - (void) *(IO_APIC_BASE(apic)+4); 51.156 + if (sis_apic_bug) 51.157 + *IO_APIC_BASE(apic) = reg; 51.158 + *(IO_APIC_BASE(apic)+4) = value; 51.159 } 51.160 51.161 +/* 1 if "noapic" boot option passed */ 51.162 +extern int skip_ioapic_setup; 51.163 + 51.164 /* 51.165 * If we use the IO-APIC for IRQ routing, disable automatic 51.166 * assignment of PCI IRQ's. 
51.167 */ 51.168 -#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup) 51.169 +#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) 51.170 51.171 #ifdef CONFIG_ACPI_BOOT 51.172 extern int io_apic_get_unique_id (int ioapic, int apic_id); 51.173 extern int io_apic_get_version (int ioapic); 51.174 extern int io_apic_get_redir_entries (int ioapic); 51.175 extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); 51.176 -#endif 51.177 +#endif /*CONFIG_ACPI_BOOT*/ 51.178 51.179 -extern int skip_ioapic_setup; /* 1 for "noapic" */ 51.180 +extern int (*ioapic_renumber_irq)(int ioapic, int irq); 51.181 51.182 -static inline void disable_ioapic_setup(void) 51.183 -{ 51.184 - skip_ioapic_setup = 1; 51.185 -} 51.186 - 51.187 -static inline int ioapic_setup_disabled(void) 51.188 -{ 51.189 - return skip_ioapic_setup; 51.190 -} 51.191 +#else /* !CONFIG_X86_IO_APIC */ 51.192 +#define io_apic_assign_pci_irqs 0 51.193 +#endif 51.194 51.195 extern int assign_irq_vector(int irq); 51.196 51.197 -#else /* !CONFIG_X86_IO_APIC */ 51.198 -#define io_apic_assign_pci_irqs 0 51.199 - 51.200 -static inline void disable_ioapic_setup(void) 51.201 -{ } 51.202 - 51.203 -#endif /* !CONFIG_X86_IO_APIC */ 51.204 - 51.205 #endif
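Annotation: the IO-APIC register structs become unions of a raw u32 and a packed bitfield view, so a caller reads the register once into .raw and then picks fields out of .bits. A small sketch of that access pattern; the register value is invented, and on a little-endian x86 build the bitfield view lines up with the hardware layout.

#include <stdio.h>

typedef unsigned int u32;

union IO_APIC_reg_01 {
    u32 raw;
    struct {
        u32 version      : 8,
            __reserved_2 : 7,
            PRQ          : 1,
            entries      : 8,
            __reserved_1 : 8;
    } __attribute__ ((packed)) bits;
};

int main(void)
{
    union IO_APIC_reg_01 reg_01;

    reg_01.raw = 0x00170011;   /* made-up raw read: version 0x11, entries field 0x17 */
    printf("IO-APIC version %#x, %u redirection entries\n",
           (unsigned)reg_01.bits.version, (unsigned)reg_01.bits.entries + 1);
    return 0;
}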
52.1 --- a/xen/include/asm-x86/io_ports.h Mon May 09 14:34:59 2005 +0000 52.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 52.3 @@ -1,30 +0,0 @@ 52.4 -/* 52.5 - * based on linux-2.6.10/include/asm-i386/mach-default/io_ports.h 52.6 - * 52.7 - * Machine specific IO port address definition for generic. 52.8 - * Written by Osamu Tomita <tomita@cinet.co.jp> 52.9 - */ 52.10 -#ifndef _MACH_IO_PORTS_H 52.11 -#define _MACH_IO_PORTS_H 52.12 - 52.13 -/* i8253A PIT registers */ 52.14 -#define PIT_MODE 0x43 52.15 -#define PIT_CH0 0x40 52.16 -#define PIT_CH2 0x42 52.17 - 52.18 -/* i8259A PIC registers */ 52.19 -#define PIC_MASTER_CMD 0x20 52.20 -#define PIC_MASTER_IMR 0x21 52.21 -#define PIC_MASTER_ISR PIC_MASTER_CMD 52.22 -#define PIC_MASTER_POLL PIC_MASTER_ISR 52.23 -#define PIC_MASTER_OCW3 PIC_MASTER_ISR 52.24 -#define PIC_SLAVE_CMD 0xa0 52.25 -#define PIC_SLAVE_IMR 0xa1 52.26 - 52.27 -/* i8259A PIC related value */ 52.28 -#define PIC_CASCADE_IR 2 52.29 -#define MASTER_ICW4_DEFAULT 0x01 52.30 -#define SLAVE_ICW4_DEFAULT 0x01 52.31 -#define PIC_ICW4_AEOI 2 52.32 - 52.33 -#endif /* !_MACH_IO_PORTS_H */
53.1 --- a/xen/include/asm-x86/irq.h Mon May 09 14:34:59 2005 +0000 53.2 +++ b/xen/include/asm-x86/irq.h Mon May 09 17:50:11 2005 +0000 53.3 @@ -6,59 +6,19 @@ 53.4 #include <xen/config.h> 53.5 #include <asm/atomic.h> 53.6 #include <asm/asm_defns.h> 53.7 +#include <irq_vectors.h> 53.8 53.9 extern void disable_irq(unsigned int); 53.10 extern void disable_irq_nosync(unsigned int); 53.11 extern void enable_irq(unsigned int); 53.12 53.13 -/* 53.14 - * IDT vectors usable for external interrupt sources start 53.15 - * at 0x20: 53.16 - */ 53.17 -#define FIRST_EXTERNAL_VECTOR 0x30 53.18 - 53.19 -#define NR_IRQS (256 - FIRST_EXTERNAL_VECTOR) 53.20 - 53.21 -#define HYPERCALL_VECTOR 0x82 53.22 - 53.23 -/* 53.24 - * Vectors 0x30-0x3f are used for ISA interrupts. 53.25 - */ 53.26 - 53.27 -/* 53.28 - * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 53.29 - */ 53.30 -#define SPURIOUS_APIC_VECTOR 0xff 53.31 -#define ERROR_APIC_VECTOR 0xfe 53.32 -#define INVALIDATE_TLB_VECTOR 0xfd 53.33 -#define EVENT_CHECK_VECTOR 0xfc 53.34 -#define CALL_FUNCTION_VECTOR 0xfb 53.35 -#define KDB_VECTOR 0xfa 53.36 +extern u8 irq_vector[NR_IRQ_VECTORS]; 53.37 +#define IO_APIC_VECTOR(irq) irq_vector[irq] 53.38 +#define AUTO_ASSIGN -1 53.39 53.40 -/* 53.41 - * Local APIC timer IRQ vector is on a different priority level, 53.42 - * to work around the 'lost local interrupt if more than 2 IRQ 53.43 - * sources per level' errata. 53.44 - */ 53.45 -#define LOCAL_TIMER_VECTOR 0xef 53.46 +extern void (*interrupt[NR_IRQS])(void); 53.47 53.48 -/* 53.49 - * First APIC vector available to drivers: (vectors 0x40-0xee) 53.50 - * we start at 0x41 to spread out vectors evenly between priority 53.51 - * levels. (0x82 is the hypercall vector) 53.52 - */ 53.53 -#define FIRST_DEVICE_VECTOR 0x41 53.54 -#define FIRST_SYSTEM_VECTOR 0xef 53.55 - 53.56 -extern int irq_vector[NR_IRQS]; 53.57 -#define IO_APIC_VECTOR(irq) irq_vector[irq] 53.58 - 53.59 -/* 53.60 - * Various low-level irq details needed by irq.c, process.c, 53.61 - * time.c, io_apic.c and smp.c 53.62 - * 53.63 - * Interrupt entry/exit code at both C and assembly level 53.64 - */ 53.65 +#define platform_legacy_irq(irq) ((irq) < 16) 53.66 53.67 extern void mask_irq(unsigned int irq); 53.68 extern void unmask_irq(unsigned int irq);
54.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 54.2 +++ b/xen/include/asm-x86/mach-default/apm.h Mon May 09 17:50:11 2005 +0000 54.3 @@ -0,0 +1,75 @@ 54.4 +/* 54.5 + * include/asm-i386/mach-default/apm.h 54.6 + * 54.7 + * Machine specific APM BIOS functions for generic. 54.8 + * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> 54.9 + */ 54.10 + 54.11 +#ifndef _ASM_APM_H 54.12 +#define _ASM_APM_H 54.13 + 54.14 +#ifdef APM_ZERO_SEGS 54.15 +# define APM_DO_ZERO_SEGS \ 54.16 + "pushl %%ds\n\t" \ 54.17 + "pushl %%es\n\t" \ 54.18 + "xorl %%edx, %%edx\n\t" \ 54.19 + "mov %%dx, %%ds\n\t" \ 54.20 + "mov %%dx, %%es\n\t" \ 54.21 + "mov %%dx, %%fs\n\t" \ 54.22 + "mov %%dx, %%gs\n\t" 54.23 +# define APM_DO_POP_SEGS \ 54.24 + "popl %%es\n\t" \ 54.25 + "popl %%ds\n\t" 54.26 +#else 54.27 +# define APM_DO_ZERO_SEGS 54.28 +# define APM_DO_POP_SEGS 54.29 +#endif 54.30 + 54.31 +static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, 54.32 + u32 *eax, u32 *ebx, u32 *ecx, 54.33 + u32 *edx, u32 *esi) 54.34 +{ 54.35 + /* 54.36 + * N.B. We do NOT need a cld after the BIOS call 54.37 + * because we always save and restore the flags. 54.38 + */ 54.39 + __asm__ __volatile__(APM_DO_ZERO_SEGS 54.40 + "pushl %%edi\n\t" 54.41 + "pushl %%ebp\n\t" 54.42 + "lcall *%%cs:apm_bios_entry\n\t" 54.43 + "setc %%al\n\t" 54.44 + "popl %%ebp\n\t" 54.45 + "popl %%edi\n\t" 54.46 + APM_DO_POP_SEGS 54.47 + : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), 54.48 + "=S" (*esi) 54.49 + : "a" (func), "b" (ebx_in), "c" (ecx_in) 54.50 + : "memory", "cc"); 54.51 +} 54.52 + 54.53 +static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, 54.54 + u32 ecx_in, u32 *eax) 54.55 +{ 54.56 + int cx, dx, si; 54.57 + u8 error; 54.58 + 54.59 + /* 54.60 + * N.B. We do NOT need a cld after the BIOS call 54.61 + * because we always save and restore the flags. 54.62 + */ 54.63 + __asm__ __volatile__(APM_DO_ZERO_SEGS 54.64 + "pushl %%edi\n\t" 54.65 + "pushl %%ebp\n\t" 54.66 + "lcall *%%cs:apm_bios_entry\n\t" 54.67 + "setc %%bl\n\t" 54.68 + "popl %%ebp\n\t" 54.69 + "popl %%edi\n\t" 54.70 + APM_DO_POP_SEGS 54.71 + : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), 54.72 + "=S" (si) 54.73 + : "a" (func), "b" (ebx_in), "c" (ecx_in) 54.74 + : "memory", "cc"); 54.75 + return error; 54.76 +} 54.77 + 54.78 +#endif /* _ASM_APM_H */
55.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 55.2 +++ b/xen/include/asm-x86/mach-default/bios_ebda.h Mon May 09 17:50:11 2005 +0000 55.3 @@ -0,0 +1,15 @@ 55.4 +#ifndef _MACH_BIOS_EBDA_H 55.5 +#define _MACH_BIOS_EBDA_H 55.6 + 55.7 +/* 55.8 + * there is a real-mode segmented pointer pointing to the 55.9 + * 4K EBDA area at 0x40E. 55.10 + */ 55.11 +static inline unsigned int get_bios_ebda(void) 55.12 +{ 55.13 + unsigned int address = *(unsigned short *)phys_to_virt(0x40E); 55.14 + address <<= 4; 55.15 + return address; /* 0 means none */ 55.16 +} 55.17 + 55.18 +#endif /* _MACH_BIOS_EBDA_H */
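Annotation: get_bios_ebda() reads the real-mode segment value stored at physical 0x40E and shifts it left four bits to obtain the EBDA's linear address. A plain arithmetic sketch of that segment-to-linear conversion; the segment value 0x9FC0 is only an example.

#include <stdio.h>

int main(void)
{
    unsigned short ebda_segment = 0x9FC0;          /* example value read from 0x40E */
    unsigned int   ebda_address = (unsigned int)ebda_segment << 4;

    /* 0x9FC0:0000 -> linear 0x9FC00, i.e. just below the 640 KiB mark */
    printf("EBDA at linear %#x (%u KiB)\n", ebda_address, ebda_address >> 10);
    return 0;
}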
56.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 56.2 +++ b/xen/include/asm-x86/mach-default/do_timer.h Mon May 09 17:50:11 2005 +0000 56.3 @@ -0,0 +1,85 @@ 56.4 +/* defines for inline arch setup functions */ 56.5 + 56.6 +#include <asm/apic.h> 56.7 + 56.8 +/** 56.9 + * do_timer_interrupt_hook - hook into timer tick 56.10 + * @regs: standard registers from interrupt 56.11 + * 56.12 + * Description: 56.13 + * This hook is called immediately after the timer interrupt is ack'd. 56.14 + * It's primary purpose is to allow architectures that don't possess 56.15 + * individual per CPU clocks (like the CPU APICs supply) to broadcast the 56.16 + * timer interrupt as a means of triggering reschedules etc. 56.17 + **/ 56.18 + 56.19 +static inline void do_timer_interrupt_hook(struct pt_regs *regs) 56.20 +{ 56.21 + do_timer(regs); 56.22 +#ifndef CONFIG_SMP 56.23 + update_process_times(user_mode(regs)); 56.24 +#endif 56.25 +/* 56.26 + * In the SMP case we use the local APIC timer interrupt to do the 56.27 + * profiling, except when we simulate SMP mode on a uniprocessor 56.28 + * system, in that case we have to call the local interrupt handler. 56.29 + */ 56.30 +#ifndef CONFIG_X86_LOCAL_APIC 56.31 + profile_tick(CPU_PROFILING, regs); 56.32 +#else 56.33 + if (!using_apic_timer) 56.34 + smp_local_timer_interrupt(regs); 56.35 +#endif 56.36 +} 56.37 + 56.38 + 56.39 +/* you can safely undefine this if you don't have the Neptune chipset */ 56.40 + 56.41 +#define BUGGY_NEPTUN_TIMER 56.42 + 56.43 +/** 56.44 + * do_timer_overflow - process a detected timer overflow condition 56.45 + * @count: hardware timer interrupt count on overflow 56.46 + * 56.47 + * Description: 56.48 + * This call is invoked when the jiffies count has not incremented but 56.49 + * the hardware timer interrupt has. It means that a timer tick interrupt 56.50 + * came along while the previous one was pending, thus a tick was missed 56.51 + **/ 56.52 +static inline int do_timer_overflow(int count) 56.53 +{ 56.54 + int i; 56.55 + 56.56 + spin_lock(&i8259A_lock); 56.57 + /* 56.58 + * This is tricky when I/O APICs are used; 56.59 + * see do_timer_interrupt(). 56.60 + */ 56.61 + i = inb(0x20); 56.62 + spin_unlock(&i8259A_lock); 56.63 + 56.64 + /* assumption about timer being IRQ0 */ 56.65 + if (i & 0x01) { 56.66 + /* 56.67 + * We cannot detect lost timer interrupts ... 56.68 + * well, that's why we call them lost, don't we? :) 56.69 + * [hmm, on the Pentium and Alpha we can ... sort of] 56.70 + */ 56.71 + count -= LATCH; 56.72 + } else { 56.73 +#ifdef BUGGY_NEPTUN_TIMER 56.74 + /* 56.75 + * for the Neptun bug we know that the 'latch' 56.76 + * command doesn't latch the high and low value 56.77 + * of the counter atomically. Thus we have to 56.78 + * substract 256 from the counter 56.79 + * ... funny, isnt it? :) 56.80 + */ 56.81 + 56.82 + count -= 256; 56.83 +#else 56.84 + printk("do_slow_gettimeoffset(): hardware timer problem?\n"); 56.85 +#endif 56.86 + } 56.87 + return count; 56.88 +}
57.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 57.2 +++ b/xen/include/asm-x86/mach-default/entry_arch.h Mon May 09 17:50:11 2005 +0000 57.3 @@ -0,0 +1,34 @@ 57.4 +/* 57.5 + * This file is designed to contain the BUILD_INTERRUPT specifications for 57.6 + * all of the extra named interrupt vectors used by the architecture. 57.7 + * Usually this is the Inter Process Interrupts (IPIs) 57.8 + */ 57.9 + 57.10 +/* 57.11 + * The following vectors are part of the Linux architecture, there 57.12 + * is no hardware IRQ pin equivalent for them, they are triggered 57.13 + * through the ICC by us (IPIs) 57.14 + */ 57.15 +#ifdef CONFIG_X86_SMP 57.16 +BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) 57.17 +BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) 57.18 +BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) 57.19 +#endif 57.20 + 57.21 +/* 57.22 + * every pentium local APIC has two 'local interrupts', with a 57.23 + * soft-definable vector attached to both interrupts, one of 57.24 + * which is a timer interrupt, the other one is error counter 57.25 + * overflow. Linux uses the local APIC timer interrupt to get 57.26 + * a much simpler SMP time architecture: 57.27 + */ 57.28 +#ifdef CONFIG_X86_LOCAL_APIC 57.29 +BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) 57.30 +BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 57.31 +BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 57.32 + 57.33 +#ifdef CONFIG_X86_MCE_P4THERMAL 57.34 +BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) 57.35 +#endif 57.36 + 57.37 +#endif
58.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 58.2 +++ b/xen/include/asm-x86/mach-default/io_ports.h Mon May 09 17:50:11 2005 +0000 58.3 @@ -0,0 +1,30 @@ 58.4 +/* 58.5 + * arch/i386/mach-generic/io_ports.h 58.6 + * 58.7 + * Machine specific IO port address definition for generic. 58.8 + * Written by Osamu Tomita <tomita@cinet.co.jp> 58.9 + */ 58.10 +#ifndef _MACH_IO_PORTS_H 58.11 +#define _MACH_IO_PORTS_H 58.12 + 58.13 +/* i8253A PIT registers */ 58.14 +#define PIT_MODE 0x43 58.15 +#define PIT_CH0 0x40 58.16 +#define PIT_CH2 0x42 58.17 + 58.18 +/* i8259A PIC registers */ 58.19 +#define PIC_MASTER_CMD 0x20 58.20 +#define PIC_MASTER_IMR 0x21 58.21 +#define PIC_MASTER_ISR PIC_MASTER_CMD 58.22 +#define PIC_MASTER_POLL PIC_MASTER_ISR 58.23 +#define PIC_MASTER_OCW3 PIC_MASTER_ISR 58.24 +#define PIC_SLAVE_CMD 0xa0 58.25 +#define PIC_SLAVE_IMR 0xa1 58.26 + 58.27 +/* i8259A PIC related value */ 58.28 +#define PIC_CASCADE_IR 2 58.29 +#define MASTER_ICW4_DEFAULT 0x01 58.30 +#define SLAVE_ICW4_DEFAULT 0x01 58.31 +#define PIC_ICW4_AEOI 2 58.32 + 58.33 +#endif /* !_MACH_IO_PORTS_H */
59.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 59.2 +++ b/xen/include/asm-x86/mach-default/irq_vectors.h Mon May 09 17:50:11 2005 +0000 59.3 @@ -0,0 +1,96 @@ 59.4 +/* 59.5 + * This file should contain #defines for all of the interrupt vector 59.6 + * numbers used by this architecture. 59.7 + * 59.8 + * In addition, there are some standard defines: 59.9 + * 59.10 + * FIRST_EXTERNAL_VECTOR: 59.11 + * The first free place for external interrupts 59.12 + * 59.13 + * SYSCALL_VECTOR: 59.14 + * The IRQ vector a syscall makes the user to kernel transition 59.15 + * under. 59.16 + * 59.17 + * TIMER_IRQ: 59.18 + * The IRQ number the timer interrupt comes in at. 59.19 + * 59.20 + * NR_IRQS: 59.21 + * The total number of interrupt vectors (including all the 59.22 + * architecture specific interrupts) needed. 59.23 + * 59.24 + */ 59.25 +#ifndef _ASM_IRQ_VECTORS_H 59.26 +#define _ASM_IRQ_VECTORS_H 59.27 + 59.28 +/* 59.29 + * IDT vectors usable for external interrupt sources start 59.30 + * at 0x20: 59.31 + */ 59.32 +#define FIRST_EXTERNAL_VECTOR 0x20 59.33 + 59.34 +#define HYPERCALL_VECTOR 0x82 59.35 + 59.36 +/* 59.37 + * Vectors 0x20-0x2f are used for ISA interrupts. 59.38 + */ 59.39 + 59.40 +/* 59.41 + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 59.42 + * 59.43 + * some of the following vectors are 'rare', they are merged 59.44 + * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. 59.45 + * TLB, reschedule and local APIC vectors are performance-critical. 59.46 + * 59.47 + * Vectors 0xf0-0xfa are free (reserved for future Linux use). 59.48 + */ 59.49 +#define SPURIOUS_APIC_VECTOR 0xff 59.50 +#define ERROR_APIC_VECTOR 0xfe 59.51 +#define INVALIDATE_TLB_VECTOR 0xfd 59.52 +#define EVENT_CHECK_VECTOR 0xfc 59.53 +#define CALL_FUNCTION_VECTOR 0xfb 59.54 + 59.55 +#define THERMAL_APIC_VECTOR 0xf0 59.56 +/* 59.57 + * Local APIC timer IRQ vector is on a different priority level, 59.58 + * to work around the 'lost local interrupt if more than 2 IRQ 59.59 + * sources per level' errata. 59.60 + */ 59.61 +#define LOCAL_TIMER_VECTOR 0xef 59.62 + 59.63 +/* 59.64 + * First APIC vector available to drivers: (vectors 0x30-0xee) 59.65 + * we start at 0x31 to spread out vectors evenly between priority 59.66 + * levels. (0x80 is the syscall vector) 59.67 + */ 59.68 +#define FIRST_DEVICE_VECTOR 0x31 59.69 +#define FIRST_SYSTEM_VECTOR 0xef 59.70 + 59.71 +#define TIMER_IRQ 0 59.72 + 59.73 +/* 59.74 + * 16 8259A IRQ's, 208 potential APIC interrupt sources. 59.75 + * Right now the APIC is mostly only used for SMP. 59.76 + * 256 vectors is an architectural limit. (we can have 59.77 + * more than 256 devices theoretically, but they will 59.78 + * have to use shared interrupts) 59.79 + * Since vectors 0x00-0x1f are used/reserved for the CPU, 59.80 + * the usable vector space is 0x20-0xff (224 vectors) 59.81 + */ 59.82 + 59.83 +/* 59.84 + * The maximum number of vectors supported by i386 processors 59.85 + * is limited to 256. For processors other than i386, NR_VECTORS 59.86 + * should be changed accordingly. 59.87 + */ 59.88 +#define NR_VECTORS 256 59.89 + 59.90 +#include "irq_vectors_limits.h" 59.91 + 59.92 +#define FPU_IRQ 13 59.93 + 59.94 +#define FIRST_VM86_IRQ 3 59.95 +#define LAST_VM86_IRQ 15 59.96 +#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) 59.97 + 59.98 + 59.99 +#endif /* _ASM_IRQ_VECTORS_H */
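Annotation: the vector map in the new irq_vectors.h is easiest to sanity-check numerically: CPU exceptions own 0x00-0x1f, ISA IRQs sit right above FIRST_EXTERNAL_VECTOR, device vectors run from FIRST_DEVICE_VECTOR up to (but not including) FIRST_SYSTEM_VECTOR, and the top vectors are reserved for IPIs. A quick arithmetic sketch using the constants from the hunk:

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR 0x20
#define HYPERCALL_VECTOR      0x82
#define FIRST_DEVICE_VECTOR   0x31
#define FIRST_SYSTEM_VECTOR   0xef
#define NR_VECTORS            256

int main(void)
{
    printf("vectors reserved for the CPU : 0x00-0x%02x\n", FIRST_EXTERNAL_VECTOR - 1);
    printf("usable external vectors      : %d\n", NR_VECTORS - FIRST_EXTERNAL_VECTOR);
    printf("device vector window         : 0x%02x-0x%02x (%d vectors)\n",
           FIRST_DEVICE_VECTOR, FIRST_SYSTEM_VECTOR - 1,
           FIRST_SYSTEM_VECTOR - FIRST_DEVICE_VECTOR);
    printf("hypercall vector 0x%02x falls inside that window: %s\n",
           HYPERCALL_VECTOR,
           (HYPERCALL_VECTOR >= FIRST_DEVICE_VECTOR &&
            HYPERCALL_VECTOR <  FIRST_SYSTEM_VECTOR) ? "yes" : "no");
    return 0;
}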
60.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 60.2 +++ b/xen/include/asm-x86/mach-default/irq_vectors_limits.h Mon May 09 17:50:11 2005 +0000 60.3 @@ -0,0 +1,21 @@ 60.4 +#ifndef _ASM_IRQ_VECTORS_LIMITS_H 60.5 +#define _ASM_IRQ_VECTORS_LIMITS_H 60.6 + 60.7 +#ifdef CONFIG_PCI_MSI 60.8 +#define NR_IRQS FIRST_SYSTEM_VECTOR 60.9 +#define NR_IRQ_VECTORS NR_IRQS 60.10 +#else 60.11 +#ifdef CONFIG_X86_IO_APIC 60.12 +#define NR_IRQS 224 60.13 +# if (224 >= 32 * NR_CPUS) 60.14 +# define NR_IRQ_VECTORS NR_IRQS 60.15 +# else 60.16 +# define NR_IRQ_VECTORS (32 * NR_CPUS) 60.17 +# endif 60.18 +#else 60.19 +#define NR_IRQS 16 60.20 +#define NR_IRQ_VECTORS NR_IRQS 60.21 +#endif 60.22 +#endif 60.23 + 60.24 +#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
61.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 61.2 +++ b/xen/include/asm-x86/mach-default/mach_apic.h Mon May 09 17:50:11 2005 +0000 61.3 @@ -0,0 +1,133 @@ 61.4 +#ifndef __ASM_MACH_APIC_H 61.5 +#define __ASM_MACH_APIC_H 61.6 + 61.7 +#include <mach_apicdef.h> 61.8 +#include <asm/smp.h> 61.9 + 61.10 +#define APIC_DFR_VALUE (APIC_DFR_FLAT) 61.11 + 61.12 +static inline cpumask_t target_cpus(void) 61.13 +{ 61.14 +#ifdef CONFIG_SMP 61.15 + return cpu_online_map; 61.16 +#else 61.17 + return cpumask_of_cpu(0); 61.18 +#endif 61.19 +} 61.20 +#define TARGET_CPUS (target_cpus()) 61.21 + 61.22 +#define NO_BALANCE_IRQ (0) 61.23 +#define esr_disable (0) 61.24 + 61.25 +#define NO_IOAPIC_CHECK (0) 61.26 + 61.27 +#define INT_DELIVERY_MODE dest_LowestPrio 61.28 +#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ 61.29 + 61.30 +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) 61.31 +{ 61.32 + return physid_isset(apicid, bitmap); 61.33 +} 61.34 + 61.35 +static inline unsigned long check_apicid_present(int bit) 61.36 +{ 61.37 + return physid_isset(bit, phys_cpu_present_map); 61.38 +} 61.39 + 61.40 +/* 61.41 + * Set up the logical destination ID. 61.42 + * 61.43 + * Intel recommends to set DFR, LDR and TPR before enabling 61.44 + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 61.45 + * document number 292116). So here it goes... 61.46 + */ 61.47 +static inline void init_apic_ldr(void) 61.48 +{ 61.49 + unsigned long val; 61.50 + 61.51 + apic_write_around(APIC_DFR, APIC_DFR_VALUE); 61.52 + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 61.53 + val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 61.54 + apic_write_around(APIC_LDR, val); 61.55 +} 61.56 + 61.57 +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) 61.58 +{ 61.59 + return phys_map; 61.60 +} 61.61 + 61.62 +static inline void clustered_apic_check(void) 61.63 +{ 61.64 + printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", 61.65 + "Flat", nr_ioapics); 61.66 +} 61.67 + 61.68 +static inline int multi_timer_check(int apic, int irq) 61.69 +{ 61.70 + return 0; 61.71 +} 61.72 + 61.73 +static inline int apicid_to_node(int logical_apicid) 61.74 +{ 61.75 + return 0; 61.76 +} 61.77 + 61.78 +/* Mapping from cpu number to logical apicid */ 61.79 +static inline int cpu_to_logical_apicid(int cpu) 61.80 +{ 61.81 + return 1 << cpu; 61.82 +} 61.83 + 61.84 +static inline int cpu_present_to_apicid(int mps_cpu) 61.85 +{ 61.86 + if (mps_cpu < get_physical_broadcast()) 61.87 + return mps_cpu; 61.88 + else 61.89 + return BAD_APICID; 61.90 +} 61.91 + 61.92 +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) 61.93 +{ 61.94 + return physid_mask_of_physid(phys_apicid); 61.95 +} 61.96 + 61.97 +static inline int mpc_apic_id(struct mpc_config_processor *m, 61.98 + struct mpc_config_translation *translation_record) 61.99 +{ 61.100 + printk("Processor #%d %d:%d APIC version %d\n", 61.101 + m->mpc_apicid, 61.102 + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, 61.103 + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, 61.104 + m->mpc_apicver); 61.105 + return (m->mpc_apicid); 61.106 +} 61.107 + 61.108 +static inline void setup_portio_remap(void) 61.109 +{ 61.110 +} 61.111 + 61.112 +static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) 61.113 +{ 61.114 + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); 61.115 +} 61.116 + 61.117 +static inline int apic_id_registered(void) 61.118 +{ 61.119 + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); 61.120 +} 61.121 + 61.122 +static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 61.123 +{ 61.124 + return cpus_addr(cpumask)[0]; 61.125 +} 61.126 + 61.127 +static inline void enable_apic_mode(void) 61.128 +{ 61.129 +} 61.130 + 61.131 +static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) 61.132 +{ 61.133 + return cpuid_apic >> index_msb; 61.134 +} 61.135 + 61.136 +#endif /* __ASM_MACH_APIC_H */
62.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 62.2 +++ b/xen/include/asm-x86/mach-default/mach_apicdef.h Mon May 09 17:50:11 2005 +0000 62.3 @@ -0,0 +1,13 @@ 62.4 +#ifndef __ASM_MACH_APICDEF_H 62.5 +#define __ASM_MACH_APICDEF_H 62.6 + 62.7 +#define APIC_ID_MASK (0xF<<24) 62.8 + 62.9 +static inline unsigned get_apic_id(unsigned long x) 62.10 +{ 62.11 + return (((x)>>24)&0xF); 62.12 +} 62.13 + 62.14 +#define GET_APIC_ID(x) get_apic_id(x) 62.15 + 62.16 +#endif
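Annotation: mach_apicdef.h keeps only the flat-mode view of the APIC ID register, where the physical ID lives in bits 24-27, hence the 0xF<<24 mask. A one-line sketch extracting it from a sample register value (the value is invented):

#include <stdio.h>

#define APIC_ID_MASK (0xF << 24)
static inline unsigned get_apic_id(unsigned long x) { return (x >> 24) & 0xF; }

int main(void)
{
    unsigned long apic_id_reg = 0x03000000UL;   /* sample raw read of the ID register */
    printf("masked: %#lx, APIC ID: %u\n",
           apic_id_reg & APIC_ID_MASK, get_apic_id(apic_id_reg));
    return 0;
}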
63.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 63.2 +++ b/xen/include/asm-x86/mach-default/mach_ipi.h Mon May 09 17:50:11 2005 +0000 63.3 @@ -0,0 +1,30 @@ 63.4 +#ifndef __ASM_MACH_IPI_H 63.5 +#define __ASM_MACH_IPI_H 63.6 + 63.7 +void send_IPI_mask_bitmask(cpumask_t mask, int vector); 63.8 +void __send_IPI_shortcut(unsigned int shortcut, int vector); 63.9 + 63.10 +static inline void send_IPI_mask(cpumask_t mask, int vector) 63.11 +{ 63.12 + send_IPI_mask_bitmask(mask, vector); 63.13 +} 63.14 + 63.15 +static inline void send_IPI_allbutself(int vector) 63.16 +{ 63.17 + /* 63.18 + * if there are no other CPUs in the system then we get an APIC send 63.19 + * error if we try to broadcast, thus avoid sending IPIs in this case. 63.20 + */ 63.21 + if (!(num_online_cpus() > 1)) 63.22 + return; 63.23 + 63.24 + __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); 63.25 + return; 63.26 +} 63.27 + 63.28 +static inline void send_IPI_all(int vector) 63.29 +{ 63.30 + __send_IPI_shortcut(APIC_DEST_ALLINC, vector); 63.31 +} 63.32 + 63.33 +#endif /* __ASM_MACH_IPI_H */
64.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 64.2 +++ b/xen/include/asm-x86/mach-default/mach_mpparse.h Mon May 09 17:50:11 2005 +0000 64.3 @@ -0,0 +1,28 @@ 64.4 +#ifndef __ASM_MACH_MPPARSE_H 64.5 +#define __ASM_MACH_MPPARSE_H 64.6 + 64.7 +static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 64.8 + struct mpc_config_translation *translation) 64.9 +{ 64.10 +// Dprintk("Bus #%d is %s\n", m->mpc_busid, name); 64.11 +} 64.12 + 64.13 +static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 64.14 + struct mpc_config_translation *translation) 64.15 +{ 64.16 +} 64.17 + 64.18 +static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 64.19 + char *productid) 64.20 +{ 64.21 + return 0; 64.22 +} 64.23 + 64.24 +/* Hook from generic ACPI tables.c */ 64.25 +static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) 64.26 +{ 64.27 + return 0; 64.28 +} 64.29 + 64.30 + 64.31 +#endif /* __ASM_MACH_MPPARSE_H */
65.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 65.2 +++ b/xen/include/asm-x86/mach-default/mach_mpspec.h Mon May 09 17:50:11 2005 +0000 65.3 @@ -0,0 +1,8 @@ 65.4 +#ifndef __ASM_MACH_MPSPEC_H 65.5 +#define __ASM_MACH_MPSPEC_H 65.6 + 65.7 +#define MAX_IRQ_SOURCES 256 65.8 + 65.9 +#define MAX_MP_BUSSES 32 65.10 + 65.11 +#endif /* __ASM_MACH_MPSPEC_H */
66.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 66.2 +++ b/xen/include/asm-x86/mach-default/mach_reboot.h Mon May 09 17:50:11 2005 +0000 66.3 @@ -0,0 +1,30 @@ 66.4 +/* 66.5 + * arch/i386/mach-generic/mach_reboot.h 66.6 + * 66.7 + * Machine specific reboot functions for generic. 66.8 + * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp> 66.9 + */ 66.10 +#ifndef _MACH_REBOOT_H 66.11 +#define _MACH_REBOOT_H 66.12 + 66.13 +static inline void kb_wait(void) 66.14 +{ 66.15 + int i; 66.16 + 66.17 + for (i = 0; i < 0x10000; i++) 66.18 + if ((inb_p(0x64) & 0x02) == 0) 66.19 + break; 66.20 +} 66.21 + 66.22 +static inline void mach_reboot(void) 66.23 +{ 66.24 + int i; 66.25 + for (i = 0; i < 100; i++) { 66.26 + kb_wait(); 66.27 + udelay(50); 66.28 + outb(0xfe, 0x64); /* pulse reset low */ 66.29 + udelay(50); 66.30 + } 66.31 +} 66.32 + 66.33 +#endif /* !_MACH_REBOOT_H */
67.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 67.2 +++ b/xen/include/asm-x86/mach-default/mach_time.h Mon May 09 17:50:11 2005 +0000 67.3 @@ -0,0 +1,122 @@ 67.4 +/* 67.5 + * include/asm-i386/mach-default/mach_time.h 67.6 + * 67.7 + * Machine specific set RTC function for generic. 67.8 + * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp> 67.9 + */ 67.10 +#ifndef _MACH_TIME_H 67.11 +#define _MACH_TIME_H 67.12 + 67.13 +#include <linux/mc146818rtc.h> 67.14 + 67.15 +/* for check timing call set_rtc_mmss() 500ms */ 67.16 +/* used in arch/i386/time.c::do_timer_interrupt() */ 67.17 +#define USEC_AFTER 500000 67.18 +#define USEC_BEFORE 500000 67.19 + 67.20 +/* 67.21 + * In order to set the CMOS clock precisely, set_rtc_mmss has to be 67.22 + * called 500 ms after the second nowtime has started, because when 67.23 + * nowtime is written into the registers of the CMOS clock, it will 67.24 + * jump to the next second precisely 500 ms later. Check the Motorola 67.25 + * MC146818A or Dallas DS12887 data sheet for details. 67.26 + * 67.27 + * BUG: This routine does not handle hour overflow properly; it just 67.28 + * sets the minutes. Usually you'll only notice that after reboot! 67.29 + */ 67.30 +static inline int mach_set_rtc_mmss(unsigned long nowtime) 67.31 +{ 67.32 + int retval = 0; 67.33 + int real_seconds, real_minutes, cmos_minutes; 67.34 + unsigned char save_control, save_freq_select; 67.35 + 67.36 + save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ 67.37 + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 67.38 + 67.39 + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ 67.40 + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); 67.41 + 67.42 + cmos_minutes = CMOS_READ(RTC_MINUTES); 67.43 + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 67.44 + BCD_TO_BIN(cmos_minutes); 67.45 + 67.46 + /* 67.47 + * since we're only adjusting minutes and seconds, 67.48 + * don't interfere with hour overflow. This avoids 67.49 + * messing with unknown time zones but requires your 67.50 + * RTC not to be off by more than 15 minutes 67.51 + */ 67.52 + real_seconds = nowtime % 60; 67.53 + real_minutes = nowtime / 60; 67.54 + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) 67.55 + real_minutes += 30; /* correct for half hour time zone */ 67.56 + real_minutes %= 60; 67.57 + 67.58 + if (abs(real_minutes - cmos_minutes) < 30) { 67.59 + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 67.60 + BIN_TO_BCD(real_seconds); 67.61 + BIN_TO_BCD(real_minutes); 67.62 + } 67.63 + CMOS_WRITE(real_seconds,RTC_SECONDS); 67.64 + CMOS_WRITE(real_minutes,RTC_MINUTES); 67.65 + } else { 67.66 + printk(KERN_WARNING 67.67 + "set_rtc_mmss: can't update from %d to %d\n", 67.68 + cmos_minutes, real_minutes); 67.69 + retval = -1; 67.70 + } 67.71 + 67.72 + /* The following flags have to be released exactly in this order, 67.73 + * otherwise the DS12887 (popular MC146818A clone with integrated 67.74 + * battery and quartz) will not reset the oscillator and will not 67.75 + * update precisely 500 ms later. You won't find this mentioned in 67.76 + * the Dallas Semiconductor data sheets, but who believes data 67.77 + * sheets anyway ... 
-- Markus Kuhn 67.78 + */ 67.79 + CMOS_WRITE(save_control, RTC_CONTROL); 67.80 + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 67.81 + 67.82 + return retval; 67.83 +} 67.84 + 67.85 +static inline unsigned long mach_get_cmos_time(void) 67.86 +{ 67.87 + unsigned int year, mon, day, hour, min, sec; 67.88 + int i; 67.89 + 67.90 + /* The Linux interpretation of the CMOS clock register contents: 67.91 + * When the Update-In-Progress (UIP) flag goes from 1 to 0, the 67.92 + * RTC registers show the second which has precisely just started. 67.93 + * Let's hope other operating systems interpret the RTC the same way. 67.94 + */ 67.95 + /* read RTC exactly on falling edge of update flag */ 67.96 + for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ 67.97 + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) 67.98 + break; 67.99 + for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ 67.100 + if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) 67.101 + break; 67.102 + do { /* Isn't this overkill ? UIP above should guarantee consistency */ 67.103 + sec = CMOS_READ(RTC_SECONDS); 67.104 + min = CMOS_READ(RTC_MINUTES); 67.105 + hour = CMOS_READ(RTC_HOURS); 67.106 + day = CMOS_READ(RTC_DAY_OF_MONTH); 67.107 + mon = CMOS_READ(RTC_MONTH); 67.108 + year = CMOS_READ(RTC_YEAR); 67.109 + } while (sec != CMOS_READ(RTC_SECONDS)); 67.110 + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 67.111 + { 67.112 + BCD_TO_BIN(sec); 67.113 + BCD_TO_BIN(min); 67.114 + BCD_TO_BIN(hour); 67.115 + BCD_TO_BIN(day); 67.116 + BCD_TO_BIN(mon); 67.117 + BCD_TO_BIN(year); 67.118 + } 67.119 + if ((year += 1900) < 1970) 67.120 + year += 100; 67.121 + 67.122 + return mktime(year, mon, day, hour, min, sec); 67.123 +} 67.124 + 67.125 +#endif /* !_MACH_TIME_H */
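Note: mach_set_rtc_mmss() above rewrites only minutes and seconds, applying a half-hour correction so an RTC kept in a 30-minute-offset time zone still lines up, and refuses the update when the drift exceeds roughly 15 minutes. A standalone replay of that arithmetic (the sample nowtime and CMOS reading are invented for illustration) shows the decision it makes:

/* Replay of the minute-adjustment logic from mach_set_rtc_mmss(). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned long nowtime = 1115661011UL;      /* arbitrary epoch seconds */
    int cmos_minutes = 40;                     /* pretend RTC minutes field */
    int real_seconds = nowtime % 60;
    int real_minutes = nowtime / 60;

    if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
        real_minutes += 30;                    /* correct for half hour zone */
    real_minutes %= 60;

    if (abs(real_minutes - cmos_minutes) < 30)
        printf("would write %02d:%02d to the RTC\n",
               real_minutes, real_seconds);
    else
        printf("delta too large, RTC left alone\n");
    return 0;
}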
68.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 68.2 +++ b/xen/include/asm-x86/mach-default/mach_timer.h Mon May 09 17:50:11 2005 +0000 68.3 @@ -0,0 +1,48 @@ 68.4 +/* 68.5 + * include/asm-i386/mach-default/mach_timer.h 68.6 + * 68.7 + * Machine specific calibrate_tsc() for generic. 68.8 + * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> 68.9 + */ 68.10 +/* ------ Calibrate the TSC ------- 68.11 + * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). 68.12 + * Too much 64-bit arithmetic here to do this cleanly in C, and for 68.13 + * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) 68.14 + * output busy loop as low as possible. We avoid reading the CTC registers 68.15 + * directly because of the awkward 8-bit access mechanism of the 82C54 68.16 + * device. 68.17 + */ 68.18 +#ifndef _MACH_TIMER_H 68.19 +#define _MACH_TIMER_H 68.20 + 68.21 +#define CALIBRATE_LATCH (5 * LATCH) 68.22 + 68.23 +static inline void mach_prepare_counter(void) 68.24 +{ 68.25 + /* Set the Gate high, disable speaker */ 68.26 + outb((inb(0x61) & ~0x02) | 0x01, 0x61); 68.27 + 68.28 + /* 68.29 + * Now let's take care of CTC channel 2 68.30 + * 68.31 + * Set the Gate high, program CTC channel 2 for mode 0, 68.32 + * (interrupt on terminal count mode), binary count, 68.33 + * load 5 * LATCH count, (LSB and MSB) to begin countdown. 68.34 + * 68.35 + * Some devices need a delay here. 68.36 + */ 68.37 + outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ 68.38 + outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ 68.39 + outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ 68.40 +} 68.41 + 68.42 +static inline void mach_countup(unsigned long *count_p) 68.43 +{ 68.44 + unsigned long count = 0; 68.45 + do { 68.46 + count++; 68.47 + } while ((inb_p(0x61) & 0x20) == 0); 68.48 + *count_p = count; 68.49 +} 68.50 + 68.51 +#endif /* !_MACH_TIMER_H */
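Note: CALIBRATE_LATCH above sizes the busy-wait window used to measure the TSC against PIT channel 2. Assuming the usual Linux definitions (PIT input clock of 1193182 Hz, HZ = 100, LATCH = (1193182 + HZ/2) / HZ), a count of 5 * LATCH works out to roughly a 50 ms window. A quick back-of-the-envelope check:

/* Compute the calibration window implied by CALIBRATE_LATCH. */
#include <stdio.h>

#define PIT_HZ 1193182UL                       /* i8254 input clock */
#define HZ     100UL                           /* assumed tick rate */
#define LATCH  ((PIT_HZ + HZ / 2) / HZ)
#define CALIBRATE_LATCH (5 * LATCH)

int main(void)
{
    double window_us = CALIBRATE_LATCH * 1e6 / PIT_HZ;

    printf("calibration window: %lu PIT ticks = %.1f us\n",
           (unsigned long)CALIBRATE_LATCH, window_us);
    /* A TSC delta measured over this window then gives
     * cpu_khz ~= tsc_delta / (window_us / 1000). */
    return 0;
}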
69.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 69.2 +++ b/xen/include/asm-x86/mach-default/mach_traps.h Mon May 09 17:50:11 2005 +0000 69.3 @@ -0,0 +1,29 @@ 69.4 +/* 69.5 + * include/asm-i386/mach-default/mach_traps.h 69.6 + * 69.7 + * Machine specific NMI handling for generic. 69.8 + * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> 69.9 + */ 69.10 +#ifndef _MACH_TRAPS_H 69.11 +#define _MACH_TRAPS_H 69.12 + 69.13 +static inline void clear_mem_error(unsigned char reason) 69.14 +{ 69.15 + reason = (reason & 0xf) | 4; 69.16 + outb(reason, 0x61); 69.17 +} 69.18 + 69.19 +static inline unsigned char get_nmi_reason(void) 69.20 +{ 69.21 + return inb(0x61); 69.22 +} 69.23 + 69.24 +static inline void reassert_nmi(void) 69.25 +{ 69.26 + outb(0x8f, 0x70); 69.27 + inb(0x71); /* dummy */ 69.28 + outb(0x0f, 0x70); 69.29 + inb(0x71); /* dummy */ 69.30 +} 69.31 + 69.32 +#endif /* !_MACH_TRAPS_H */
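Note: the byte returned by get_nmi_reason() above is the system control port B value; in the Linux traps.c that consumes it, bit 7 signals a memory parity/SERR# NMI and bit 6 an I/O channel check. A small decoding sketch (the sample value is arbitrary, and the bit meanings follow that Linux usage rather than anything stated in this patch):

/* Decode an NMI reason byte as traps.c interprets it. */
#include <stdio.h>

int main(void)
{
    unsigned char reason = 0x80;               /* pretend parity-error NMI */

    if (reason & 0x80)
        printf("NMI: memory parity / SERR#\n");
    if (reason & 0x40)
        printf("NMI: I/O channel check (IOCHK)\n");
    if (!(reason & 0xc0))
        printf("NMI: unknown source\n");
    return 0;
}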
70.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 70.2 +++ b/xen/include/asm-x86/mach-default/mach_wakecpu.h Mon May 09 17:50:11 2005 +0000 70.3 @@ -0,0 +1,41 @@ 70.4 +#ifndef __ASM_MACH_WAKECPU_H 70.5 +#define __ASM_MACH_WAKECPU_H 70.6 + 70.7 +/* 70.8 + * This file copes with machines that wakeup secondary CPUs by the 70.9 + * INIT, INIT, STARTUP sequence. 70.10 + */ 70.11 + 70.12 +#define WAKE_SECONDARY_VIA_INIT 70.13 + 70.14 +#define TRAMPOLINE_LOW phys_to_virt(0x467) 70.15 +#define TRAMPOLINE_HIGH phys_to_virt(0x469) 70.16 + 70.17 +#define boot_cpu_apicid boot_cpu_physical_apicid 70.18 + 70.19 +static inline void wait_for_init_deassert(atomic_t *deassert) 70.20 +{ 70.21 + while (!atomic_read(deassert)); 70.22 + return; 70.23 +} 70.24 + 70.25 +/* Nothing to do for most platforms, since cleared by the INIT cycle */ 70.26 +static inline void smp_callin_clear_local_apic(void) 70.27 +{ 70.28 +} 70.29 + 70.30 +static inline void store_NMI_vector(unsigned short *high, unsigned short *low) 70.31 +{ 70.32 +} 70.33 + 70.34 +static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) 70.35 +{ 70.36 +} 70.37 + 70.38 +#if APIC_DEBUG 70.39 + #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) 70.40 +#else 70.41 + #define inquire_remote_apic(apicid) {} 70.42 +#endif 70.43 + 70.44 +#endif /* __ASM_MACH_WAKECPU_H */
71.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 71.2 +++ b/xen/include/asm-x86/mach-default/pci-functions.h Mon May 09 17:50:11 2005 +0000 71.3 @@ -0,0 +1,19 @@ 71.4 +/* 71.5 + * PCI BIOS function numbering for conventional PCI BIOS 71.6 + * systems 71.7 + */ 71.8 + 71.9 +#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX 71.10 +#define PCIBIOS_PCI_BIOS_PRESENT 0xb101 71.11 +#define PCIBIOS_FIND_PCI_DEVICE 0xb102 71.12 +#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 71.13 +#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 71.14 +#define PCIBIOS_READ_CONFIG_BYTE 0xb108 71.15 +#define PCIBIOS_READ_CONFIG_WORD 0xb109 71.16 +#define PCIBIOS_READ_CONFIG_DWORD 0xb10a 71.17 +#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b 71.18 +#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c 71.19 +#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d 71.20 +#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e 71.21 +#define PCIBIOS_SET_PCI_HW_INT 0xb10f 71.22 +
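Note: each PCIBIOS_* constant above is an AX value for the real-mode PCI BIOS INT 1Ah interface: AH = 0xB1 selects the PCI BIOS service and AL selects the subfunction (which is why the function-ID placeholder is spelled 0xb1XX). A trivial decomposition example:

/* Split a PCI BIOS function number into its AH/AL halves. */
#include <stdio.h>

#define PCIBIOS_READ_CONFIG_DWORD 0xb10a

int main(void)
{
    unsigned int ax = PCIBIOS_READ_CONFIG_DWORD;

    printf("AH = 0x%02x (PCI BIOS), AL = 0x%02x (read config dword)\n",
           (ax >> 8) & 0xff, ax & 0xff);
    return 0;
}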
72.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 72.2 +++ b/xen/include/asm-x86/mach-default/setup_arch_post.h Mon May 09 17:50:11 2005 +0000 72.3 @@ -0,0 +1,40 @@ 72.4 +/** 72.5 + * machine_specific_memory_setup - Hook for machine specific memory setup. 72.6 + * 72.7 + * Description: 72.8 + * This is included late in kernel/setup.c so that it can make 72.9 + * use of all of the static functions. 72.10 + **/ 72.11 + 72.12 +static char * __init machine_specific_memory_setup(void) 72.13 +{ 72.14 + char *who; 72.15 + 72.16 + 72.17 + who = "BIOS-e820"; 72.18 + 72.19 + /* 72.20 + * Try to copy the BIOS-supplied E820-map. 72.21 + * 72.22 + * Otherwise fake a memory map; one section from 0k->640k, 72.23 + * the next section from 1mb->appropriate_mem_k 72.24 + */ 72.25 + sanitize_e820_map(E820_MAP, &E820_MAP_NR); 72.26 + if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) { 72.27 + unsigned long mem_size; 72.28 + 72.29 + /* compare results from other methods and take the greater */ 72.30 + if (ALT_MEM_K < EXT_MEM_K) { 72.31 + mem_size = EXT_MEM_K; 72.32 + who = "BIOS-88"; 72.33 + } else { 72.34 + mem_size = ALT_MEM_K; 72.35 + who = "BIOS-e801"; 72.36 + } 72.37 + 72.38 + e820.nr_map = 0; 72.39 + add_memory_region(0, LOWMEMSIZE(), E820_RAM); 72.40 + add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); 72.41 + } 72.42 + return who; 72.43 +}
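Note: when the e820 copy fails, machine_specific_memory_setup() above falls back to a two-region map: conventional memory below 640 KiB and extended memory from 1 MiB upward, sized from the larger of the BIOS-88/e801 KiB counts (hence the << 10 to convert KiB to bytes). A sketch of the resulting map, assuming the usual Linux values for LOWMEMSIZE() and HIGH_MEMORY and an invented KiB figure:

/* Build the fallback memory map from a BIOS-reported KiB count. */
#include <stdio.h>

#define LOWMEMSIZE()  0x9f000UL                /* assumed, as in Linux: 636 KiB */
#define HIGH_MEMORY   0x100000UL               /* 1 MiB */

int main(void)
{
    unsigned long mem_size_k = 64512;          /* e.g. 63 MiB above 1 MiB */

    printf("RAM: 0x%08lx - 0x%08lx\n", 0UL, LOWMEMSIZE());
    printf("RAM: 0x%08lx - 0x%08lx\n", HIGH_MEMORY,
           HIGH_MEMORY + (mem_size_k << 10));
    return 0;
}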
73.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 73.2 +++ b/xen/include/asm-x86/mach-default/setup_arch_pre.h Mon May 09 17:50:11 2005 +0000 73.3 @@ -0,0 +1,5 @@ 73.4 +/* Hook to call BIOS initialisation function */ 73.5 + 73.6 +/* no action for generic */ 73.7 + 73.8 +#define ARCH_SETUP
74.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 74.2 +++ b/xen/include/asm-x86/mach-default/smpboot_hooks.h Mon May 09 17:50:11 2005 +0000 74.3 @@ -0,0 +1,44 @@ 74.4 +/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws 74.5 + * which needs to alter them. */ 74.6 + 74.7 +static inline void smpboot_clear_io_apic_irqs(void) 74.8 +{ 74.9 + io_apic_irqs = 0; 74.10 +} 74.11 + 74.12 +static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) 74.13 +{ 74.14 + CMOS_WRITE(0xa, 0xf); 74.15 + local_flush_tlb(); 74.16 + Dprintk("1.\n"); 74.17 + *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; 74.18 + Dprintk("2.\n"); 74.19 + *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; 74.20 + Dprintk("3.\n"); 74.21 +} 74.22 + 74.23 +static inline void smpboot_restore_warm_reset_vector(void) 74.24 +{ 74.25 + /* 74.26 + * Install writable page 0 entry to set BIOS data area. 74.27 + */ 74.28 + local_flush_tlb(); 74.29 + 74.30 + /* 74.31 + * Paranoid: Set warm reset code and vector here back 74.32 + * to default values. 74.33 + */ 74.34 + CMOS_WRITE(0, 0xf); 74.35 + 74.36 + *((volatile long *) phys_to_virt(0x467)) = 0; 74.37 +} 74.38 + 74.39 +static inline void smpboot_setup_io_apic(void) 74.40 +{ 74.41 + /* 74.42 + * Here we can be sure that there is an IO-APIC in the system. Let's 74.43 + * go and set it up: 74.44 + */ 74.45 + if (!skip_ioapic_setup && nr_ioapics) 74.46 + setup_IO_APIC(); 74.47 +}
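Note: smpboot_setup_warm_reset_vector() above stores the trampoline address in the BIOS data area as a real-mode segment (start_eip >> 4, at TRAMPOLINE_HIGH = 0x469) and offset (start_eip & 0xf, at TRAMPOLINE_LOW = 0x467); the woken CPU's BIOS recombines them as segment * 16 + offset. A standalone check of that encoding (the trampoline address is an arbitrary example):

/* Encode and recombine the 40:67 warm-reset vector. */
#include <stdio.h>

int main(void)
{
    unsigned long start_eip = 0x9a000;         /* example trampoline address */
    unsigned short seg = start_eip >> 4;
    unsigned short off = start_eip & 0xf;

    printf("segment:offset = %04x:%04x -> linear 0x%lx\n",
           seg, off, (unsigned long)seg * 16 + off);
    return 0;
}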
75.1 --- a/xen/include/asm-x86/mach_apic.h Mon May 09 14:34:59 2005 +0000 75.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 75.3 @@ -1,32 +0,0 @@ 75.4 -/* 75.5 - * based on linux-2.6.10/include/asm-i386/mach-default/mach_apic.h 75.6 - * 75.7 - */ 75.8 -#ifndef __ASM_MACH_APIC_H 75.9 -#define __ASM_MACH_APIC_H 75.10 - 75.11 -#define APIC_DFR_VALUE (APIC_DFR_FLAT) 75.12 -#define esr_disable (0) 75.13 - 75.14 -/* 75.15 - * Set up the logical destination ID. 75.16 - * 75.17 - * Intel recommends to set DFR, LDR and TPR before enabling 75.18 - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 75.19 - * document number 292116). So here it goes... 75.20 - */ 75.21 -static inline void init_apic_ldr(void) 75.22 -{ 75.23 - unsigned long val; 75.24 - 75.25 - apic_write_around(APIC_DFR, APIC_DFR_VALUE); 75.26 - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 75.27 - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 75.28 - apic_write_around(APIC_LDR, val); 75.29 -} 75.30 - 75.31 -static inline int apic_id_registered(void) 75.32 -{ 75.33 - return test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map); 75.34 -} 75.35 -#endif /* __ASM_MACH_APIC_H */
76.1 --- a/xen/include/asm-x86/mpspec.h Mon May 09 14:34:59 2005 +0000 76.2 +++ b/xen/include/asm-x86/mpspec.h Mon May 09 17:50:11 2005 +0000 76.3 @@ -1,242 +1,84 @@ 76.4 #ifndef __ASM_MPSPEC_H 76.5 #define __ASM_MPSPEC_H 76.6 76.7 -#include <xen/config.h> 76.8 -#include <xen/types.h> 76.9 - 76.10 -/* 76.11 - * Structure definitions for SMP machines following the 76.12 - * Intel Multiprocessing Specification 1.1 and 1.4. 76.13 - */ 76.14 - 76.15 -/* 76.16 - * This tag identifies where the SMP configuration 76.17 - * information is. 76.18 - */ 76.19 - 76.20 -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') 76.21 - 76.22 -/* 76.23 - * a maximum of 16 APICs with the current APIC ID architecture. 76.24 - * xAPICs can have up to 256. SAPICs have 16 ID bits. 76.25 - */ 76.26 -#ifdef CONFIG_X86_CLUSTERED_APIC 76.27 -#define MAX_APICS 256 76.28 -#else 76.29 -#define MAX_APICS 16 76.30 -#endif 76.31 - 76.32 -#define MAX_MPC_ENTRY 1024 76.33 - 76.34 -struct intel_mp_floating 76.35 -{ 76.36 - char mpf_signature[4]; /* "_MP_" */ 76.37 - unsigned int mpf_physptr; /* Configuration table address */ 76.38 - unsigned char mpf_length; /* Our length (paragraphs) */ 76.39 - unsigned char mpf_specification;/* Specification version */ 76.40 - unsigned char mpf_checksum; /* Checksum (makes sum 0) */ 76.41 - unsigned char mpf_feature1; /* Standard or configuration ? */ 76.42 - unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ 76.43 - unsigned char mpf_feature3; /* Unused (0) */ 76.44 - unsigned char mpf_feature4; /* Unused (0) */ 76.45 - unsigned char mpf_feature5; /* Unused (0) */ 76.46 -}; 76.47 - 76.48 -struct mp_config_table 76.49 -{ 76.50 - char mpc_signature[4]; 76.51 -#define MPC_SIGNATURE "PCMP" 76.52 - unsigned short mpc_length; /* Size of table */ 76.53 - char mpc_spec; /* 0x01 */ 76.54 - char mpc_checksum; 76.55 - char mpc_oem[8]; 76.56 - char mpc_productid[12]; 76.57 - unsigned int mpc_oemptr; /* 0 if not present */ 76.58 - unsigned short mpc_oemsize; /* 0 if not present */ 76.59 - unsigned short mpc_oemcount; 76.60 - unsigned int mpc_lapic; /* APIC address */ 76.61 - unsigned int reserved; 76.62 -}; 76.63 - 76.64 -/* Followed by entries */ 76.65 - 76.66 -#define MP_PROCESSOR 0 76.67 -#define MP_BUS 1 76.68 -#define MP_IOAPIC 2 76.69 -#define MP_INTSRC 3 76.70 -#define MP_LINTSRC 4 76.71 -#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ 76.72 - 76.73 -struct mpc_config_processor 76.74 -{ 76.75 - unsigned char mpc_type; 76.76 - unsigned char mpc_apicid; /* Local APIC number */ 76.77 - unsigned char mpc_apicver; /* Its versions */ 76.78 - unsigned char mpc_cpuflag; 76.79 -#define CPU_ENABLED 1 /* Processor is available */ 76.80 -#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ 76.81 - unsigned int mpc_cpufeature; 76.82 -#define CPU_STEPPING_MASK 0x0F 76.83 -#define CPU_MODEL_MASK 0xF0 76.84 -#define CPU_FAMILY_MASK 0xF00 76.85 - unsigned int mpc_featureflag; /* CPUID feature value */ 76.86 - unsigned int mpc_reserved[2]; 76.87 -}; 76.88 - 76.89 -struct mpc_config_bus 76.90 -{ 76.91 - unsigned char mpc_type; 76.92 - unsigned char mpc_busid; 76.93 - unsigned char mpc_bustype[6] __attribute((packed)); 76.94 -}; 76.95 +#include <xen/cpumask.h> 76.96 +#include <asm/mpspec_def.h> 76.97 +#include <mach_mpspec.h> 76.98 76.99 -/* List of Bus Type string values, Intel MP Spec. 
*/ 76.100 -#define BUSTYPE_EISA "EISA" 76.101 -#define BUSTYPE_ISA "ISA" 76.102 -#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ 76.103 -#define BUSTYPE_MCA "MCA" 76.104 -#define BUSTYPE_VL "VL" /* Local bus */ 76.105 -#define BUSTYPE_PCI "PCI" 76.106 -#define BUSTYPE_PCMCIA "PCMCIA" 76.107 -#define BUSTYPE_CBUS "CBUS" 76.108 -#define BUSTYPE_CBUSII "CBUSII" 76.109 -#define BUSTYPE_FUTURE "FUTURE" 76.110 -#define BUSTYPE_MBI "MBI" 76.111 -#define BUSTYPE_MBII "MBII" 76.112 -#define BUSTYPE_MPI "MPI" 76.113 -#define BUSTYPE_MPSA "MPSA" 76.114 -#define BUSTYPE_NUBUS "NUBUS" 76.115 -#define BUSTYPE_TC "TC" 76.116 -#define BUSTYPE_VME "VME" 76.117 -#define BUSTYPE_XPRESS "XPRESS" 76.118 - 76.119 -struct mpc_config_ioapic 76.120 -{ 76.121 - unsigned char mpc_type; 76.122 - unsigned char mpc_apicid; 76.123 - unsigned char mpc_apicver; 76.124 - unsigned char mpc_flags; 76.125 -#define MPC_APIC_USABLE 0x01 76.126 - unsigned int mpc_apicaddr; 76.127 -}; 76.128 - 76.129 -struct mpc_config_intsrc 76.130 -{ 76.131 - unsigned char mpc_type; 76.132 - unsigned char mpc_irqtype; 76.133 - unsigned short mpc_irqflag; 76.134 - unsigned char mpc_srcbus; 76.135 - unsigned char mpc_srcbusirq; 76.136 - unsigned char mpc_dstapic; 76.137 - unsigned char mpc_dstirq; 76.138 -}; 76.139 - 76.140 -enum mp_irq_source_types { 76.141 - mp_INT = 0, 76.142 - mp_NMI = 1, 76.143 - mp_SMI = 2, 76.144 - mp_ExtINT = 3 76.145 -}; 76.146 - 76.147 -#define MP_IRQDIR_DEFAULT 0 76.148 -#define MP_IRQDIR_HIGH 1 76.149 -#define MP_IRQDIR_LOW 3 76.150 - 76.151 - 76.152 -struct mpc_config_lintsrc 76.153 -{ 76.154 - unsigned char mpc_type; 76.155 - unsigned char mpc_irqtype; 76.156 - unsigned short mpc_irqflag; 76.157 - unsigned char mpc_srcbusid; 76.158 - unsigned char mpc_srcbusirq; 76.159 - unsigned char mpc_destapic; 76.160 -#define MP_APIC_ALL 0xFF 76.161 - unsigned char mpc_destapiclint;