ia64/xen-unstable

changeset 17888:0ed5e1c8c778

[IA64] import linux-2.6.25-rc4 acpi related files.

import acpi.c, acpi_numa.c and acpi.h
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed Jun 11 15:39:41 2008 +0900 (2008-06-11)
parents cddaea5d6528
children 220f3d62e272
files xen/arch/ia64/linux-xen/README.origin xen/arch/ia64/linux-xen/acpi.c xen/arch/ia64/linux-xen/acpi_numa.c xen/include/asm-ia64/linux-xen/asm/README.origin xen/include/asm-ia64/linux-xen/asm/acpi.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/README.origin	Wed Jun 11 15:38:00 2008 +0900
     1.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Wed Jun 11 15:39:41 2008 +0900
     1.3 @@ -44,3 +44,7 @@ perfmon_montecito.h	-> linux/arch/kernel
     1.4  # The files below are from Linux-2.6.21
     1.5  efi.c			-> linux/arch/ia64/kernel/efi.c
     1.6  pal.S			-> linux/arch/ia64/kernel/pal.S
     1.7 +
     1.8 +# The files below are from Linux-2.6.26-rc5
     1.9 +acpi.c			-> linux/arch/ia64/kernel/acpi.c
    1.10 +acpi_numa.c		-> linux/drivers/acpi/numa.c
    1.11 \ No newline at end of file
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/ia64/linux-xen/acpi.c	Wed Jun 11 15:39:41 2008 +0900
     2.3 @@ -0,0 +1,1022 @@
     2.4 +/*
     2.5 + *  acpi.c - Architecture-Specific Low-Level ACPI Support
     2.6 + *
     2.7 + *  Copyright (C) 1999 VA Linux Systems
     2.8 + *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
     2.9 + *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
    2.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    2.11 + *  Copyright (C) 2000 Intel Corp.
    2.12 + *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
    2.13 + *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
    2.14 + *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
    2.15 + *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
    2.16 + *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
    2.17 + *  Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
    2.18 + *
    2.19 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    2.20 + *
    2.21 + *  This program is free software; you can redistribute it and/or modify
    2.22 + *  it under the terms of the GNU General Public License as published by
    2.23 + *  the Free Software Foundation; either version 2 of the License, or
    2.24 + *  (at your option) any later version.
    2.25 + *
    2.26 + *  This program is distributed in the hope that it will be useful,
    2.27 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    2.28 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    2.29 + *  GNU General Public License for more details.
    2.30 + *
    2.31 + *  You should have received a copy of the GNU General Public License
    2.32 + *  along with this program; if not, write to the Free Software
    2.33 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
    2.34 + *
    2.35 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    2.36 + */
    2.37 +
    2.38 +#include <linux/module.h>
    2.39 +#include <linux/init.h>
    2.40 +#include <linux/kernel.h>
    2.41 +#include <linux/sched.h>
    2.42 +#include <linux/smp.h>
    2.43 +#include <linux/string.h>
    2.44 +#include <linux/types.h>
    2.45 +#include <linux/irq.h>
    2.46 +#include <linux/acpi.h>
    2.47 +#include <linux/efi.h>
    2.48 +#include <linux/mmzone.h>
    2.49 +#include <linux/nodemask.h>
    2.50 +#include <asm/io.h>
    2.51 +#include <asm/iosapic.h>
    2.52 +#include <asm/machvec.h>
    2.53 +#include <asm/page.h>
    2.54 +#include <asm/system.h>
    2.55 +#include <asm/numa.h>
    2.56 +#include <asm/sal.h>
    2.57 +#include <asm/cyclone.h>
    2.58 +#include <asm/xen/hypervisor.h>
    2.59 +
    2.60 +#define BAD_MADT_ENTRY(entry, end) (                                        \
    2.61 +		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
    2.62 +		((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
    2.63 +
    2.64 +#define PREFIX			"ACPI: "
    2.65 +
    2.66 +void (*pm_idle) (void);
    2.67 +EXPORT_SYMBOL(pm_idle);
    2.68 +void (*pm_power_off) (void);
    2.69 +EXPORT_SYMBOL(pm_power_off);
    2.70 +
    2.71 +unsigned int acpi_cpei_override;
    2.72 +unsigned int acpi_cpei_phys_cpuid;
    2.73 +
    2.74 +unsigned long acpi_wakeup_address = 0;
    2.75 +
    2.76 +#ifdef CONFIG_IA64_GENERIC
    2.77 +static unsigned long __init acpi_find_rsdp(void)
    2.78 +{
    2.79 +	unsigned long rsdp_phys = 0;
    2.80 +
    2.81 +	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
    2.82 +		rsdp_phys = efi.acpi20;
    2.83 +	else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
    2.84 +		printk(KERN_WARNING PREFIX
    2.85 +		       "v1.0/r0.71 tables no longer supported\n");
    2.86 +	return rsdp_phys;
    2.87 +}
    2.88 +#endif
    2.89 +
    2.90 +const char __init *
    2.91 +acpi_get_sysname(void)
    2.92 +{
    2.93 +#ifdef CONFIG_IA64_GENERIC
    2.94 +	unsigned long rsdp_phys;
    2.95 +	struct acpi_table_rsdp *rsdp;
    2.96 +	struct acpi_table_xsdt *xsdt;
    2.97 +	struct acpi_table_header *hdr;
    2.98 +
    2.99 +	rsdp_phys = acpi_find_rsdp();
   2.100 +	if (!rsdp_phys) {
   2.101 +		printk(KERN_ERR
   2.102 +		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
   2.103 +		return "dig";
   2.104 +	}
   2.105 +
   2.106 +	rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
   2.107 +	if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
   2.108 +		printk(KERN_ERR
   2.109 +		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
   2.110 +		return "dig";
   2.111 +	}
   2.112 +
   2.113 +	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
   2.114 +	hdr = &xsdt->header;
   2.115 +	if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
   2.116 +		printk(KERN_ERR
   2.117 +		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
   2.118 +		return "dig";
   2.119 +	}
   2.120 +
   2.121 +	if (!strcmp(hdr->oem_id, "HP")) {
   2.122 +		return "hpzx1";
   2.123 +	} else if (!strcmp(hdr->oem_id, "SGI")) {
   2.124 +		if (!strcmp(hdr->oem_table_id + 4, "UV"))
   2.125 +			return "uv";
   2.126 +		else
   2.127 +			return "sn2";
   2.128 +	} else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
   2.129 +		return "xen";
   2.130 +	}
   2.131 +
   2.132 +	return "dig";
   2.133 +#else
   2.134 +# if defined (CONFIG_IA64_HP_SIM)
   2.135 +	return "hpsim";
   2.136 +# elif defined (CONFIG_IA64_HP_ZX1)
   2.137 +	return "hpzx1";
   2.138 +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
   2.139 +	return "hpzx1_swiotlb";
   2.140 +# elif defined (CONFIG_IA64_SGI_SN2)
   2.141 +	return "sn2";
   2.142 +# elif defined (CONFIG_IA64_SGI_UV)
   2.143 +	return "uv";
   2.144 +# elif defined (CONFIG_IA64_DIG)
   2.145 +	return "dig";
   2.146 +# elif defined (CONFIG_IA64_XEN)
   2.147 +	return "xen";
   2.148 +# else
   2.149 +#	error Unknown platform.  Fix acpi.c.
   2.150 +# endif
   2.151 +#endif
   2.152 +}
   2.153 +
   2.154 +#ifdef CONFIG_ACPI
   2.155 +
   2.156 +#define ACPI_MAX_PLATFORM_INTERRUPTS	256
   2.157 +
   2.158 +/* Array to record platform interrupt vectors for generic interrupt routing. */
   2.159 +int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
   2.160 +	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
   2.161 +};
   2.162 +
   2.163 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
   2.164 +
   2.165 +/*
   2.166 + * Interrupt routing API for device drivers.  Provides interrupt vector for
   2.167 + * a generic platform event.  Currently only CPEI is implemented.
   2.168 + */
   2.169 +int acpi_request_vector(u32 int_type)
   2.170 +{
   2.171 +	int vector = -1;
   2.172 +
   2.173 +	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
   2.174 +		/* corrected platform error interrupt */
   2.175 +		vector = platform_intr_list[int_type];
   2.176 +	} else
   2.177 +		printk(KERN_ERR
   2.178 +		       "acpi_request_vector(): invalid interrupt type\n");
   2.179 +	return vector;
   2.180 +}
   2.181 +
   2.182 +char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
   2.183 +{
   2.184 +	return __va(phys_addr);
   2.185 +}
   2.186 +
   2.187 +/* --------------------------------------------------------------------------
   2.188 +                            Boot-time Table Parsing
   2.189 +   -------------------------------------------------------------------------- */
   2.190 +
   2.191 +static int total_cpus __initdata;
   2.192 +static int available_cpus __initdata;
   2.193 +struct acpi_table_madt *acpi_madt __initdata;
   2.194 +static u8 has_8259;
   2.195 +
   2.196 +static int __init
   2.197 +acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
   2.198 +			  const unsigned long end)
   2.199 +{
   2.200 +	struct acpi_madt_local_apic_override *lapic;
   2.201 +
   2.202 +	lapic = (struct acpi_madt_local_apic_override *)header;
   2.203 +
   2.204 +	if (BAD_MADT_ENTRY(lapic, end))
   2.205 +		return -EINVAL;
   2.206 +
   2.207 +	if (lapic->address) {
   2.208 +		iounmap(ipi_base_addr);
   2.209 +		ipi_base_addr = ioremap(lapic->address, 0);
   2.210 +	}
   2.211 +	return 0;
   2.212 +}
   2.213 +
   2.214 +static int __init
   2.215 +acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
   2.216 +{
   2.217 +	struct acpi_madt_local_sapic *lsapic;
   2.218 +
   2.219 +	lsapic = (struct acpi_madt_local_sapic *)header;
   2.220 +
   2.221 +	/*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
   2.222 +
   2.223 +	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
   2.224 +#ifdef CONFIG_SMP
   2.225 +		smp_boot_data.cpu_phys_id[available_cpus] =
   2.226 +		    (lsapic->id << 8) | lsapic->eid;
   2.227 +#endif
   2.228 +		++available_cpus;
   2.229 +	}
   2.230 +
   2.231 +	total_cpus++;
   2.232 +	return 0;
   2.233 +}
   2.234 +
   2.235 +static int __init
   2.236 +acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
   2.237 +{
   2.238 +	struct acpi_madt_local_apic_nmi *lacpi_nmi;
   2.239 +
   2.240 +	lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
   2.241 +
   2.242 +	if (BAD_MADT_ENTRY(lacpi_nmi, end))
   2.243 +		return -EINVAL;
   2.244 +
   2.245 +	/* TBD: Support lapic_nmi entries */
   2.246 +	return 0;
   2.247 +}
   2.248 +
   2.249 +static int __init
   2.250 +acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
   2.251 +{
   2.252 +	struct acpi_madt_io_sapic *iosapic;
   2.253 +
   2.254 +	iosapic = (struct acpi_madt_io_sapic *)header;
   2.255 +
   2.256 +	if (BAD_MADT_ENTRY(iosapic, end))
   2.257 +		return -EINVAL;
   2.258 +
   2.259 +	return iosapic_init(iosapic->address, iosapic->global_irq_base);
   2.260 +}
   2.261 +
   2.262 +static unsigned int __initdata acpi_madt_rev;
   2.263 +
   2.264 +static int __init
   2.265 +acpi_parse_plat_int_src(struct acpi_subtable_header * header,
   2.266 +			const unsigned long end)
   2.267 +{
   2.268 +	struct acpi_madt_interrupt_source *plintsrc;
   2.269 +	int vector;
   2.270 +
   2.271 +	plintsrc = (struct acpi_madt_interrupt_source *)header;
   2.272 +
   2.273 +	if (BAD_MADT_ENTRY(plintsrc, end))
   2.274 +		return -EINVAL;
   2.275 +
   2.276 +	/*
   2.277 +	 * Get vector assignment for this interrupt, set attributes,
   2.278 +	 * and program the IOSAPIC routing table.
   2.279 +	 */
   2.280 +	vector = iosapic_register_platform_intr(plintsrc->type,
   2.281 +						plintsrc->global_irq,
   2.282 +						plintsrc->io_sapic_vector,
   2.283 +						plintsrc->eid,
   2.284 +						plintsrc->id,
   2.285 +						((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
   2.286 +						 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
   2.287 +						IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   2.288 +						((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
   2.289 +						 ACPI_MADT_TRIGGER_EDGE) ?
   2.290 +						IOSAPIC_EDGE : IOSAPIC_LEVEL);
   2.291 +
   2.292 +	platform_intr_list[plintsrc->type] = vector;
   2.293 +	if (acpi_madt_rev > 1) {
   2.294 +		acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
   2.295 +	}
   2.296 +
   2.297 +	/*
    2.298 +	 * Save the physical id, so we can check when it's being removed
   2.299 +	 */
   2.300 +	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
   2.301 +
   2.302 +	return 0;
   2.303 +}
   2.304 +
   2.305 +#ifdef CONFIG_HOTPLUG_CPU
   2.306 +unsigned int can_cpei_retarget(void)
   2.307 +{
   2.308 +	extern int cpe_vector;
   2.309 +	extern unsigned int force_cpei_retarget;
   2.310 +
   2.311 +	/*
   2.312 +	 * Only if CPEI is supported and the override flag
    2.313 +	 * is present, otherwise return that it's re-targetable
   2.314 +	 * if we are in polling mode.
   2.315 +	 */
   2.316 +	if (cpe_vector > 0) {
   2.317 +		if (acpi_cpei_override || force_cpei_retarget)
   2.318 +			return 1;
   2.319 +		else
   2.320 +			return 0;
   2.321 +	}
   2.322 +	return 1;
   2.323 +}
   2.324 +
   2.325 +unsigned int is_cpu_cpei_target(unsigned int cpu)
   2.326 +{
   2.327 +	unsigned int logical_id;
   2.328 +
   2.329 +	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
   2.330 +
   2.331 +	if (logical_id == cpu)
   2.332 +		return 1;
   2.333 +	else
   2.334 +		return 0;
   2.335 +}
   2.336 +
   2.337 +void set_cpei_target_cpu(unsigned int cpu)
   2.338 +{
   2.339 +	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
   2.340 +}
   2.341 +#endif
   2.342 +
   2.343 +unsigned int get_cpei_target_cpu(void)
   2.344 +{
   2.345 +	return acpi_cpei_phys_cpuid;
   2.346 +}
   2.347 +
   2.348 +static int __init
   2.349 +acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
   2.350 +		       const unsigned long end)
   2.351 +{
   2.352 +	struct acpi_madt_interrupt_override *p;
   2.353 +
   2.354 +	p = (struct acpi_madt_interrupt_override *)header;
   2.355 +
   2.356 +	if (BAD_MADT_ENTRY(p, end))
   2.357 +		return -EINVAL;
   2.358 +
   2.359 +	iosapic_override_isa_irq(p->source_irq, p->global_irq,
   2.360 +				 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
   2.361 +				  ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
   2.362 +				 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   2.363 +				 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
   2.364 +				 ACPI_MADT_TRIGGER_EDGE) ?
   2.365 +				 IOSAPIC_EDGE : IOSAPIC_LEVEL);
   2.366 +	return 0;
   2.367 +}
   2.368 +
   2.369 +static int __init
   2.370 +acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
   2.371 +{
   2.372 +	struct acpi_madt_nmi_source *nmi_src;
   2.373 +
   2.374 +	nmi_src = (struct acpi_madt_nmi_source *)header;
   2.375 +
   2.376 +	if (BAD_MADT_ENTRY(nmi_src, end))
   2.377 +		return -EINVAL;
   2.378 +
    2.379 +	/* TBD: Support nmi_src entries */
   2.380 +	return 0;
   2.381 +}
   2.382 +
   2.383 +static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
   2.384 +{
   2.385 +	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
   2.386 +
   2.387 +		/*
   2.388 +		 * Unfortunately ITC_DRIFT is not yet part of the
   2.389 +		 * official SAL spec, so the ITC_DRIFT bit is not
   2.390 +		 * set by the BIOS on this hardware.
   2.391 +		 */
   2.392 +		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
   2.393 +
   2.394 +		cyclone_setup();
   2.395 +	}
   2.396 +}
   2.397 +
   2.398 +static int __init acpi_parse_madt(struct acpi_table_header *table)
   2.399 +{
   2.400 +	if (!table)
   2.401 +		return -EINVAL;
   2.402 +
   2.403 +	acpi_madt = (struct acpi_table_madt *)table;
   2.404 +
   2.405 +	acpi_madt_rev = acpi_madt->header.revision;
   2.406 +
   2.407 +	/* remember the value for reference after free_initmem() */
   2.408 +#ifdef CONFIG_ITANIUM
   2.409 +	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
   2.410 +#else
   2.411 +	has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
   2.412 +#endif
   2.413 +	iosapic_system_init(has_8259);
   2.414 +
   2.415 +	/* Get base address of IPI Message Block */
   2.416 +
   2.417 +	if (acpi_madt->address)
   2.418 +		ipi_base_addr = ioremap(acpi_madt->address, 0);
   2.419 +
   2.420 +	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
   2.421 +
   2.422 +	acpi_madt_oem_check(acpi_madt->header.oem_id,
   2.423 +			    acpi_madt->header.oem_table_id);
   2.424 +
   2.425 +	return 0;
   2.426 +}
   2.427 +
   2.428 +#ifdef CONFIG_ACPI_NUMA
   2.429 +
   2.430 +#undef SLIT_DEBUG
   2.431 +
   2.432 +#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
   2.433 +
   2.434 +static int __initdata srat_num_cpus;	/* number of cpus */
   2.435 +static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
   2.436 +#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
   2.437 +#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
   2.438 +static struct acpi_table_slit __initdata *slit_table;
   2.439 +cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
   2.440 +
   2.441 +static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
   2.442 +{
   2.443 +	int pxm;
   2.444 +
   2.445 +	pxm = pa->proximity_domain_lo;
   2.446 +	if (ia64_platform_is("sn2"))
   2.447 +		pxm += pa->proximity_domain_hi[0] << 8;
   2.448 +	return pxm;
   2.449 +}
   2.450 +
   2.451 +static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
   2.452 +{
   2.453 +	int pxm;
   2.454 +
   2.455 +	pxm = ma->proximity_domain;
   2.456 +	if (!ia64_platform_is("sn2"))
   2.457 +		pxm &= 0xff;
   2.458 +
   2.459 +	return pxm;
   2.460 +}
   2.461 +
   2.462 +/*
   2.463 + * ACPI 2.0 SLIT (System Locality Information Table)
   2.464 + * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
   2.465 + */
   2.466 +void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
   2.467 +{
   2.468 +	u32 len;
   2.469 +
   2.470 +	len = sizeof(struct acpi_table_header) + 8
   2.471 +	    + slit->locality_count * slit->locality_count;
   2.472 +	if (slit->header.length != len) {
   2.473 +		printk(KERN_ERR
   2.474 +		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
   2.475 +		       len, slit->header.length);
   2.476 +		memset(numa_slit, 10, sizeof(numa_slit));
   2.477 +		return;
   2.478 +	}
   2.479 +	slit_table = slit;
   2.480 +}
   2.481 +
   2.482 +void __init
   2.483 +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
   2.484 +{
   2.485 +	int pxm;
   2.486 +
   2.487 +	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
   2.488 +		return;
   2.489 +
   2.490 +	pxm = get_processor_proximity_domain(pa);
   2.491 +
   2.492 +	/* record this node in proximity bitmap */
   2.493 +	pxm_bit_set(pxm);
   2.494 +
   2.495 +	node_cpuid[srat_num_cpus].phys_id =
   2.496 +	    (pa->apic_id << 8) | (pa->local_sapic_eid);
   2.497 +	/* nid should be overridden as logical node id later */
   2.498 +	node_cpuid[srat_num_cpus].nid = pxm;
   2.499 +	cpu_set(srat_num_cpus, early_cpu_possible_map);
   2.500 +	srat_num_cpus++;
   2.501 +}
   2.502 +
   2.503 +void __init
   2.504 +acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
   2.505 +{
   2.506 +	unsigned long paddr, size;
   2.507 +	int pxm;
   2.508 +	struct node_memblk_s *p, *q, *pend;
   2.509 +
   2.510 +	pxm = get_memory_proximity_domain(ma);
   2.511 +
   2.512 +	/* fill node memory chunk structure */
   2.513 +	paddr = ma->base_address;
   2.514 +	size = ma->length;
   2.515 +
   2.516 +	/* Ignore disabled entries */
   2.517 +	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
   2.518 +		return;
   2.519 +
   2.520 +	/* record this node in proximity bitmap */
   2.521 +	pxm_bit_set(pxm);
   2.522 +
   2.523 +	/* Insertion sort based on base address */
   2.524 +	pend = &node_memblk[num_node_memblks];
   2.525 +	for (p = &node_memblk[0]; p < pend; p++) {
   2.526 +		if (paddr < p->start_paddr)
   2.527 +			break;
   2.528 +	}
   2.529 +	if (p < pend) {
   2.530 +		for (q = pend - 1; q >= p; q--)
   2.531 +			*(q + 1) = *q;
   2.532 +	}
   2.533 +	p->start_paddr = paddr;
   2.534 +	p->size = size;
   2.535 +	p->nid = pxm;
   2.536 +	num_node_memblks++;
   2.537 +}
   2.538 +
   2.539 +void __init acpi_numa_arch_fixup(void)
   2.540 +{
   2.541 +	int i, j, node_from, node_to;
   2.542 +
   2.543 +	/* If there's no SRAT, fix the phys_id and mark node 0 online */
   2.544 +	if (srat_num_cpus == 0) {
   2.545 +		node_set_online(0);
   2.546 +		node_cpuid[0].phys_id = hard_smp_processor_id();
   2.547 +		return;
   2.548 +	}
   2.549 +
   2.550 +	/*
   2.551 +	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
   2.552 +	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
   2.553 +	 */
   2.554 +	nodes_clear(node_online_map);
   2.555 +	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
   2.556 +		if (pxm_bit_test(i)) {
   2.557 +			int nid = acpi_map_pxm_to_node(i);
   2.558 +			node_set_online(nid);
   2.559 +		}
   2.560 +	}
   2.561 +
   2.562 +	/* set logical node id in memory chunk structure */
   2.563 +	for (i = 0; i < num_node_memblks; i++)
   2.564 +		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
   2.565 +
   2.566 +	/* assign memory bank numbers for each chunk on each node */
   2.567 +	for_each_online_node(i) {
   2.568 +		int bank;
   2.569 +
   2.570 +		bank = 0;
   2.571 +		for (j = 0; j < num_node_memblks; j++)
   2.572 +			if (node_memblk[j].nid == i)
   2.573 +				node_memblk[j].bank = bank++;
   2.574 +	}
   2.575 +
   2.576 +	/* set logical node id in cpu structure */
   2.577 +	for_each_possible_early_cpu(i)
   2.578 +		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
   2.579 +
   2.580 +	printk(KERN_INFO "Number of logical nodes in system = %d\n",
   2.581 +	       num_online_nodes());
   2.582 +	printk(KERN_INFO "Number of memory chunks in system = %d\n",
   2.583 +	       num_node_memblks);
   2.584 +
   2.585 +	if (!slit_table)
   2.586 +		return;
   2.587 +	memset(numa_slit, -1, sizeof(numa_slit));
   2.588 +	for (i = 0; i < slit_table->locality_count; i++) {
   2.589 +		if (!pxm_bit_test(i))
   2.590 +			continue;
   2.591 +		node_from = pxm_to_node(i);
   2.592 +		for (j = 0; j < slit_table->locality_count; j++) {
   2.593 +			if (!pxm_bit_test(j))
   2.594 +				continue;
   2.595 +			node_to = pxm_to_node(j);
   2.596 +			node_distance(node_from, node_to) =
   2.597 +			    slit_table->entry[i * slit_table->locality_count + j];
   2.598 +		}
   2.599 +	}
   2.600 +
   2.601 +#ifdef SLIT_DEBUG
   2.602 +	printk("ACPI 2.0 SLIT locality table:\n");
   2.603 +	for_each_online_node(i) {
   2.604 +		for_each_online_node(j)
   2.605 +		    printk("%03d ", node_distance(i, j));
   2.606 +		printk("\n");
   2.607 +	}
   2.608 +#endif
   2.609 +}
   2.610 +#endif				/* CONFIG_ACPI_NUMA */
   2.611 +
   2.612 +/*
   2.613 + * success: return IRQ number (>=0)
   2.614 + * failure: return < 0
   2.615 + */
   2.616 +int acpi_register_gsi(u32 gsi, int triggering, int polarity)
   2.617 +{
   2.618 +	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
   2.619 +		return gsi;
   2.620 +
   2.621 +	if (has_8259 && gsi < 16)
   2.622 +		return isa_irq_to_vector(gsi);
   2.623 +
   2.624 +	return iosapic_register_intr(gsi,
   2.625 +				     (polarity ==
   2.626 +				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
   2.627 +				     IOSAPIC_POL_LOW,
   2.628 +				     (triggering ==
   2.629 +				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
   2.630 +				     IOSAPIC_LEVEL);
   2.631 +}
   2.632 +
   2.633 +void acpi_unregister_gsi(u32 gsi)
   2.634 +{
   2.635 +	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
   2.636 +		return;
   2.637 +
   2.638 +	if (has_8259 && gsi < 16)
   2.639 +		return;
   2.640 +
   2.641 +	iosapic_unregister_intr(gsi);
   2.642 +}
   2.643 +
   2.644 +static int __init acpi_parse_fadt(struct acpi_table_header *table)
   2.645 +{
   2.646 +	struct acpi_table_header *fadt_header;
   2.647 +	struct acpi_table_fadt *fadt;
   2.648 +
   2.649 +	if (!table)
   2.650 +		return -EINVAL;
   2.651 +
   2.652 +	fadt_header = (struct acpi_table_header *)table;
   2.653 +	if (fadt_header->revision != 3)
   2.654 +		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
   2.655 +
   2.656 +	fadt = (struct acpi_table_fadt *)fadt_header;
   2.657 +
   2.658 +	acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   2.659 +	return 0;
   2.660 +}
   2.661 +
   2.662 +int __init acpi_boot_init(void)
   2.663 +{
   2.664 +
   2.665 +	/*
   2.666 +	 * MADT
   2.667 +	 * ----
   2.668 +	 * Parse the Multiple APIC Description Table (MADT), if exists.
   2.669 +	 * Note that this table provides platform SMP configuration
   2.670 +	 * information -- the successor to MPS tables.
   2.671 +	 */
   2.672 +
   2.673 +	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
   2.674 +		printk(KERN_ERR PREFIX "Can't find MADT\n");
   2.675 +		goto skip_madt;
   2.676 +	}
   2.677 +
   2.678 +	/* Local APIC */
   2.679 +
   2.680 +	if (acpi_table_parse_madt
   2.681 +	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
   2.682 +		printk(KERN_ERR PREFIX
   2.683 +		       "Error parsing LAPIC address override entry\n");
   2.684 +
   2.685 +	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
   2.686 +	    < 1)
   2.687 +		printk(KERN_ERR PREFIX
   2.688 +		       "Error parsing MADT - no LAPIC entries\n");
   2.689 +
   2.690 +	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
   2.691 +	    < 0)
   2.692 +		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   2.693 +
   2.694 +	/* I/O APIC */
   2.695 +
   2.696 +	if (acpi_table_parse_madt
   2.697 +	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
   2.698 +		if (!ia64_platform_is("sn2"))
   2.699 +			printk(KERN_ERR PREFIX
   2.700 +			       "Error parsing MADT - no IOSAPIC entries\n");
   2.701 +	}
   2.702 +
   2.703 +	/* System-Level Interrupt Routing */
   2.704 +
   2.705 +	if (acpi_table_parse_madt
   2.706 +	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
   2.707 +	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
   2.708 +		printk(KERN_ERR PREFIX
   2.709 +		       "Error parsing platform interrupt source entry\n");
   2.710 +
   2.711 +	if (acpi_table_parse_madt
   2.712 +	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
   2.713 +		printk(KERN_ERR PREFIX
   2.714 +		       "Error parsing interrupt source overrides entry\n");
   2.715 +
   2.716 +	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
   2.717 +		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   2.718 +      skip_madt:
   2.719 +
   2.720 +	/*
   2.721 +	 * FADT says whether a legacy keyboard controller is present.
   2.722 +	 * The FADT also contains an SCI_INT line, by which the system
   2.723 +	 * gets interrupts such as power and sleep buttons.  If it's not
   2.724 +	 * on a Legacy interrupt, it needs to be setup.
   2.725 +	 */
   2.726 +	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
   2.727 +		printk(KERN_ERR PREFIX "Can't find FADT\n");
   2.728 +
   2.729 +#ifdef CONFIG_SMP
   2.730 +	if (available_cpus == 0) {
   2.731 +		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
   2.732 +		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
   2.733 +		smp_boot_data.cpu_phys_id[available_cpus] =
   2.734 +		    hard_smp_processor_id();
   2.735 +		available_cpus = 1;	/* We've got at least one of these, no? */
   2.736 +	}
   2.737 +	smp_boot_data.cpu_count = available_cpus;
   2.738 +
   2.739 +	smp_build_cpu_map();
   2.740 +# ifdef CONFIG_ACPI_NUMA
   2.741 +	if (srat_num_cpus == 0) {
   2.742 +		int cpu, i = 1;
   2.743 +		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
   2.744 +			if (smp_boot_data.cpu_phys_id[cpu] !=
   2.745 +			    hard_smp_processor_id())
   2.746 +				node_cpuid[i++].phys_id =
   2.747 +				    smp_boot_data.cpu_phys_id[cpu];
   2.748 +	}
   2.749 +# endif
   2.750 +#endif
   2.751 +#ifdef CONFIG_ACPI_NUMA
   2.752 +	build_cpu_to_node_map();
   2.753 +#endif
   2.754 +	/* Make boot-up look pretty */
   2.755 +	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
   2.756 +	       total_cpus);
   2.757 +	return 0;
   2.758 +}
   2.759 +
   2.760 +int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
   2.761 +{
   2.762 +	int tmp;
   2.763 +
   2.764 +	if (has_8259 && gsi < 16)
   2.765 +		*irq = isa_irq_to_vector(gsi);
   2.766 +	else {
   2.767 +		tmp = gsi_to_irq(gsi);
   2.768 +		if (tmp == -1)
   2.769 +			return -1;
   2.770 +		*irq = tmp;
   2.771 +	}
   2.772 +	return 0;
   2.773 +}
   2.774 +
   2.775 +/*
   2.776 + *  ACPI based hotplug CPU support
   2.777 + */
   2.778 +#ifdef CONFIG_ACPI_HOTPLUG_CPU
   2.779 +static
   2.780 +int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
   2.781 +{
   2.782 +#ifdef CONFIG_ACPI_NUMA
   2.783 +	int pxm_id;
   2.784 +	int nid;
   2.785 +
   2.786 +	pxm_id = acpi_get_pxm(handle);
   2.787 +	/*
   2.788 +	 * We don't have cpu-only-node hotadd. But if the system equips
   2.789 +	 * SRAT table, pxm is already found and node is ready.
   2.790 +  	 * So, just pxm_to_nid(pxm) is OK.
   2.791 +	 * This code here is for the system which doesn't have full SRAT
   2.792 +  	 * table for possible cpus.
   2.793 +	 */
   2.794 +	nid = acpi_map_pxm_to_node(pxm_id);
   2.795 +	node_cpuid[cpu].phys_id = physid;
   2.796 +	node_cpuid[cpu].nid = nid;
   2.797 +#endif
   2.798 +	return (0);
   2.799 +}
   2.800 +
   2.801 +int additional_cpus __initdata = -1;
   2.802 +
   2.803 +static __init int setup_additional_cpus(char *s)
   2.804 +{
   2.805 +	if (s)
   2.806 +		additional_cpus = simple_strtol(s, NULL, 0);
   2.807 +
   2.808 +	return 0;
   2.809 +}
   2.810 +
   2.811 +early_param("additional_cpus", setup_additional_cpus);
   2.812 +
   2.813 +/*
   2.814 + * cpu_possible_map should be static, it cannot change as CPUs
   2.815 + * are onlined, or offlined. The reason is per-cpu data-structures
    2.816 + * are allocated by some modules at init time, and don't expect to
   2.817 + * do this dynamically on cpu arrival/departure.
   2.818 + * cpu_present_map on the other hand can change dynamically.
   2.819 + * In case when cpu_hotplug is not compiled, then we resort to current
   2.820 + * behaviour, which is cpu_possible == cpu_present.
   2.821 + * - Ashok Raj
   2.822 + *
   2.823 + * Three ways to find out the number of additional hotplug CPUs:
   2.824 + * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
   2.825 + * - The user can overwrite it with additional_cpus=NUM
   2.826 + * - Otherwise don't reserve additional CPUs.
   2.827 + */
   2.828 +__init void prefill_possible_map(void)
   2.829 +{
   2.830 +	int i;
   2.831 +	int possible, disabled_cpus;
   2.832 +
   2.833 +	disabled_cpus = total_cpus - available_cpus;
   2.834 +
   2.835 + 	if (additional_cpus == -1) {
   2.836 + 		if (disabled_cpus > 0)
   2.837 +			additional_cpus = disabled_cpus;
   2.838 + 		else
   2.839 +			additional_cpus = 0;
   2.840 + 	}
   2.841 +
   2.842 +	possible = available_cpus + additional_cpus;
   2.843 +
   2.844 +	if (possible > NR_CPUS)
   2.845 +		possible = NR_CPUS;
   2.846 +
   2.847 +	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
   2.848 +		possible, max((possible - available_cpus), 0));
   2.849 +
   2.850 +	for (i = 0; i < possible; i++)
   2.851 +		cpu_set(i, cpu_possible_map);
   2.852 +}
   2.853 +
   2.854 +int acpi_map_lsapic(acpi_handle handle, int *pcpu)
   2.855 +{
   2.856 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   2.857 +	union acpi_object *obj;
   2.858 +	struct acpi_madt_local_sapic *lsapic;
   2.859 +	cpumask_t tmp_map;
   2.860 +	long physid;
   2.861 +	int cpu;
   2.862 +
   2.863 +	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
   2.864 +		return -EINVAL;
   2.865 +
   2.866 +	if (!buffer.length || !buffer.pointer)
   2.867 +		return -EINVAL;
   2.868 +
   2.869 +	obj = buffer.pointer;
   2.870 +	if (obj->type != ACPI_TYPE_BUFFER)
   2.871 +	{
   2.872 +		kfree(buffer.pointer);
   2.873 +		return -EINVAL;
   2.874 +	}
   2.875 +
   2.876 +	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
   2.877 +
   2.878 +	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
   2.879 +	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
   2.880 +		kfree(buffer.pointer);
   2.881 +		return -EINVAL;
   2.882 +	}
   2.883 +
   2.884 +	physid = ((lsapic->id << 8) | (lsapic->eid));
   2.885 +
   2.886 +	kfree(buffer.pointer);
   2.887 +	buffer.length = ACPI_ALLOCATE_BUFFER;
   2.888 +	buffer.pointer = NULL;
   2.889 +
   2.890 +	cpus_complement(tmp_map, cpu_present_map);
   2.891 +	cpu = first_cpu(tmp_map);
   2.892 +	if (cpu >= NR_CPUS)
   2.893 +		return -EINVAL;
   2.894 +
   2.895 +	acpi_map_cpu2node(handle, cpu, physid);
   2.896 +
   2.897 +	cpu_set(cpu, cpu_present_map);
   2.898 +	ia64_cpu_to_sapicid[cpu] = physid;
   2.899 +
   2.900 +	*pcpu = cpu;
   2.901 +	return (0);
   2.902 +}
   2.903 +
   2.904 +EXPORT_SYMBOL(acpi_map_lsapic);
   2.905 +
   2.906 +int acpi_unmap_lsapic(int cpu)
   2.907 +{
   2.908 +	ia64_cpu_to_sapicid[cpu] = -1;
   2.909 +	cpu_clear(cpu, cpu_present_map);
   2.910 +
   2.911 +#ifdef CONFIG_ACPI_NUMA
   2.912 +	/* NUMA-specific cleanup */
   2.913 +#endif
   2.914 +
   2.915 +	return (0);
   2.916 +}
   2.917 +
   2.918 +EXPORT_SYMBOL(acpi_unmap_lsapic);
   2.919 +#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
   2.920 +
   2.921 +#ifdef CONFIG_ACPI_NUMA
   2.922 +static acpi_status __devinit
   2.923 +acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
   2.924 +{
   2.925 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   2.926 +	union acpi_object *obj;
   2.927 +	struct acpi_madt_io_sapic *iosapic;
   2.928 +	unsigned int gsi_base;
   2.929 +	int pxm, node;
   2.930 +
   2.931 +	/* Only care about objects w/ a method that returns the MADT */
   2.932 +	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
   2.933 +		return AE_OK;
   2.934 +
   2.935 +	if (!buffer.length || !buffer.pointer)
   2.936 +		return AE_OK;
   2.937 +
   2.938 +	obj = buffer.pointer;
   2.939 +	if (obj->type != ACPI_TYPE_BUFFER ||
   2.940 +	    obj->buffer.length < sizeof(*iosapic)) {
   2.941 +		kfree(buffer.pointer);
   2.942 +		return AE_OK;
   2.943 +	}
   2.944 +
   2.945 +	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
   2.946 +
   2.947 +	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
   2.948 +		kfree(buffer.pointer);
   2.949 +		return AE_OK;
   2.950 +	}
   2.951 +
   2.952 +	gsi_base = iosapic->global_irq_base;
   2.953 +
   2.954 +	kfree(buffer.pointer);
   2.955 +
   2.956 +	/*
   2.957 +	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
   2.958 +	 * us which node to associate this with.
   2.959 +	 */
   2.960 +	pxm = acpi_get_pxm(handle);
   2.961 +	if (pxm < 0)
   2.962 +		return AE_OK;
   2.963 +
   2.964 +	node = pxm_to_node(pxm);
   2.965 +
   2.966 +	if (node >= MAX_NUMNODES || !node_online(node) ||
   2.967 +	    cpus_empty(node_to_cpumask(node)))
   2.968 +		return AE_OK;
   2.969 +
   2.970 +	/* We know a gsi to node mapping! */
   2.971 +	map_iosapic_to_node(gsi_base, node);
   2.972 +	return AE_OK;
   2.973 +}
   2.974 +
   2.975 +static int __init
   2.976 +acpi_map_iosapics (void)
   2.977 +{
   2.978 +	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
   2.979 +	return 0;
   2.980 +}
   2.981 +
   2.982 +fs_initcall(acpi_map_iosapics);
   2.983 +#endif				/* CONFIG_ACPI_NUMA */
   2.984 +
   2.985 +int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
   2.986 +{
   2.987 +	int err;
   2.988 +
   2.989 +	if ((err = iosapic_init(phys_addr, gsi_base)))
   2.990 +		return err;
   2.991 +
   2.992 +#ifdef CONFIG_ACPI_NUMA
   2.993 +	acpi_map_iosapic(handle, 0, NULL, NULL);
   2.994 +#endif				/* CONFIG_ACPI_NUMA */
   2.995 +
   2.996 +	return 0;
   2.997 +}
   2.998 +
   2.999 +EXPORT_SYMBOL(acpi_register_ioapic);
  2.1000 +
  2.1001 +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
  2.1002 +{
  2.1003 +	return iosapic_remove(gsi_base);
  2.1004 +}
  2.1005 +
  2.1006 +EXPORT_SYMBOL(acpi_unregister_ioapic);
  2.1007 +
  2.1008 +/*
  2.1009 + * acpi_save_state_mem() - save kernel state
  2.1010 + *
  2.1011 + * TBD when IA64 starts to support suspend...
  2.1012 + */
  2.1013 +int acpi_save_state_mem(void) { return 0; } 
  2.1014 +
  2.1015 +/*
  2.1016 + * acpi_restore_state()
  2.1017 + */
  2.1018 +void acpi_restore_state_mem(void) {}
  2.1019 +
  2.1020 +/*
  2.1021 + * do_suspend_lowlevel()
  2.1022 + */
  2.1023 +void do_suspend_lowlevel(void) {}
  2.1024 +
  2.1025 +#endif				/* CONFIG_ACPI */
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/linux-xen/acpi_numa.c	Wed Jun 11 15:39:41 2008 +0900
     3.3 @@ -0,0 +1,263 @@
     3.4 +/*
     3.5 + *  acpi_numa.c - ACPI NUMA support
     3.6 + *
     3.7 + *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
     3.8 + *
     3.9 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    3.10 + *
    3.11 + *  This program is free software; you can redistribute it and/or modify
    3.12 + *  it under the terms of the GNU General Public License as published by
    3.13 + *  the Free Software Foundation; either version 2 of the License, or
    3.14 + *  (at your option) any later version.
    3.15 + *
    3.16 + *  This program is distributed in the hope that it will be useful,
    3.17 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    3.18 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    3.19 + *  GNU General Public License for more details.
    3.20 + *
    3.21 + *  You should have received a copy of the GNU General Public License
    3.22 + *  along with this program; if not, write to the Free Software
    3.23 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    3.24 + *
    3.25 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    3.26 + *
    3.27 + */
    3.28 +#include <linux/module.h>
    3.29 +#include <linux/init.h>
    3.30 +#include <linux/kernel.h>
    3.31 +#include <linux/types.h>
    3.32 +#include <linux/errno.h>
    3.33 +#include <linux/acpi.h>
    3.34 +#include <acpi/acpi_bus.h>
    3.35 +#include <acpi/acmacros.h>
    3.36 +
    3.37 +#define ACPI_NUMA	0x80000000
    3.38 +#define _COMPONENT	ACPI_NUMA
    3.39 +ACPI_MODULE_NAME("numa");
    3.40 +
    3.41 +static nodemask_t nodes_found_map = NODE_MASK_NONE;
    3.42 +
    3.43 +/* maps to convert between proximity domain and logical node ID */
    3.44 +static int pxm_to_node_map[MAX_PXM_DOMAINS]
    3.45 +				= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
    3.46 +static int node_to_pxm_map[MAX_NUMNODES]
    3.47 +				= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
    3.48 +
    3.49 +int pxm_to_node(int pxm)
    3.50 +{
    3.51 +	if (pxm < 0)
    3.52 +		return NID_INVAL;
    3.53 +	return pxm_to_node_map[pxm];
    3.54 +}
    3.55 +
    3.56 +int node_to_pxm(int node)
    3.57 +{
    3.58 +	if (node < 0)
    3.59 +		return PXM_INVAL;
    3.60 +	return node_to_pxm_map[node];
    3.61 +}
    3.62 +
    3.63 +void __acpi_map_pxm_to_node(int pxm, int node)
    3.64 +{
    3.65 +	pxm_to_node_map[pxm] = node;
    3.66 +	node_to_pxm_map[node] = pxm;
    3.67 +}
    3.68 +
    3.69 +int acpi_map_pxm_to_node(int pxm)
    3.70 +{
    3.71 +	int node = pxm_to_node_map[pxm];
    3.72 +
    3.73 +	if (node < 0){
    3.74 +		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
    3.75 +			return NID_INVAL;
    3.76 +		node = first_unset_node(nodes_found_map);
    3.77 +		__acpi_map_pxm_to_node(pxm, node);
    3.78 +		node_set(node, nodes_found_map);
    3.79 +	}
    3.80 +
    3.81 +	return node;
    3.82 +}
    3.83 +
    3.84 +#if 0
    3.85 +void __cpuinit acpi_unmap_pxm_to_node(int node)
    3.86 +{
    3.87 +	int pxm = node_to_pxm_map[node];
    3.88 +	pxm_to_node_map[pxm] = NID_INVAL;
    3.89 +	node_to_pxm_map[node] = PXM_INVAL;
    3.90 +	node_clear(node, nodes_found_map);
    3.91 +}
    3.92 +#endif  /*  0  */
    3.93 +
    3.94 +static void __init
    3.95 +acpi_table_print_srat_entry(struct acpi_subtable_header *header)
    3.96 +{
    3.97 +
    3.98 +	ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
    3.99 +
   3.100 +	if (!header)
   3.101 +		return;
   3.102 +
   3.103 +	switch (header->type) {
   3.104 +
   3.105 +	case ACPI_SRAT_TYPE_CPU_AFFINITY:
   3.106 +#ifdef ACPI_DEBUG_OUTPUT
   3.107 +		{
   3.108 +			struct acpi_srat_cpu_affinity *p =
   3.109 +			    (struct acpi_srat_cpu_affinity *)header;
   3.110 +			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   3.111 +					  "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
   3.112 +					  p->apic_id, p->local_sapic_eid,
   3.113 +					  p->proximity_domain_lo,
   3.114 +					  (p->flags & ACPI_SRAT_CPU_ENABLED)?
   3.115 +					  "enabled" : "disabled"));
   3.116 +		}
   3.117 +#endif				/* ACPI_DEBUG_OUTPUT */
   3.118 +		break;
   3.119 +
   3.120 +	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
   3.121 +#ifdef ACPI_DEBUG_OUTPUT
   3.122 +		{
   3.123 +			struct acpi_srat_mem_affinity *p =
   3.124 +			    (struct acpi_srat_mem_affinity *)header;
   3.125 +			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
   3.126 +					  "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n",
   3.127 +					  (unsigned long)p->base_address,
   3.128 +					  (unsigned long)p->length,
   3.129 +					  p->memory_type, p->proximity_domain,
   3.130 +					  (p->flags & ACPI_SRAT_MEM_ENABLED)?
   3.131 +					  "enabled" : "disabled",
   3.132 +					  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
   3.133 +					  " hot-pluggable" : ""));
   3.134 +		}
   3.135 +#endif				/* ACPI_DEBUG_OUTPUT */
   3.136 +		break;
   3.137 +
   3.138 +	default:
   3.139 +		printk(KERN_WARNING PREFIX
   3.140 +		       "Found unsupported SRAT entry (type = 0x%x)\n",
   3.141 +		       header->type);
   3.142 +		break;
   3.143 +	}
   3.144 +}
   3.145 +
   3.146 +static int __init acpi_parse_slit(struct acpi_table_header *table)
   3.147 +{
   3.148 +	struct acpi_table_slit *slit;
   3.149 +	u32 localities;
   3.150 +
   3.151 +	if (!table)
   3.152 +		return -EINVAL;
   3.153 +
   3.154 +	slit = (struct acpi_table_slit *)table;
   3.155 +
   3.156 +	/* downcast just for %llu vs %lu for i386/ia64  */
   3.157 +	localities = (u32) slit->locality_count;
   3.158 +
   3.159 +	acpi_numa_slit_init(slit);
   3.160 +
   3.161 +	return 0;
   3.162 +}
   3.163 +
   3.164 +static int __init
   3.165 +acpi_parse_processor_affinity(struct acpi_subtable_header * header,
   3.166 +			      const unsigned long end)
   3.167 +{
   3.168 +	struct acpi_srat_cpu_affinity *processor_affinity;
   3.169 +
   3.170 +	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
   3.171 +	if (!processor_affinity)
   3.172 +		return -EINVAL;
   3.173 +
   3.174 +	acpi_table_print_srat_entry(header);
   3.175 +
   3.176 +	/* let the architecture-dependent part do it */
   3.177 +	acpi_numa_processor_affinity_init(processor_affinity);
   3.178 +
   3.179 +	return 0;
   3.180 +}
   3.181 +
   3.182 +static int __init
   3.183 +acpi_parse_memory_affinity(struct acpi_subtable_header * header,
   3.184 +			   const unsigned long end)
   3.185 +{
   3.186 +	struct acpi_srat_mem_affinity *memory_affinity;
   3.187 +
   3.188 +	memory_affinity = (struct acpi_srat_mem_affinity *)header;
   3.189 +	if (!memory_affinity)
   3.190 +		return -EINVAL;
   3.191 +
   3.192 +	acpi_table_print_srat_entry(header);
   3.193 +
   3.194 +	/* let the architecture-dependent part do it */
   3.195 +	acpi_numa_memory_affinity_init(memory_affinity);
   3.196 +
   3.197 +	return 0;
   3.198 +}
   3.199 +
   3.200 +static int __init acpi_parse_srat(struct acpi_table_header *table)
   3.201 +{
   3.202 +	struct acpi_table_srat *srat;
   3.203 +
   3.204 +	if (!table)
   3.205 +		return -EINVAL;
   3.206 +
   3.207 +	srat = (struct acpi_table_srat *)table;
   3.208 +
   3.209 +	return 0;
   3.210 +}
   3.211 +
   3.212 +static int __init
   3.213 +acpi_table_parse_srat(enum acpi_srat_type id,
   3.214 +		      acpi_table_entry_handler handler, unsigned int max_entries)
   3.215 +{
   3.216 +	return acpi_table_parse_entries(ACPI_SIG_SRAT,
   3.217 +					    sizeof(struct acpi_table_srat), id,
   3.218 +					    handler, max_entries);
   3.219 +}
   3.220 +
   3.221 +int __init acpi_numa_init(void)
   3.222 +{
   3.223 +	/* SRAT: Static Resource Affinity Table */
   3.224 +	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
   3.225 +		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
   3.226 +				      acpi_parse_processor_affinity, NR_CPUS);
   3.227 +		acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
   3.228 +				      acpi_parse_memory_affinity,
   3.229 +				      NR_NODE_MEMBLKS);
   3.230 +	}
   3.231 +
   3.232 +	/* SLIT: System Locality Information Table */
   3.233 +	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
   3.234 +
   3.235 +	acpi_numa_arch_fixup();
   3.236 +	return 0;
   3.237 +}
   3.238 +
   3.239 +int acpi_get_pxm(acpi_handle h)
   3.240 +{
   3.241 +	unsigned long pxm;
   3.242 +	acpi_status status;
   3.243 +	acpi_handle handle;
   3.244 +	acpi_handle phandle = h;
   3.245 +
   3.246 +	do {
   3.247 +		handle = phandle;
   3.248 +		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
   3.249 +		if (ACPI_SUCCESS(status))
   3.250 +			return pxm;
   3.251 +		status = acpi_get_parent(handle, &phandle);
   3.252 +	} while (ACPI_SUCCESS(status));
   3.253 +	return -1;
   3.254 +}
   3.255 +
   3.256 +int acpi_get_node(acpi_handle *handle)
   3.257 +{
   3.258 +	int pxm, node = -1;
   3.259 +
   3.260 +	pxm = acpi_get_pxm(handle);
   3.261 +	if (pxm >= 0)
   3.262 +		node = acpi_map_pxm_to_node(pxm);
   3.263 +
   3.264 +	return node;
   3.265 +}
   3.266 +EXPORT_SYMBOL(acpi_get_node);
     4.1 --- a/xen/include/asm-ia64/linux-xen/asm/README.origin	Wed Jun 11 15:38:00 2008 +0900
     4.2 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin	Wed Jun 11 15:39:41 2008 +0900
     4.3 @@ -5,7 +5,6 @@
     4.4  # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
     4.5  # easily updated to future versions of the corresponding Linux files.
     4.6  
     4.7 -acpi.h			-> linux/include/asm-ia64/acpi.h
     4.8  atomic.h		-> linux/include/asm-ia64/atomic.h
     4.9  cache.h			-> linux/include/asm-ia64/cache.h
    4.10  gcc_intrin.h		-> linux/include/asm-ia64/gcc_intrin.h
    4.11 @@ -43,3 +42,6 @@ machvec_pci.h		-> linux/include/asm-ia64
    4.12  
    4.13  # The files below are from Linux-2.6.21
    4.14  pal.h			-> linux/include/asm-ia64/pal.h
    4.15 +
    4.16 +# The files below are from Linux-2.6.26-rc5
    4.17 +acpi.h			-> linux/include/asm-ia64/acpi.h
     5.1 --- a/xen/include/asm-ia64/linux-xen/asm/acpi.h	Wed Jun 11 15:38:00 2008 +0900
     5.2 +++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h	Wed Jun 11 15:39:41 2008 +0900
     5.3 @@ -30,9 +30,12 @@
     5.4  
     5.5  #ifdef __KERNEL__
     5.6  
     5.7 +#include <acpi/pdc_intel.h>
     5.8 +
     5.9  #include <linux/init.h>
    5.10  #include <linux/numa.h>
    5.11  #include <asm/system.h>
    5.12 +#include <asm/numa.h>
    5.13  
    5.14  #define COMPILER_DEPENDENT_INT64	long
    5.15  #define COMPILER_DEPENDENT_UINT64	unsigned long
    5.16 @@ -82,22 +85,28 @@ ia64_acpi_release_global_lock (unsigned 
    5.17  	return old & 0x1;
    5.18  }
    5.19  
    5.20 -#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)				\
    5.21 -	((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
    5.22 +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq)				\
    5.23 +	((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
    5.24  
    5.25 -#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)				\
    5.26 -	((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
    5.27 +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)				\
    5.28 +	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
    5.29  
    5.30  #define acpi_disabled 0	/* ACPI always enabled on IA64 */
    5.31  #define acpi_noirq 0	/* ACPI always enabled on IA64 */
    5.32  #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
    5.33  #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
    5.34 +#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
    5.35  static inline void disable_acpi(void) { }
    5.36  
    5.37  const char *acpi_get_sysname (void);
    5.38  int acpi_request_vector (u32 int_type);
    5.39  int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
    5.40  
    5.41 +/* routines for saving/restoring kernel state */
    5.42 +extern int acpi_save_state_mem(void);
    5.43 +extern void acpi_restore_state_mem(void);
    5.44 +extern unsigned long acpi_wakeup_address;
    5.45 +
    5.46  /*
    5.47   * Record the cpei override flag and current logical cpu. This is
    5.48   * useful for CPU removal.
    5.49 @@ -106,15 +115,52 @@ extern unsigned int can_cpei_retarget(vo
    5.50  extern unsigned int is_cpu_cpei_target(unsigned int cpu);
    5.51  extern void set_cpei_target_cpu(unsigned int cpu);
    5.52  extern unsigned int get_cpei_target_cpu(void);
    5.53 +extern void prefill_possible_map(void);
    5.54 +#ifdef CONFIG_ACPI_HOTPLUG_CPU
    5.55 +extern int additional_cpus;
    5.56 +#else
    5.57 +#define additional_cpus 0
    5.58 +#endif
    5.59  
    5.60  #ifdef CONFIG_ACPI_NUMA
    5.61 -/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
    5.62 +#if MAX_NUMNODES > 256
    5.63 +#define MAX_PXM_DOMAINS MAX_NUMNODES
    5.64 +#else
    5.65  #define MAX_PXM_DOMAINS (256)
    5.66 +#endif
    5.67  extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
    5.68  extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
    5.69  #endif
    5.70  
    5.71 -extern u16 ia64_acpiid_to_sapicid[];
    5.72 +#define acpi_unlazy_tlb(x)
    5.73 +
    5.74 +#ifdef CONFIG_ACPI_NUMA
    5.75 +extern cpumask_t early_cpu_possible_map;
    5.76 +#define for_each_possible_early_cpu(cpu)  \
    5.77 +	for_each_cpu_mask((cpu), early_cpu_possible_map)
    5.78 +
    5.79 +static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
    5.80 +{
    5.81 +	int low_cpu, high_cpu;
    5.82 +	int cpu;
    5.83 +	int next_nid = 0;
    5.84 +
    5.85 +	low_cpu = cpus_weight(early_cpu_possible_map);
    5.86 +
    5.87 +	high_cpu = max(low_cpu, min_cpus);
    5.88 +	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
    5.89 +
    5.90 +	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
    5.91 +		cpu_set(cpu, early_cpu_possible_map);
    5.92 +		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
    5.93 +			node_cpuid[cpu].nid = next_nid;
    5.94 +			next_nid++;
    5.95 +			if (next_nid >= num_online_nodes())
    5.96 +				next_nid = 0;
    5.97 +		}
    5.98 +	}
    5.99 +}
   5.100 +#endif /* CONFIG_ACPI_NUMA */
   5.101  
   5.102  #endif /*__KERNEL__*/
   5.103