direct-io.hg

changeset 15095:642a9bcaf19c

[IA64] Add sparse files for machine vector support

These are necessary to create a Xen machine vector.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author Alex Williamson <alex.williamson@hp.com>
date Sun May 06 20:34:15 2007 -0600 (2007-05-06)
parents e60051ca408f
children d4f59e652078
files linux-2.6-xen-sparse/arch/ia64/kernel/acpi.c linux-2.6-xen-sparse/include/asm-ia64/machvec.h
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/acpi.c	Sun May 06 20:34:15 2007 -0600
     1.3 @@ -0,0 +1,1004 @@
     1.4 +/*
     1.5 + *  acpi.c - Architecture-Specific Low-Level ACPI Support
     1.6 + *
     1.7 + *  Copyright (C) 1999 VA Linux Systems
     1.8 + *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
     1.9 + *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
    1.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    1.11 + *  Copyright (C) 2000 Intel Corp.
    1.12 + *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
    1.13 + *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
    1.14 + *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
    1.15 + *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
    1.16 + *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
    1.17 + *  Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
    1.18 + *
    1.19 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    1.20 + *
    1.21 + *  This program is free software; you can redistribute it and/or modify
    1.22 + *  it under the terms of the GNU General Public License as published by
    1.23 + *  the Free Software Foundation; either version 2 of the License, or
    1.24 + *  (at your option) any later version.
    1.25 + *
    1.26 + *  This program is distributed in the hope that it will be useful,
    1.27 + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.28 + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    1.29 + *  GNU General Public License for more details.
    1.30 + *
    1.31 + *  You should have received a copy of the GNU General Public License
    1.32 + *  along with this program; if not, write to the Free Software
    1.33 + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
    1.34 + *
    1.35 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    1.36 + */
    1.37 +
    1.38 +#include <linux/module.h>
    1.39 +#include <linux/init.h>
    1.40 +#include <linux/kernel.h>
    1.41 +#include <linux/sched.h>
    1.42 +#include <linux/smp.h>
    1.43 +#include <linux/string.h>
    1.44 +#include <linux/types.h>
    1.45 +#include <linux/irq.h>
    1.46 +#include <linux/acpi.h>
    1.47 +#include <linux/efi.h>
    1.48 +#include <linux/mmzone.h>
    1.49 +#include <linux/nodemask.h>
    1.50 +#include <asm/io.h>
    1.51 +#include <asm/iosapic.h>
    1.52 +#include <asm/machvec.h>
    1.53 +#include <asm/page.h>
    1.54 +#include <asm/system.h>
    1.55 +#include <asm/numa.h>
    1.56 +#include <asm/sal.h>
    1.57 +#include <asm/cyclone.h>
    1.58 +
    1.59 +#define BAD_MADT_ENTRY(entry, end) (                                        \
    1.60 +		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
    1.61 +		((acpi_table_entry_header *)entry)->length < sizeof(*entry))
    1.62 +
    1.63 +#define PREFIX			"ACPI: "
    1.64 +
    1.65 +void (*pm_idle) (void);
    1.66 +EXPORT_SYMBOL(pm_idle);
    1.67 +void (*pm_power_off) (void);
    1.68 +EXPORT_SYMBOL(pm_power_off);
    1.69 +
    1.70 +unsigned char acpi_kbd_controller_present = 1;
    1.71 +unsigned char acpi_legacy_devices;
    1.72 +
    1.73 +unsigned int acpi_cpei_override;
    1.74 +unsigned int acpi_cpei_phys_cpuid;
    1.75 +
    1.76 +#define MAX_SAPICS 256
    1.77 +u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
    1.78 +
    1.79 +EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
    1.80 +
    1.81 +const char *acpi_get_sysname(void)
    1.82 +{
    1.83 +#ifdef CONFIG_IA64_GENERIC
    1.84 +	unsigned long rsdp_phys;
    1.85 +	struct acpi20_table_rsdp *rsdp;
    1.86 +	struct acpi_table_xsdt *xsdt;
    1.87 +	struct acpi_table_header *hdr;
    1.88 +
    1.89 +	rsdp_phys = acpi_find_rsdp();
    1.90 +	if (!rsdp_phys) {
    1.91 +		printk(KERN_ERR
    1.92 +		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
    1.93 +		return "dig";
    1.94 +	}
    1.95 +
    1.96 +	rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
    1.97 +	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
    1.98 +		printk(KERN_ERR
    1.99 +		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
   1.100 +		return "dig";
   1.101 +	}
   1.102 +
   1.103 +	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
   1.104 +	hdr = &xsdt->header;
   1.105 +	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
   1.106 +		printk(KERN_ERR
   1.107 +		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
   1.108 +		return "dig";
   1.109 +	}
   1.110 +
   1.111 +	if (!strcmp(hdr->oem_id, "HP")) {
   1.112 +		return "hpzx1";
   1.113 +	} else if (!strcmp(hdr->oem_id, "SGI")) {
   1.114 +		return "sn2";
   1.115 +	}
   1.116 +
   1.117 +	return "dig";
   1.118 +#else
   1.119 +# if defined (CONFIG_IA64_HP_SIM)
   1.120 +	return "hpsim";
   1.121 +# elif defined (CONFIG_IA64_HP_ZX1)
   1.122 +	return "hpzx1";
   1.123 +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
   1.124 +	return "hpzx1_swiotlb";
   1.125 +# elif defined (CONFIG_IA64_SGI_SN2)
   1.126 +	return "sn2";
   1.127 +# elif defined (CONFIG_IA64_DIG)
   1.128 +	return "dig";
   1.129 +# else
   1.130 +#	error Unknown platform.  Fix acpi.c.
   1.131 +# endif
   1.132 +#endif
   1.133 +}
   1.134 +
   1.135 +#ifdef CONFIG_ACPI
   1.136 +
   1.137 +#define ACPI_MAX_PLATFORM_INTERRUPTS	256
   1.138 +
   1.139 +/* Array to record platform interrupt vectors for generic interrupt routing. */
   1.140 +int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
   1.141 +	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
   1.142 +};
   1.143 +
   1.144 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
   1.145 +
   1.146 +/*
   1.147 + * Interrupt routing API for device drivers.  Provides interrupt vector for
   1.148 + * a generic platform event.  Currently only CPEI is implemented.
   1.149 + */
   1.150 +int acpi_request_vector(u32 int_type)
   1.151 +{
   1.152 +	int vector = -1;
   1.153 +
   1.154 +	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
   1.155 +		/* corrected platform error interrupt */
   1.156 +		vector = platform_intr_list[int_type];
   1.157 +	} else
   1.158 +		printk(KERN_ERR
   1.159 +		       "acpi_request_vector(): invalid interrupt type\n");
   1.160 +	return vector;
   1.161 +}
   1.162 +
   1.163 +char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
   1.164 +{
   1.165 +	return __va(phys_addr);
   1.166 +}
   1.167 +
   1.168 +/* --------------------------------------------------------------------------
   1.169 +                            Boot-time Table Parsing
   1.170 +   -------------------------------------------------------------------------- */
   1.171 +
   1.172 +static int total_cpus __initdata;
   1.173 +static int available_cpus __initdata;
   1.174 +struct acpi_table_madt *acpi_madt __initdata;
   1.175 +static u8 has_8259;
   1.176 +
   1.177 +static int __init
   1.178 +acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
   1.179 +			  const unsigned long end)
   1.180 +{
   1.181 +	struct acpi_table_lapic_addr_ovr *lapic;
   1.182 +
   1.183 +	lapic = (struct acpi_table_lapic_addr_ovr *)header;
   1.184 +
   1.185 +	if (BAD_MADT_ENTRY(lapic, end))
   1.186 +		return -EINVAL;
   1.187 +
   1.188 +	if (lapic->address) {
   1.189 +		iounmap(ipi_base_addr);
   1.190 +		ipi_base_addr = ioremap(lapic->address, 0);
   1.191 +	}
   1.192 +	return 0;
   1.193 +}
   1.194 +
   1.195 +static int __init
   1.196 +acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
   1.197 +{
   1.198 +	struct acpi_table_lsapic *lsapic;
   1.199 +
   1.200 +	lsapic = (struct acpi_table_lsapic *)header;
   1.201 +
   1.202 +	if (BAD_MADT_ENTRY(lsapic, end))
   1.203 +		return -EINVAL;
   1.204 +
   1.205 +	if (lsapic->flags.enabled) {
   1.206 +#ifdef CONFIG_SMP
   1.207 +		smp_boot_data.cpu_phys_id[available_cpus] =
   1.208 +		    (lsapic->id << 8) | lsapic->eid;
   1.209 +#endif
   1.210 +		ia64_acpiid_to_sapicid[lsapic->acpi_id] =
   1.211 +		    (lsapic->id << 8) | lsapic->eid;
   1.212 +		++available_cpus;
   1.213 +	}
   1.214 +
   1.215 +	total_cpus++;
   1.216 +	return 0;
   1.217 +}
   1.218 +
   1.219 +static int __init
   1.220 +acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
   1.221 +{
   1.222 +	struct acpi_table_lapic_nmi *lacpi_nmi;
   1.223 +
   1.224 +	lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
   1.225 +
   1.226 +	if (BAD_MADT_ENTRY(lacpi_nmi, end))
   1.227 +		return -EINVAL;
   1.228 +
   1.229 +	/* TBD: Support lapic_nmi entries */
   1.230 +	return 0;
   1.231 +}
   1.232 +
   1.233 +static int __init
   1.234 +acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
   1.235 +{
   1.236 +	struct acpi_table_iosapic *iosapic;
   1.237 +
   1.238 +	iosapic = (struct acpi_table_iosapic *)header;
   1.239 +
   1.240 +	if (BAD_MADT_ENTRY(iosapic, end))
   1.241 +		return -EINVAL;
   1.242 +
   1.243 +	return iosapic_init(iosapic->address, iosapic->global_irq_base);
   1.244 +}
   1.245 +
   1.246 +static unsigned int __initdata acpi_madt_rev;
   1.247 +
   1.248 +static int __init
   1.249 +acpi_parse_plat_int_src(acpi_table_entry_header * header,
   1.250 +			const unsigned long end)
   1.251 +{
   1.252 +	struct acpi_table_plat_int_src *plintsrc;
   1.253 +	int vector;
   1.254 +
   1.255 +	plintsrc = (struct acpi_table_plat_int_src *)header;
   1.256 +
   1.257 +	if (BAD_MADT_ENTRY(plintsrc, end))
   1.258 +		return -EINVAL;
   1.259 +
   1.260 +	/*
   1.261 +	 * Get vector assignment for this interrupt, set attributes,
   1.262 +	 * and program the IOSAPIC routing table.
   1.263 +	 */
   1.264 +	vector = iosapic_register_platform_intr(plintsrc->type,
   1.265 +						plintsrc->global_irq,
   1.266 +						plintsrc->iosapic_vector,
   1.267 +						plintsrc->eid,
   1.268 +						plintsrc->id,
   1.269 +						(plintsrc->flags.polarity ==
   1.270 +						 1) ? IOSAPIC_POL_HIGH :
   1.271 +						IOSAPIC_POL_LOW,
   1.272 +						(plintsrc->flags.trigger ==
   1.273 +						 1) ? IOSAPIC_EDGE :
   1.274 +						IOSAPIC_LEVEL);
   1.275 +
   1.276 +	platform_intr_list[plintsrc->type] = vector;
   1.277 +	if (acpi_madt_rev > 1) {
   1.278 +		acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
   1.279 +	}
   1.280 +
   1.281 +	/*
   1.282 +	 * Save the physical id, so we can check when its being removed
   1.283 +	 */
   1.284 +	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
   1.285 +
   1.286 +	return 0;
   1.287 +}
   1.288 +
   1.289 +#ifdef CONFIG_HOTPLUG_CPU
   1.290 +unsigned int can_cpei_retarget(void)
   1.291 +{
   1.292 +	extern int cpe_vector;
   1.293 +	extern unsigned int force_cpei_retarget;
   1.294 +
   1.295 +	/*
   1.296 +	 * Only if CPEI is supported and the override flag
   1.297 +	 * is present, otherwise return that its re-targettable
   1.298 +	 * if we are in polling mode.
   1.299 +	 */
   1.300 +	if (cpe_vector > 0) {
   1.301 +		if (acpi_cpei_override || force_cpei_retarget)
   1.302 +			return 1;
   1.303 +		else
   1.304 +			return 0;
   1.305 +	}
   1.306 +	return 1;
   1.307 +}
   1.308 +
   1.309 +unsigned int is_cpu_cpei_target(unsigned int cpu)
   1.310 +{
   1.311 +	unsigned int logical_id;
   1.312 +
   1.313 +	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
   1.314 +
   1.315 +	if (logical_id == cpu)
   1.316 +		return 1;
   1.317 +	else
   1.318 +		return 0;
   1.319 +}
   1.320 +
   1.321 +void set_cpei_target_cpu(unsigned int cpu)
   1.322 +{
   1.323 +	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
   1.324 +}
   1.325 +#endif
   1.326 +
   1.327 +unsigned int get_cpei_target_cpu(void)
   1.328 +{
   1.329 +	return acpi_cpei_phys_cpuid;
   1.330 +}
   1.331 +
   1.332 +static int __init
   1.333 +acpi_parse_int_src_ovr(acpi_table_entry_header * header,
   1.334 +		       const unsigned long end)
   1.335 +{
   1.336 +	struct acpi_table_int_src_ovr *p;
   1.337 +
   1.338 +	p = (struct acpi_table_int_src_ovr *)header;
   1.339 +
   1.340 +	if (BAD_MADT_ENTRY(p, end))
   1.341 +		return -EINVAL;
   1.342 +
   1.343 +	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
   1.344 +				 (p->flags.polarity ==
   1.345 +				  1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
   1.346 +				 (p->flags.trigger ==
   1.347 +				  1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
   1.348 +	return 0;
   1.349 +}
   1.350 +
   1.351 +static int __init
   1.352 +acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
   1.353 +{
   1.354 +	struct acpi_table_nmi_src *nmi_src;
   1.355 +
   1.356 +	nmi_src = (struct acpi_table_nmi_src *)header;
   1.357 +
   1.358 +	if (BAD_MADT_ENTRY(nmi_src, end))
   1.359 +		return -EINVAL;
   1.360 +
   1.361 +	/* TBD: Support nimsrc entries */
   1.362 +	return 0;
   1.363 +}
   1.364 +
   1.365 +static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
   1.366 +{
   1.367 +	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
   1.368 +
   1.369 +		/*
   1.370 +		 * Unfortunately ITC_DRIFT is not yet part of the
   1.371 +		 * official SAL spec, so the ITC_DRIFT bit is not
   1.372 +		 * set by the BIOS on this hardware.
   1.373 +		 */
   1.374 +		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
   1.375 +
   1.376 +		cyclone_setup();
   1.377 +	}
   1.378 +}
   1.379 +
   1.380 +static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
   1.381 +{
   1.382 +	if (!phys_addr || !size)
   1.383 +		return -EINVAL;
   1.384 +
   1.385 +	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
   1.386 +
   1.387 +	acpi_madt_rev = acpi_madt->header.revision;
   1.388 +
   1.389 +	/* remember the value for reference after free_initmem() */
   1.390 +#ifdef CONFIG_ITANIUM
   1.391 +	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
   1.392 +#else
   1.393 +	has_8259 = acpi_madt->flags.pcat_compat;
   1.394 +#endif
   1.395 +	iosapic_system_init(has_8259);
   1.396 +
   1.397 +	/* Get base address of IPI Message Block */
   1.398 +
   1.399 +	if (acpi_madt->lapic_address)
   1.400 +		ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);
   1.401 +
   1.402 +	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
   1.403 +
   1.404 +	acpi_madt_oem_check(acpi_madt->header.oem_id,
   1.405 +			    acpi_madt->header.oem_table_id);
   1.406 +
   1.407 +	return 0;
   1.408 +}
   1.409 +
   1.410 +#ifdef CONFIG_ACPI_NUMA
   1.411 +
   1.412 +#undef SLIT_DEBUG
   1.413 +
   1.414 +#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
   1.415 +
   1.416 +static int __initdata srat_num_cpus;	/* number of cpus */
   1.417 +static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
   1.418 +#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
   1.419 +#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
   1.420 +static struct acpi_table_slit __initdata *slit_table;
   1.421 +
   1.422 +static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
   1.423 +{
   1.424 +	int pxm;
   1.425 +
   1.426 +	pxm = pa->proximity_domain;
   1.427 +	if (ia64_platform_is("sn2"))
   1.428 +		pxm += pa->reserved[0] << 8;
   1.429 +	return pxm;
   1.430 +}
   1.431 +
   1.432 +static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
   1.433 +{
   1.434 +	int pxm;
   1.435 +
   1.436 +	pxm = ma->proximity_domain;
   1.437 +	if (ia64_platform_is("sn2"))
   1.438 +		pxm += ma->reserved1[0] << 8;
   1.439 +	return pxm;
   1.440 +}
   1.441 +
   1.442 +/*
   1.443 + * ACPI 2.0 SLIT (System Locality Information Table)
   1.444 + * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
   1.445 + */
   1.446 +void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
   1.447 +{
   1.448 +	u32 len;
   1.449 +
   1.450 +	len = sizeof(struct acpi_table_header) + 8
   1.451 +	    + slit->localities * slit->localities;
   1.452 +	if (slit->header.length != len) {
   1.453 +		printk(KERN_ERR
   1.454 +		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
   1.455 +		       len, slit->header.length);
   1.456 +		memset(numa_slit, 10, sizeof(numa_slit));
   1.457 +		return;
   1.458 +	}
   1.459 +	slit_table = slit;
   1.460 +}
   1.461 +
   1.462 +void __init
   1.463 +acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
   1.464 +{
   1.465 +	int pxm;
   1.466 +
   1.467 +	if (!pa->flags.enabled)
   1.468 +		return;
   1.469 +
   1.470 +	pxm = get_processor_proximity_domain(pa);
   1.471 +
   1.472 +	/* record this node in proximity bitmap */
   1.473 +	pxm_bit_set(pxm);
   1.474 +
   1.475 +	node_cpuid[srat_num_cpus].phys_id =
   1.476 +	    (pa->apic_id << 8) | (pa->lsapic_eid);
   1.477 +	/* nid should be overridden as logical node id later */
   1.478 +	node_cpuid[srat_num_cpus].nid = pxm;
   1.479 +	srat_num_cpus++;
   1.480 +}
   1.481 +
   1.482 +void __init
   1.483 +acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
   1.484 +{
   1.485 +	unsigned long paddr, size;
   1.486 +	int pxm;
   1.487 +	struct node_memblk_s *p, *q, *pend;
   1.488 +
   1.489 +	pxm = get_memory_proximity_domain(ma);
   1.490 +
   1.491 +	/* fill node memory chunk structure */
   1.492 +	paddr = ma->base_addr_hi;
   1.493 +	paddr = (paddr << 32) | ma->base_addr_lo;
   1.494 +	size = ma->length_hi;
   1.495 +	size = (size << 32) | ma->length_lo;
   1.496 +
   1.497 +	/* Ignore disabled entries */
   1.498 +	if (!ma->flags.enabled)
   1.499 +		return;
   1.500 +
   1.501 +	/* record this node in proximity bitmap */
   1.502 +	pxm_bit_set(pxm);
   1.503 +
   1.504 +	/* Insertion sort based on base address */
   1.505 +	pend = &node_memblk[num_node_memblks];
   1.506 +	for (p = &node_memblk[0]; p < pend; p++) {
   1.507 +		if (paddr < p->start_paddr)
   1.508 +			break;
   1.509 +	}
   1.510 +	if (p < pend) {
   1.511 +		for (q = pend - 1; q >= p; q--)
   1.512 +			*(q + 1) = *q;
   1.513 +	}
   1.514 +	p->start_paddr = paddr;
   1.515 +	p->size = size;
   1.516 +	p->nid = pxm;
   1.517 +	num_node_memblks++;
   1.518 +}
   1.519 +
   1.520 +void __init acpi_numa_arch_fixup(void)
   1.521 +{
   1.522 +	int i, j, node_from, node_to;
   1.523 +
   1.524 +	/* If there's no SRAT, fix the phys_id and mark node 0 online */
   1.525 +	if (srat_num_cpus == 0) {
   1.526 +		node_set_online(0);
   1.527 +		node_cpuid[0].phys_id = hard_smp_processor_id();
   1.528 +		return;
   1.529 +	}
   1.530 +
   1.531 +	/*
   1.532 +	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
   1.533 +	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
   1.534 +	 */
   1.535 +	nodes_clear(node_online_map);
   1.536 +	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
   1.537 +		if (pxm_bit_test(i)) {
   1.538 +			int nid = acpi_map_pxm_to_node(i);
   1.539 +			node_set_online(nid);
   1.540 +		}
   1.541 +	}
   1.542 +
   1.543 +	/* set logical node id in memory chunk structure */
   1.544 +	for (i = 0; i < num_node_memblks; i++)
   1.545 +		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
   1.546 +
   1.547 +	/* assign memory bank numbers for each chunk on each node */
   1.548 +	for_each_online_node(i) {
   1.549 +		int bank;
   1.550 +
   1.551 +		bank = 0;
   1.552 +		for (j = 0; j < num_node_memblks; j++)
   1.553 +			if (node_memblk[j].nid == i)
   1.554 +				node_memblk[j].bank = bank++;
   1.555 +	}
   1.556 +
   1.557 +	/* set logical node id in cpu structure */
   1.558 +	for (i = 0; i < srat_num_cpus; i++)
   1.559 +		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
   1.560 +
   1.561 +	printk(KERN_INFO "Number of logical nodes in system = %d\n",
   1.562 +	       num_online_nodes());
   1.563 +	printk(KERN_INFO "Number of memory chunks in system = %d\n",
   1.564 +	       num_node_memblks);
   1.565 +
   1.566 +	if (!slit_table)
   1.567 +		return;
   1.568 +	memset(numa_slit, -1, sizeof(numa_slit));
   1.569 +	for (i = 0; i < slit_table->localities; i++) {
   1.570 +		if (!pxm_bit_test(i))
   1.571 +			continue;
   1.572 +		node_from = pxm_to_node(i);
   1.573 +		for (j = 0; j < slit_table->localities; j++) {
   1.574 +			if (!pxm_bit_test(j))
   1.575 +				continue;
   1.576 +			node_to = pxm_to_node(j);
   1.577 +			node_distance(node_from, node_to) =
   1.578 +			    slit_table->entry[i * slit_table->localities + j];
   1.579 +		}
   1.580 +	}
   1.581 +
   1.582 +#ifdef SLIT_DEBUG
   1.583 +	printk("ACPI 2.0 SLIT locality table:\n");
   1.584 +	for_each_online_node(i) {
   1.585 +		for_each_online_node(j)
   1.586 +		    printk("%03d ", node_distance(i, j));
   1.587 +		printk("\n");
   1.588 +	}
   1.589 +#endif
   1.590 +}
   1.591 +#endif				/* CONFIG_ACPI_NUMA */
   1.592 +
   1.593 +/*
   1.594 + * success: return IRQ number (>=0)
   1.595 + * failure: return < 0
   1.596 + */
   1.597 +int acpi_register_gsi(u32 gsi, int triggering, int polarity)
   1.598 +{
   1.599 +	if (has_8259 && gsi < 16)
   1.600 +		return isa_irq_to_vector(gsi);
   1.601 +
   1.602 +	return iosapic_register_intr(gsi,
   1.603 +				     (polarity ==
   1.604 +				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
   1.605 +				     IOSAPIC_POL_LOW,
   1.606 +				     (triggering ==
   1.607 +				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
   1.608 +				     IOSAPIC_LEVEL);
   1.609 +}
   1.610 +
   1.611 +EXPORT_SYMBOL(acpi_register_gsi);
   1.612 +
   1.613 +void acpi_unregister_gsi(u32 gsi)
   1.614 +{
   1.615 +	iosapic_unregister_intr(gsi);
   1.616 +}
   1.617 +
   1.618 +EXPORT_SYMBOL(acpi_unregister_gsi);
   1.619 +
   1.620 +static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
   1.621 +{
   1.622 +	struct acpi_table_header *fadt_header;
   1.623 +	struct fadt_descriptor *fadt;
   1.624 +
   1.625 +	if (!phys_addr || !size)
   1.626 +		return -EINVAL;
   1.627 +
   1.628 +	fadt_header = (struct acpi_table_header *)__va(phys_addr);
   1.629 +	if (fadt_header->revision != 3)
   1.630 +		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
   1.631 +
   1.632 +	fadt = (struct fadt_descriptor *)fadt_header;
   1.633 +
   1.634 +	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
   1.635 +		acpi_kbd_controller_present = 0;
   1.636 +
   1.637 +	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
   1.638 +		acpi_legacy_devices = 1;
   1.639 +
   1.640 +	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   1.641 +	return 0;
   1.642 +}
   1.643 +
   1.644 +unsigned long __init acpi_find_rsdp(void)
   1.645 +{
   1.646 +	unsigned long rsdp_phys = 0;
   1.647 +
   1.648 +	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
   1.649 +		rsdp_phys = efi.acpi20;
   1.650 +	else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
   1.651 +		printk(KERN_WARNING PREFIX
   1.652 +		       "v1.0/r0.71 tables no longer supported\n");
   1.653 +	return rsdp_phys;
   1.654 +}
   1.655 +
   1.656 +int __init acpi_boot_init(void)
   1.657 +{
   1.658 +
   1.659 +	/*
   1.660 +	 * MADT
   1.661 +	 * ----
   1.662 +	 * Parse the Multiple APIC Description Table (MADT), if exists.
   1.663 +	 * Note that this table provides platform SMP configuration
   1.664 +	 * information -- the successor to MPS tables.
   1.665 +	 */
   1.666 +
   1.667 +	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
   1.668 +		printk(KERN_ERR PREFIX "Can't find MADT\n");
   1.669 +		goto skip_madt;
   1.670 +	}
   1.671 +
   1.672 +	/* Local APIC */
   1.673 +
   1.674 +	if (acpi_table_parse_madt
   1.675 +	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
   1.676 +		printk(KERN_ERR PREFIX
   1.677 +		       "Error parsing LAPIC address override entry\n");
   1.678 +
   1.679 +	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
   1.680 +	    < 1)
   1.681 +		printk(KERN_ERR PREFIX
   1.682 +		       "Error parsing MADT - no LAPIC entries\n");
   1.683 +
   1.684 +	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
   1.685 +	    < 0)
   1.686 +		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
   1.687 +
   1.688 +	/* I/O APIC */
   1.689 +
   1.690 +	if (acpi_table_parse_madt
   1.691 +	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
   1.692 +		printk(KERN_ERR PREFIX
   1.693 +		       "Error parsing MADT - no IOSAPIC entries\n");
   1.694 +
   1.695 +	/* System-Level Interrupt Routing */
   1.696 +
   1.697 +	if (acpi_table_parse_madt
   1.698 +	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
   1.699 +	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
   1.700 +		printk(KERN_ERR PREFIX
   1.701 +		       "Error parsing platform interrupt source entry\n");
   1.702 +
   1.703 +	if (acpi_table_parse_madt
   1.704 +	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
   1.705 +		printk(KERN_ERR PREFIX
   1.706 +		       "Error parsing interrupt source overrides entry\n");
   1.707 +
   1.708 +	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
   1.709 +		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   1.710 +      skip_madt:
   1.711 +
   1.712 +	/*
   1.713 +	 * FADT says whether a legacy keyboard controller is present.
   1.714 +	 * The FADT also contains an SCI_INT line, by which the system
   1.715 +	 * gets interrupts such as power and sleep buttons.  If it's not
   1.716 +	 * on a Legacy interrupt, it needs to be setup.
   1.717 +	 */
   1.718 +	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
   1.719 +		printk(KERN_ERR PREFIX "Can't find FADT\n");
   1.720 +
   1.721 +#ifdef CONFIG_SMP
   1.722 +	if (available_cpus == 0) {
   1.723 +		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
   1.724 +		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
   1.725 +		smp_boot_data.cpu_phys_id[available_cpus] =
   1.726 +		    hard_smp_processor_id();
   1.727 +		available_cpus = 1;	/* We've got at least one of these, no? */
   1.728 +	}
   1.729 +	smp_boot_data.cpu_count = available_cpus;
   1.730 +
   1.731 +	smp_build_cpu_map();
   1.732 +# ifdef CONFIG_ACPI_NUMA
   1.733 +	if (srat_num_cpus == 0) {
   1.734 +		int cpu, i = 1;
   1.735 +		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
   1.736 +			if (smp_boot_data.cpu_phys_id[cpu] !=
   1.737 +			    hard_smp_processor_id())
   1.738 +				node_cpuid[i++].phys_id =
   1.739 +				    smp_boot_data.cpu_phys_id[cpu];
   1.740 +	}
   1.741 +# endif
   1.742 +#endif
   1.743 +#ifdef CONFIG_ACPI_NUMA
   1.744 +	build_cpu_to_node_map();
   1.745 +#endif
   1.746 +	/* Make boot-up look pretty */
   1.747 +	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
   1.748 +	       total_cpus);
   1.749 +	return 0;
   1.750 +}
   1.751 +
   1.752 +int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
   1.753 +{
   1.754 +	int vector;
   1.755 +
   1.756 +	if (has_8259 && gsi < 16)
   1.757 +		*irq = isa_irq_to_vector(gsi);
   1.758 +	else {
   1.759 +		vector = gsi_to_vector(gsi);
   1.760 +		if (vector == -1)
   1.761 +			return -1;
   1.762 +
   1.763 +		*irq = vector;
   1.764 +	}
   1.765 +	return 0;
   1.766 +}
   1.767 +
   1.768 +/*
   1.769 + *  ACPI based hotplug CPU support
   1.770 + */
   1.771 +#ifdef CONFIG_ACPI_HOTPLUG_CPU
   1.772 +static
   1.773 +int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
   1.774 +{
   1.775 +#ifdef CONFIG_ACPI_NUMA
   1.776 +	int pxm_id;
   1.777 +
   1.778 +	pxm_id = acpi_get_pxm(handle);
   1.779 +
   1.780 +	/*
   1.781 +	 * Assuming that the container driver would have set the proximity
   1.782 +	 * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag
   1.783 +	 */
   1.784 +	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id);
   1.785 +
   1.786 +	node_cpuid[cpu].phys_id = physid;
   1.787 +#endif
   1.788 +	return (0);
   1.789 +}
   1.790 +
   1.791 +int additional_cpus __initdata = -1;
   1.792 +
   1.793 +static __init int setup_additional_cpus(char *s)
   1.794 +{
   1.795 +	if (s)
   1.796 +		additional_cpus = simple_strtol(s, NULL, 0);
   1.797 +
   1.798 +	return 0;
   1.799 +}
   1.800 +
   1.801 +early_param("additional_cpus", setup_additional_cpus);
   1.802 +
   1.803 +/*
   1.804 + * cpu_possible_map should be static, it cannot change as cpu's
   1.805 + * are onlined, or offlined. The reason is per-cpu data-structures
   1.806 + * are allocated by some modules at init time, and dont expect to
   1.807 + * do this dynamically on cpu arrival/departure.
   1.808 + * cpu_present_map on the other hand can change dynamically.
   1.809 + * In case when cpu_hotplug is not compiled, then we resort to current
   1.810 + * behaviour, which is cpu_possible == cpu_present.
   1.811 + * - Ashok Raj
   1.812 + *
   1.813 + * Three ways to find out the number of additional hotplug CPUs:
   1.814 + * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
   1.815 + * - The user can overwrite it with additional_cpus=NUM
   1.816 + * - Otherwise don't reserve additional CPUs.
   1.817 + */
   1.818 +__init void prefill_possible_map(void)
   1.819 +{
   1.820 +	int i;
   1.821 +	int possible, disabled_cpus;
   1.822 +
   1.823 +	disabled_cpus = total_cpus - available_cpus;
   1.824 +
   1.825 + 	if (additional_cpus == -1) {
   1.826 + 		if (disabled_cpus > 0)
   1.827 +			additional_cpus = disabled_cpus;
   1.828 + 		else
   1.829 +			additional_cpus = 0;
   1.830 + 	}
   1.831 +
   1.832 +	possible = available_cpus + additional_cpus;
   1.833 +
   1.834 +	if (possible > NR_CPUS)
   1.835 +		possible = NR_CPUS;
   1.836 +
   1.837 +	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
   1.838 +		possible, max((possible - available_cpus), 0));
   1.839 +
   1.840 +	for (i = 0; i < possible; i++)
   1.841 +		cpu_set(i, cpu_possible_map);
   1.842 +}
   1.843 +
   1.844 +int acpi_map_lsapic(acpi_handle handle, int *pcpu)
   1.845 +{
   1.846 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   1.847 +	union acpi_object *obj;
   1.848 +	struct acpi_table_lsapic *lsapic;
   1.849 +	cpumask_t tmp_map;
   1.850 +	long physid;
   1.851 +	int cpu;
   1.852 +
   1.853 +	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
   1.854 +		return -EINVAL;
   1.855 +
   1.856 +	if (!buffer.length || !buffer.pointer)
   1.857 +		return -EINVAL;
   1.858 +
   1.859 +	obj = buffer.pointer;
   1.860 +	if (obj->type != ACPI_TYPE_BUFFER ||
   1.861 +	    obj->buffer.length < sizeof(*lsapic)) {
   1.862 +		kfree(buffer.pointer);
   1.863 +		return -EINVAL;
   1.864 +	}
   1.865 +
   1.866 +	lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;
   1.867 +
   1.868 +	if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
   1.869 +	    (!lsapic->flags.enabled)) {
   1.870 +		kfree(buffer.pointer);
   1.871 +		return -EINVAL;
   1.872 +	}
   1.873 +
   1.874 +	physid = ((lsapic->id << 8) | (lsapic->eid));
   1.875 +
   1.876 +	kfree(buffer.pointer);
   1.877 +	buffer.length = ACPI_ALLOCATE_BUFFER;
   1.878 +	buffer.pointer = NULL;
   1.879 +
   1.880 +	cpus_complement(tmp_map, cpu_present_map);
   1.881 +	cpu = first_cpu(tmp_map);
   1.882 +	if (cpu >= NR_CPUS)
   1.883 +		return -EINVAL;
   1.884 +
   1.885 +	acpi_map_cpu2node(handle, cpu, physid);
   1.886 +
   1.887 +	cpu_set(cpu, cpu_present_map);
   1.888 +	ia64_cpu_to_sapicid[cpu] = physid;
   1.889 +	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
   1.890 +
   1.891 +	*pcpu = cpu;
   1.892 +	return (0);
   1.893 +}
   1.894 +
   1.895 +EXPORT_SYMBOL(acpi_map_lsapic);
   1.896 +
   1.897 +int acpi_unmap_lsapic(int cpu)
   1.898 +{
   1.899 +	int i;
   1.900 +
   1.901 +	for (i = 0; i < MAX_SAPICS; i++) {
   1.902 +		if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
   1.903 +			ia64_acpiid_to_sapicid[i] = -1;
   1.904 +			break;
   1.905 +		}
   1.906 +	}
   1.907 +	ia64_cpu_to_sapicid[cpu] = -1;
   1.908 +	cpu_clear(cpu, cpu_present_map);
   1.909 +
   1.910 +#ifdef CONFIG_ACPI_NUMA
   1.911 +	/* NUMA specific cleanup's */
   1.912 +#endif
   1.913 +
   1.914 +	return (0);
   1.915 +}
   1.916 +
   1.917 +EXPORT_SYMBOL(acpi_unmap_lsapic);
   1.918 +#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
   1.919 +
   1.920 +#ifdef CONFIG_ACPI_NUMA
   1.921 +static acpi_status __devinit
   1.922 +acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
   1.923 +{
   1.924 +	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   1.925 +	union acpi_object *obj;
   1.926 +	struct acpi_table_iosapic *iosapic;
   1.927 +	unsigned int gsi_base;
   1.928 +	int pxm, node;
   1.929 +
   1.930 +	/* Only care about objects w/ a method that returns the MADT */
   1.931 +	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
   1.932 +		return AE_OK;
   1.933 +
   1.934 +	if (!buffer.length || !buffer.pointer)
   1.935 +		return AE_OK;
   1.936 +
   1.937 +	obj = buffer.pointer;
   1.938 +	if (obj->type != ACPI_TYPE_BUFFER ||
   1.939 +	    obj->buffer.length < sizeof(*iosapic)) {
   1.940 +		kfree(buffer.pointer);
   1.941 +		return AE_OK;
   1.942 +	}
   1.943 +
   1.944 +	iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
   1.945 +
   1.946 +	if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
   1.947 +		kfree(buffer.pointer);
   1.948 +		return AE_OK;
   1.949 +	}
   1.950 +
   1.951 +	gsi_base = iosapic->global_irq_base;
   1.952 +
   1.953 +	kfree(buffer.pointer);
   1.954 +
   1.955 +	/*
   1.956 +	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
   1.957 +	 * us which node to associate this with.
   1.958 +	 */
   1.959 +	pxm = acpi_get_pxm(handle);
   1.960 +	if (pxm < 0)
   1.961 +		return AE_OK;
   1.962 +
   1.963 +	node = pxm_to_node(pxm);
   1.964 +
   1.965 +	if (node >= MAX_NUMNODES || !node_online(node) ||
   1.966 +	    cpus_empty(node_to_cpumask(node)))
   1.967 +		return AE_OK;
   1.968 +
   1.969 +	/* We know a gsi to node mapping! */
   1.970 +	map_iosapic_to_node(gsi_base, node);
   1.971 +	return AE_OK;
   1.972 +}
   1.973 +
/*
 * Boot-time pass: walk every device in the ACPI namespace and let
 * acpi_map_iosapic() record a GSI-base -> NUMA-node mapping for each
 * IOSAPIC that advertises one via _MAT/_PXM.
 */
static int __init
acpi_map_iosapics (void)
{
	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
	return 0;
}

/* fs_initcall: late enough that ACPI and NUMA data are initialized. */
fs_initcall(acpi_map_iosapics);
   1.982 +#endif				/* CONFIG_ACPI_NUMA */
   1.983 +
   1.984 +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
   1.985 +{
   1.986 +	int err;
   1.987 +
   1.988 +	if ((err = iosapic_init(phys_addr, gsi_base)))
   1.989 +		return err;
   1.990 +
   1.991 +#ifdef CONFIG_ACPI_NUMA
   1.992 +	acpi_map_iosapic(handle, 0, NULL, NULL);
   1.993 +#endif				/* CONFIG_ACPI_NUMA */
   1.994 +
   1.995 +	return 0;
   1.996 +}
   1.997 +
   1.998 +EXPORT_SYMBOL(acpi_register_ioapic);
   1.999 +
/*
 * Inverse of acpi_register_ioapic(): remove the IOSAPIC registered at
 * gsi_base.  The handle argument is unused; removal is keyed on the
 * GSI base alone.  Returns iosapic_remove()'s result.
 */
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
	return iosapic_remove(gsi_base);
}

EXPORT_SYMBOL(acpi_unregister_ioapic);
  1.1006 +
  1.1007 +#endif				/* CONFIG_ACPI */
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/machvec.h	Sun May 06 20:34:15 2007 -0600
     2.3 @@ -0,0 +1,411 @@
     2.4 +/*
     2.5 + * Machine vector for IA-64.
     2.6 + *
     2.7 + * Copyright (C) 1999 Silicon Graphics, Inc.
     2.8 + * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
     2.9 + * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
    2.10 + * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
    2.11 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    2.12 + */
    2.13 +#ifndef _ASM_IA64_MACHVEC_H
    2.14 +#define _ASM_IA64_MACHVEC_H
    2.15 +
    2.16 +#include <linux/types.h>
    2.17 +
    2.18 +/* forward declarations: */
    2.19 +struct device;
    2.20 +struct pt_regs;
    2.21 +struct scatterlist;
    2.22 +struct page;
    2.23 +struct mm_struct;
    2.24 +struct pci_bus;
    2.25 +struct task_struct;
    2.26 +
/*
 * Signatures for every per-platform hook in the machine vector.  A
 * platform header (asm/machvec_*.h, selected below) supplies functions
 * matching these signatures and exposes them via the platform_* macros.
 */
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
				       u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
					u8 size);
typedef void ia64_mv_migrate_t(struct task_struct * task);

/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);

/*
 * WARNING: The legacy I/O space is _architected_.  Platforms are
 * expected to follow this architected model (see Section 10.7 in the
 * IA-64 Architecture Software Developer's Manual).  Unfortunately,
 * some broken machines do not follow that model, which is why we have
 * to make the inX/outX operations part of the machine vector.
 * Platform designers should follow the architected model whenever
 * possible.
 */
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
typedef int ia64_mv_msi_init_t (void);
    2.82 +
/*
 * Default no-op implementations used to fill machine-vector slots a
 * platform does not override.  Three variants exist solely to match
 * the differing hook signatures (void, mm_struct *, task_struct *).
 */
static inline void
machvec_noop (void)
{
}

static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}

static inline void
machvec_noop_task (struct task_struct *task)
{
}
    2.97 +
    2.98 +extern void machvec_setup (char **);
    2.99 +extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
   2.100 +extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
   2.101 +extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
   2.102 +extern void machvec_tlb_migrate_finish (struct mm_struct *);
   2.103 +
   2.104 +# if defined (CONFIG_IA64_HP_SIM)
   2.105 +#  include <asm/machvec_hpsim.h>
   2.106 +# elif defined (CONFIG_IA64_DIG)
   2.107 +#  include <asm/machvec_dig.h>
   2.108 +# elif defined (CONFIG_IA64_HP_ZX1)
   2.109 +#  include <asm/machvec_hpzx1.h>
   2.110 +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
   2.111 +#  include <asm/machvec_hpzx1_swiotlb.h>
   2.112 +# elif defined (CONFIG_IA64_SGI_SN2)
   2.113 +#  include <asm/machvec_sn2.h>
   2.114 +# elif defined (CONFIG_IA64_GENERIC)
   2.115 +
   2.116 +# ifdef MACHVEC_PLATFORM_HEADER
   2.117 +#  include MACHVEC_PLATFORM_HEADER
   2.118 +# else
   2.119 +#  define platform_name		ia64_mv.name
   2.120 +#  define platform_setup	ia64_mv.setup
   2.121 +#  define platform_cpu_init	ia64_mv.cpu_init
   2.122 +#  define platform_irq_init	ia64_mv.irq_init
   2.123 +#  define platform_send_ipi	ia64_mv.send_ipi
   2.124 +#  define platform_timer_interrupt	ia64_mv.timer_interrupt
   2.125 +#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
   2.126 +#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
   2.127 +#  define platform_dma_init		ia64_mv.dma_init
   2.128 +#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
   2.129 +#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
   2.130 +#  define platform_dma_map_single	ia64_mv.dma_map_single
   2.131 +#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
   2.132 +#  define platform_dma_map_sg		ia64_mv.dma_map_sg
   2.133 +#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
   2.134 +#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
   2.135 +#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
   2.136 +#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
   2.137 +#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
   2.138 +#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
   2.139 +#  define platform_dma_supported	ia64_mv.dma_supported
   2.140 +#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
   2.141 +#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
   2.142 +#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
   2.143 +#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
   2.144 +#  define platform_inb		ia64_mv.inb
   2.145 +#  define platform_inw		ia64_mv.inw
   2.146 +#  define platform_inl		ia64_mv.inl
   2.147 +#  define platform_outb		ia64_mv.outb
   2.148 +#  define platform_outw		ia64_mv.outw
   2.149 +#  define platform_outl		ia64_mv.outl
   2.150 +#  define platform_mmiowb	ia64_mv.mmiowb
   2.151 +#  define platform_readb        ia64_mv.readb
   2.152 +#  define platform_readw        ia64_mv.readw
   2.153 +#  define platform_readl        ia64_mv.readl
   2.154 +#  define platform_readq        ia64_mv.readq
   2.155 +#  define platform_readb_relaxed        ia64_mv.readb_relaxed
   2.156 +#  define platform_readw_relaxed        ia64_mv.readw_relaxed
   2.157 +#  define platform_readl_relaxed        ia64_mv.readl_relaxed
   2.158 +#  define platform_readq_relaxed        ia64_mv.readq_relaxed
   2.159 +#  define platform_migrate		ia64_mv.migrate
   2.160 +#  define platform_msi_init		ia64_mv.msi_init
   2.161 +# endif
   2.162 +
/* __attribute__((__aligned__(16))) is required to make the size of the
 * structure a multiple of 16 bytes.
 * This fills up the holes created because of section 3.3.1 of the
 * Software Conventions guide.
 */
/*
 * One function pointer per platform-overridable hook.
 *
 * NOTE: MACHVEC_INIT() initializes this structure positionally, so the
 * field order here must match the initializer order there exactly.
 */
struct ia64_machine_vector {
	const char *name;
	/* Boot, CPU, interrupt, and timer setup hooks. */
	ia64_mv_setup_t *setup;
	ia64_mv_cpu_init_t *cpu_init;
	ia64_mv_irq_init_t *irq_init;
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	/* TLB maintenance hooks. */
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	/* DMA-mapping hooks; defaults fall back to swiotlb (see the
	 * #ifndef blocks at the end of this file). */
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
	ia64_mv_dma_free_coherent *dma_free_coherent;
	ia64_mv_dma_map_single *dma_map_single;
	ia64_mv_dma_unmap_single *dma_unmap_single;
	ia64_mv_dma_map_sg *dma_map_sg;
	ia64_mv_dma_unmap_sg *dma_unmap_sg;
	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
	ia64_mv_dma_mapping_error *dma_mapping_error;
	ia64_mv_dma_supported *dma_supported;
	ia64_mv_local_vector_to_irq *local_vector_to_irq;
	/* Legacy PCI I/O space access. */
	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
	ia64_mv_pci_legacy_read_t *pci_legacy_read;
	ia64_mv_pci_legacy_write_t *pci_legacy_write;
	/* Port and MMIO accessors (see the WARNING comment above about
	 * the architected legacy I/O space). */
	ia64_mv_inb_t *inb;
	ia64_mv_inw_t *inw;
	ia64_mv_inl_t *inl;
	ia64_mv_outb_t *outb;
	ia64_mv_outw_t *outw;
	ia64_mv_outl_t *outl;
	ia64_mv_mmiowb_t *mmiowb;
	ia64_mv_readb_t *readb;
	ia64_mv_readw_t *readw;
	ia64_mv_readl_t *readl;
	ia64_mv_readq_t *readq;
	ia64_mv_readb_relaxed_t *readb_relaxed;
	ia64_mv_readw_relaxed_t *readw_relaxed;
	ia64_mv_readl_relaxed_t *readl_relaxed;
	ia64_mv_readq_relaxed_t *readq_relaxed;
	ia64_mv_migrate_t *migrate;
	ia64_mv_msi_init_t *msi_init;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
   2.212 +
/*
 * Positional initializer for struct ia64_machine_vector: entries below
 * must stay in the exact order of the structure's fields.  Each
 * platform_* name resolves either to the platform's override or to the
 * default chosen by the #ifndef blocks at the end of this file.
 */
#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_alloc_coherent,		\
	platform_dma_free_coherent,		\
	platform_dma_map_single,		\
	platform_dma_unmap_single,		\
	platform_dma_map_sg,			\
	platform_dma_unmap_sg,			\
	platform_dma_sync_single_for_cpu,	\
	platform_dma_sync_sg_for_cpu,		\
	platform_dma_sync_single_for_device,	\
	platform_dma_sync_sg_for_device,	\
	platform_dma_mapping_error,			\
	platform_dma_supported,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
	platform_migrate,			\
	platform_msi_init,			\
}
   2.258 +
   2.259 +extern struct ia64_machine_vector ia64_mv;
   2.260 +extern void machvec_init (const char *name);
   2.261 +
   2.262 +# else
   2.263 +#  error Unknown configuration.  Update asm-ia64/machvec.h.
   2.264 +# endif /* CONFIG_IA64_GENERIC */
   2.265 +
   2.266 +/*
   2.267 + * Declare default routines which aren't declared anywhere else:
   2.268 + */
   2.269 +extern ia64_mv_dma_init			swiotlb_init;
   2.270 +extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
   2.271 +extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
   2.272 +extern ia64_mv_dma_map_single		swiotlb_map_single;
   2.273 +extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
   2.274 +extern ia64_mv_dma_map_sg		swiotlb_map_sg;
   2.275 +extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
   2.276 +extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
   2.277 +extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
   2.278 +extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
   2.279 +extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
   2.280 +extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
   2.281 +extern ia64_mv_dma_supported		swiotlb_dma_supported;
   2.282 +
   2.283 +/*
   2.284 + * Define default versions so we can extend machvec for new platforms without having
   2.285 + * to update the machvec files for all existing platforms.
   2.286 + */
   2.287 +#ifndef platform_setup
   2.288 +# define platform_setup			machvec_setup
   2.289 +#endif
   2.290 +#ifndef platform_cpu_init
   2.291 +# define platform_cpu_init		machvec_noop
   2.292 +#endif
   2.293 +#ifndef platform_irq_init
   2.294 +# define platform_irq_init		machvec_noop
   2.295 +#endif
   2.296 +
   2.297 +#ifndef platform_send_ipi
   2.298 +# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
   2.299 +#endif
   2.300 +#ifndef platform_timer_interrupt
   2.301 +# define platform_timer_interrupt 	machvec_timer_interrupt
   2.302 +#endif
   2.303 +#ifndef platform_global_tlb_purge
   2.304 +# define platform_global_tlb_purge	ia64_global_tlb_purge /* default to architected version */
   2.305 +#endif
   2.306 +#ifndef platform_tlb_migrate_finish
   2.307 +# define platform_tlb_migrate_finish	machvec_noop_mm
   2.308 +#endif
   2.309 +#ifndef platform_dma_init
   2.310 +# define platform_dma_init		swiotlb_init
   2.311 +#endif
   2.312 +#ifndef platform_dma_alloc_coherent
   2.313 +# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
   2.314 +#endif
   2.315 +#ifndef platform_dma_free_coherent
   2.316 +# define platform_dma_free_coherent	swiotlb_free_coherent
   2.317 +#endif
   2.318 +#ifndef platform_dma_map_single
   2.319 +# define platform_dma_map_single	swiotlb_map_single
   2.320 +#endif
   2.321 +#ifndef platform_dma_unmap_single
   2.322 +# define platform_dma_unmap_single	swiotlb_unmap_single
   2.323 +#endif
   2.324 +#ifndef platform_dma_map_sg
   2.325 +# define platform_dma_map_sg		swiotlb_map_sg
   2.326 +#endif
   2.327 +#ifndef platform_dma_unmap_sg
   2.328 +# define platform_dma_unmap_sg		swiotlb_unmap_sg
   2.329 +#endif
   2.330 +#ifndef platform_dma_sync_single_for_cpu
   2.331 +# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
   2.332 +#endif
   2.333 +#ifndef platform_dma_sync_sg_for_cpu
   2.334 +# define platform_dma_sync_sg_for_cpu		swiotlb_sync_sg_for_cpu
   2.335 +#endif
   2.336 +#ifndef platform_dma_sync_single_for_device
   2.337 +# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
   2.338 +#endif
   2.339 +#ifndef platform_dma_sync_sg_for_device
   2.340 +# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
   2.341 +#endif
   2.342 +#ifndef platform_dma_mapping_error
   2.343 +# define platform_dma_mapping_error		swiotlb_dma_mapping_error
   2.344 +#endif
   2.345 +#ifndef platform_dma_supported
   2.346 +# define  platform_dma_supported	swiotlb_dma_supported
   2.347 +#endif
   2.348 +#ifndef platform_local_vector_to_irq
   2.349 +# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
   2.350 +#endif
   2.351 +#ifndef platform_pci_get_legacy_mem
   2.352 +# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
   2.353 +#endif
   2.354 +#ifndef platform_pci_legacy_read
   2.355 +# define platform_pci_legacy_read	ia64_pci_legacy_read
   2.356 +extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
   2.357 +#endif
   2.358 +#ifndef platform_pci_legacy_write
   2.359 +# define platform_pci_legacy_write	ia64_pci_legacy_write
   2.360 +extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
   2.361 +#endif
   2.362 +#ifndef platform_inb
   2.363 +# define platform_inb		__ia64_inb
   2.364 +#endif
   2.365 +#ifndef platform_inw
   2.366 +# define platform_inw		__ia64_inw
   2.367 +#endif
   2.368 +#ifndef platform_inl
   2.369 +# define platform_inl		__ia64_inl
   2.370 +#endif
   2.371 +#ifndef platform_outb
   2.372 +# define platform_outb		__ia64_outb
   2.373 +#endif
   2.374 +#ifndef platform_outw
   2.375 +# define platform_outw		__ia64_outw
   2.376 +#endif
   2.377 +#ifndef platform_outl
   2.378 +# define platform_outl		__ia64_outl
   2.379 +#endif
   2.380 +#ifndef platform_mmiowb
   2.381 +# define platform_mmiowb	__ia64_mmiowb
   2.382 +#endif
   2.383 +#ifndef platform_readb
   2.384 +# define platform_readb		__ia64_readb
   2.385 +#endif
   2.386 +#ifndef platform_readw
   2.387 +# define platform_readw		__ia64_readw
   2.388 +#endif
   2.389 +#ifndef platform_readl
   2.390 +# define platform_readl		__ia64_readl
   2.391 +#endif
   2.392 +#ifndef platform_readq
   2.393 +# define platform_readq		__ia64_readq
   2.394 +#endif
   2.395 +#ifndef platform_readb_relaxed
   2.396 +# define platform_readb_relaxed	__ia64_readb_relaxed
   2.397 +#endif
   2.398 +#ifndef platform_readw_relaxed
   2.399 +# define platform_readw_relaxed	__ia64_readw_relaxed
   2.400 +#endif
   2.401 +#ifndef platform_readl_relaxed
   2.402 +# define platform_readl_relaxed	__ia64_readl_relaxed
   2.403 +#endif
   2.404 +#ifndef platform_readq_relaxed
   2.405 +# define platform_readq_relaxed	__ia64_readq_relaxed
   2.406 +#endif
   2.407 +#ifndef platform_migrate
   2.408 +# define platform_migrate machvec_noop_task
   2.409 +#endif
   2.410 +#ifndef platform_msi_init
   2.411 +# define platform_msi_init	((ia64_mv_msi_init_t*)NULL)
   2.412 +#endif
   2.413 +
   2.414 +#endif /* _ASM_IA64_MACHVEC_H */