direct-io.hg

changeset 6834:f5c4042212b0

Enable CONFIG_SMP compile and link
author djm@kirby.fc.hp.com
date Tue Aug 30 12:41:54 2005 -0600 (2005-08-30)
parents 2b95125015a5
children 44316ce83277
files xen/arch/ia64/hyperprivop.S xen/arch/ia64/irq.c xen/arch/ia64/linux-xen/irq_ia64.c xen/arch/ia64/linux-xen/mm_contig.c xen/arch/ia64/linux-xen/sal.c xen/arch/ia64/linux-xen/smp.c xen/arch/ia64/linux-xen/smpboot.c xen/arch/ia64/process.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c xen/include/asm-ia64/config.h xen/include/asm-ia64/linux-xen/asm/pal.h xen/include/asm-ia64/linux-xen/asm/processor.h xen/include/asm-ia64/linux-xen/asm/spinlock.h xen/include/asm-ia64/linux-xen/asm/system.h xen/include/asm-ia64/linux-xen/asm/tlbflush.h xen/include/asm-ia64/linux/asm/sal.h xen/include/asm-ia64/linux/notifier.h xen/include/asm-ia64/vhpt.h
line diff
     1.1 --- a/xen/arch/ia64/hyperprivop.S	Fri Aug 26 13:06:49 2005 +0000
     1.2 +++ b/xen/arch/ia64/hyperprivop.S	Tue Aug 30 12:41:54 2005 -0600
     1.3 @@ -27,6 +27,11 @@
     1.4  #undef RFI_TO_INTERRUPT // not working yet
     1.5  #endif
     1.6  
     1.7 +#ifdef CONFIG_SMP
     1.8 +#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
     1.9 +#undef FAST_PTC_GA
    1.10 +#endif
    1.11 +
    1.12  // FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
    1.13  //#define HANDLE_AR_UNAT
    1.14  
    1.15 @@ -1506,9 +1511,6 @@ GLOBAL_ENTRY(hyper_thash)
    1.16  END(hyper_thash)
    1.17  
    1.18  ENTRY(hyper_ptc_ga)
    1.19 -#ifdef CONFIG_SMP
    1.20 -FIXME: ptc.ga instruction requires spinlock for SMP
    1.21 -#endif
    1.22  #ifndef FAST_PTC_GA
    1.23  	br.spnt.few dispatch_break_fault ;;
    1.24  #endif
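
The hunk above replaces a bare "FIXME:" line inside hyper_ptc_ga (which would not assemble under CONFIG_SMP) with a compile-time #warning that also undefines FAST_PTC_GA, so SMP builds fall back to dispatch_break_fault. The underlying constraint is that only one global purge (ptc.ga) may be in flight in the coherence domain at a time, which the Linux ia64 port enforces with a spinlock. A minimal C sketch of that serialization, assuming the tlb.c-style names ptcg_lock and ia64_ptcga() (not part of this changeset):

	/* Illustrative only: serialize global TLB purges, one ptc.ga
	 * sequence at a time, in the style of arch/ia64/mm/tlb.c. */
	static DEFINE_SPINLOCK(ptcg_lock);

	static void global_tlb_purge(unsigned long start, unsigned long end,
	                             unsigned long nbits)
	{
		spin_lock(&ptcg_lock);
		do {
			ia64_ptcga(start, nbits << 2);	/* purge translation cache, global */
			ia64_srlz_i();			/* serialize before the next purge */
			start += 1UL << nbits;
		} while (start < end);
		spin_unlock(&ptcg_lock);
	}
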
     2.1 --- a/xen/arch/ia64/irq.c	Fri Aug 26 13:06:49 2005 +0000
     2.2 +++ b/xen/arch/ia64/irq.c	Tue Aug 30 12:41:54 2005 -0600
     2.3 @@ -266,8 +266,12 @@ skip:
     2.4  #ifdef CONFIG_SMP
     2.5  inline void synchronize_irq(unsigned int irq)
     2.6  {
     2.7 -	while (irq_descp(irq)->status & IRQ_INPROGRESS)
     2.8 +#ifndef XEN
     2.9 +	struct irq_desc *desc = irq_desc + irq;
    2.10 +
    2.11 +	while (desc->status & IRQ_INPROGRESS)
    2.12  		cpu_relax();
    2.13 +#endif
    2.14  }
    2.15  EXPORT_SYMBOL(synchronize_irq);
    2.16  #endif
    2.17 @@ -1012,6 +1016,8 @@ int setup_irq(unsigned int irq, struct i
    2.18  	return 0;
    2.19  }
    2.20  
    2.21 +#ifndef XEN
    2.22 +
    2.23  static struct proc_dir_entry * root_irq_dir;
    2.24  static struct proc_dir_entry * irq_dir [NR_IRQS];
    2.25  
    2.26 @@ -1121,6 +1127,7 @@ void move_irq(int irq)
    2.27  
    2.28  
    2.29  #endif /* CONFIG_SMP */
    2.30 +#endif
    2.31  
    2.32  #ifdef CONFIG_HOTPLUG_CPU
    2.33  unsigned int vectors_in_migration[NR_IRQS];
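
Under XEN the body of synchronize_irq() is compiled out, turning it into a no-op, while the Linux build keeps the busy-wait on IRQ_INPROGRESS; the second hunk likewise fences the /proc irq code off with #ifndef XEN. For context, a hedged sketch of the kind of caller that relies on this barrier (disable_irq_nosync(), synchronize_irq() and kfree() are standard kernel APIs; the example itself is not from the changeset):

	/* Illustrative teardown: make sure no CPU is still inside the
	 * handler before freeing the state it uses. */
	static void teardown_device_irq(unsigned int irq, void *dev_state)
	{
		disable_irq_nosync(irq);	/* stop new invocations */
		synchronize_irq(irq);		/* wait out a handler running on another CPU */
		kfree(dev_state);		/* safe: no handler can touch it now */
	}
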
     3.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c	Fri Aug 26 13:06:49 2005 +0000
     3.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Tue Aug 30 12:41:54 2005 -0600
     3.3 @@ -323,7 +323,9 @@ extern irqreturn_t handle_IPI (int irq, 
     3.4  
     3.5  static struct irqaction ipi_irqaction = {
     3.6  	.handler =	handle_IPI,
     3.7 +#ifndef XEN
     3.8  	.flags =	SA_INTERRUPT,
     3.9 +#endif
    3.10  	.name =		"IPI"
    3.11  };
    3.12  #endif
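
The irq_ia64.c change only compiles out the SA_INTERRUPT flag, which Xen's slimmed-down struct irqaction does not define. For reference, on the Linux side this descriptor is attached to the IPI vector during init_IRQ() roughly as below (register_percpu_irq() is the Linux-side helper; the wrapper name is hypothetical and the call is not part of this diff):

	/* Sketch, for context only: how ipi_irqaction gets registered. */
	static void __init wire_up_ipi(void)	/* hypothetical wrapper */
	{
		register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	}
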
     4.1 --- a/xen/arch/ia64/linux-xen/mm_contig.c	Fri Aug 26 13:06:49 2005 +0000
     4.2 +++ b/xen/arch/ia64/linux-xen/mm_contig.c	Tue Aug 30 12:41:54 2005 -0600
     4.3 @@ -191,8 +191,13 @@ per_cpu_init (void)
     4.4  	 * get_zeroed_page().
     4.5  	 */
     4.6  	if (smp_processor_id() == 0) {
     4.7 +#ifdef XEN
     4.8 +		cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SIZE -
     4.9 +			PAGE_SIZE + get_order(NR_CPUS));
    4.10 +#else
    4.11  		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
    4.12  					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
    4.13 +#endif
    4.14  		for (cpu = 0; cpu < NR_CPUS; cpu++) {
    4.15  			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
    4.16  			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
    4.17 @@ -204,6 +209,7 @@ per_cpu_init (void)
    4.18  }
    4.19  #endif /* CONFIG_SMP */
    4.20  
    4.21 +#ifndef XEN
    4.22  static int
    4.23  count_pages (u64 start, u64 end, void *arg)
    4.24  {
    4.25 @@ -229,7 +235,6 @@ count_dma_pages (u64 start, u64 end, voi
    4.26   * Set up the page tables.
    4.27   */
    4.28  
    4.29 -#ifndef XEN
    4.30  void
    4.31  paging_init (void)
    4.32  {
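
In per_cpu_init() the XEN build takes the cpu_data backing store from the Xen heap rather than the boot-time allocator, and count_pages()/paging_init() move under #ifndef XEN. The loop that follows the allocation copies the per-CPU template once per CPU and records each copy's displacement in __per_cpu_offset[]. A hedged sketch of how such an offset table is consumed (my_per_cpu() is a made-up name for illustration; the real kernels use per_cpu()/__get_cpu_var()):

	/* Illustrative only: CPU n's instance of a per-CPU variable lives at
	 * the variable's link address shifted by that CPU's recorded offset. */
	#define my_per_cpu(var, cpu) \
		(*(__typeof__(&(var)))((char *)&(var) + __per_cpu_offset[cpu]))
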
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/linux-xen/sal.c	Tue Aug 30 12:41:54 2005 -0600
     5.3 @@ -0,0 +1,305 @@
     5.4 +/*
     5.5 + * System Abstraction Layer (SAL) interface routines.
     5.6 + *
     5.7 + * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
     5.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     5.9 + * Copyright (C) 1999 VA Linux Systems
    5.10 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
    5.11 + */
    5.12 +#include <linux/config.h>
    5.13 +
    5.14 +#include <linux/kernel.h>
    5.15 +#include <linux/init.h>
    5.16 +#include <linux/module.h>
    5.17 +#include <linux/spinlock.h>
    5.18 +#include <linux/string.h>
    5.19 +
    5.20 +#include <asm/page.h>
    5.21 +#include <asm/sal.h>
    5.22 +#include <asm/pal.h>
    5.23 +#ifdef XEN
    5.24 +#include <linux/smp.h>
    5.25 +#endif
    5.26 +
    5.27 + __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
    5.28 +unsigned long sal_platform_features;
    5.29 +
    5.30 +unsigned short sal_revision;
    5.31 +unsigned short sal_version;
    5.32 +
    5.33 +#define SAL_MAJOR(x) ((x) >> 8)
    5.34 +#define SAL_MINOR(x) ((x) & 0xff)
    5.35 +
    5.36 +static struct {
    5.37 +	void *addr;	/* function entry point */
    5.38 +	void *gpval;	/* gp value to use */
    5.39 +} pdesc;
    5.40 +
    5.41 +static long
    5.42 +default_handler (void)
    5.43 +{
    5.44 +	return -1;
    5.45 +}
    5.46 +
    5.47 +ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
    5.48 +ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
    5.49 +
    5.50 +const char *
    5.51 +ia64_sal_strerror (long status)
    5.52 +{
    5.53 +	const char *str;
    5.54 +	switch (status) {
    5.55 +	      case 0: str = "Call completed without error"; break;
    5.56 +	      case 1: str = "Effect a warm boot of the system to complete "
    5.57 +			      "the update"; break;
    5.58 +	      case -1: str = "Not implemented"; break;
    5.59 +	      case -2: str = "Invalid argument"; break;
    5.60 +	      case -3: str = "Call completed with error"; break;
    5.61 +	      case -4: str = "Virtual address not registered"; break;
    5.62 +	      case -5: str = "No information available"; break;
    5.63 +	      case -6: str = "Insufficient space to add the entry"; break;
    5.64 +	      case -7: str = "Invalid entry_addr value"; break;
    5.65 +	      case -8: str = "Invalid interrupt vector"; break;
    5.66 +	      case -9: str = "Requested memory not available"; break;
    5.67 +	      case -10: str = "Unable to write to the NVM device"; break;
    5.68 +	      case -11: str = "Invalid partition type specified"; break;
    5.69 +	      case -12: str = "Invalid NVM_Object id specified"; break;
    5.70 +	      case -13: str = "NVM_Object already has the maximum number "
    5.71 +				"of partitions"; break;
    5.72 +	      case -14: str = "Insufficient space in partition for the "
    5.73 +				"requested write sub-function"; break;
    5.74 +	      case -15: str = "Insufficient data buffer space for the "
    5.75 +				"requested read record sub-function"; break;
    5.76 +	      case -16: str = "Scratch buffer required for the write/delete "
    5.77 +				"sub-function"; break;
    5.78 +	      case -17: str = "Insufficient space in the NVM_Object for the "
    5.79 +				"requested create sub-function"; break;
    5.80 +	      case -18: str = "Invalid value specified in the partition_rec "
    5.81 +				"argument"; break;
    5.82 +	      case -19: str = "Record oriented I/O not supported for this "
    5.83 +				"partition"; break;
    5.84 +	      case -20: str = "Bad format of record to be written or "
    5.85 +				"required keyword variable not "
    5.86 +				"specified"; break;
    5.87 +	      default: str = "Unknown SAL status code"; break;
    5.88 +	}
    5.89 +	return str;
    5.90 +}
    5.91 +
    5.92 +void __init
    5.93 +ia64_sal_handler_init (void *entry_point, void *gpval)
    5.94 +{
    5.95 +	/* fill in the SAL procedure descriptor and point ia64_sal to it: */
    5.96 +	pdesc.addr = entry_point;
    5.97 +	pdesc.gpval = gpval;
    5.98 +	ia64_sal = (ia64_sal_handler) &pdesc;
    5.99 +}
   5.100 +
   5.101 +static void __init
   5.102 +check_versions (struct ia64_sal_systab *systab)
   5.103 +{
   5.104 +	sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
   5.105 +	sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
   5.106 +
   5.107 +	/* Check for broken firmware */
   5.108 +	if ((sal_revision == SAL_VERSION_CODE(49, 29))
   5.109 +	    && (sal_version == SAL_VERSION_CODE(49, 29)))
   5.110 +	{
   5.111 +		/*
   5.112 +		 * Old firmware for zx2000 prototypes have this weird version number,
   5.113 +		 * reset it to something sane.
   5.114 +		 */
   5.115 +		sal_revision = SAL_VERSION_CODE(2, 8);
   5.116 +		sal_version = SAL_VERSION_CODE(0, 0);
   5.117 +	}
   5.118 +}
   5.119 +
   5.120 +static void __init
   5.121 +sal_desc_entry_point (void *p)
   5.122 +{
   5.123 +	struct ia64_sal_desc_entry_point *ep = p;
   5.124 +	ia64_pal_handler_init(__va(ep->pal_proc));
   5.125 +	ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
   5.126 +}
   5.127 +
   5.128 +#ifdef CONFIG_SMP
   5.129 +static void __init
   5.130 +set_smp_redirect (int flag)
   5.131 +{
   5.132 +#ifndef CONFIG_HOTPLUG_CPU
   5.133 +	if (no_int_routing)
   5.134 +		smp_int_redirect &= ~flag;
   5.135 +	else
   5.136 +		smp_int_redirect |= flag;
   5.137 +#else
   5.138 +	/*
    5.139 +	 * For CPU hotplug we don't want to do any chipset-supported
    5.140 +	 * interrupt redirection. The reason is that this would require
    5.141 +	 * all interrupts to be stopped and the irq hard-bound to a cpu.
    5.142 +	 * Later, when the interrupt is fired, we would need to set the
    5.143 +	 * redir hint on again in the vector. This is cumbersome for
    5.144 +	 * something that the user-mode irq balancer will solve anyway.
   5.145 +	 */
   5.146 +	no_int_routing=1;
   5.147 +	smp_int_redirect &= ~flag;
   5.148 +#endif
   5.149 +}
   5.150 +#else
   5.151 +#define set_smp_redirect(flag)	do { } while (0)
   5.152 +#endif
   5.153 +
   5.154 +static void __init
   5.155 +sal_desc_platform_feature (void *p)
   5.156 +{
   5.157 +	struct ia64_sal_desc_platform_feature *pf = p;
   5.158 +	sal_platform_features = pf->feature_mask;
   5.159 +
   5.160 +	printk(KERN_INFO "SAL Platform features:");
   5.161 +	if (!sal_platform_features) {
   5.162 +		printk(" None\n");
   5.163 +		return;
   5.164 +	}
   5.165 +
   5.166 +	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
   5.167 +		printk(" BusLock");
   5.168 +	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
   5.169 +		printk(" IRQ_Redirection");
   5.170 +		set_smp_redirect(SMP_IRQ_REDIRECTION);
   5.171 +	}
   5.172 +	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
   5.173 +		printk(" IPI_Redirection");
   5.174 +		set_smp_redirect(SMP_IPI_REDIRECTION);
   5.175 +	}
   5.176 +	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
   5.177 +		printk(" ITC_Drift");
   5.178 +	printk("\n");
   5.179 +}
   5.180 +
   5.181 +#ifdef CONFIG_SMP
   5.182 +static void __init
   5.183 +sal_desc_ap_wakeup (void *p)
   5.184 +{
   5.185 +	struct ia64_sal_desc_ap_wakeup *ap = p;
   5.186 +
   5.187 +	switch (ap->mechanism) {
   5.188 +	case IA64_SAL_AP_EXTERNAL_INT:
   5.189 +		ap_wakeup_vector = ap->vector;
   5.190 +		printk(KERN_INFO "SAL: AP wakeup using external interrupt "
   5.191 +				"vector 0x%lx\n", ap_wakeup_vector);
   5.192 +		break;
   5.193 +	default:
   5.194 +		printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
   5.195 +		break;
   5.196 +	}
   5.197 +}
   5.198 +
   5.199 +static void __init
   5.200 +chk_nointroute_opt(void)
   5.201 +{
   5.202 +	char *cp;
   5.203 +	extern char saved_command_line[];
   5.204 +
   5.205 +	for (cp = saved_command_line; *cp; ) {
   5.206 +		if (memcmp(cp, "nointroute", 10) == 0) {
   5.207 +			no_int_routing = 1;
   5.208 +			printk ("no_int_routing on\n");
   5.209 +			break;
   5.210 +		} else {
   5.211 +			while (*cp != ' ' && *cp)
   5.212 +				++cp;
   5.213 +			while (*cp == ' ')
   5.214 +				++cp;
   5.215 +		}
   5.216 +	}
   5.217 +}
   5.218 +
   5.219 +#else
   5.220 +static void __init sal_desc_ap_wakeup(void *p) { }
   5.221 +#endif
   5.222 +
   5.223 +void __init
   5.224 +ia64_sal_init (struct ia64_sal_systab *systab)
   5.225 +{
   5.226 +	char *p;
   5.227 +	int i;
   5.228 +
   5.229 +	if (!systab) {
   5.230 +		printk(KERN_WARNING "Hmm, no SAL System Table.\n");
   5.231 +		return;
   5.232 +	}
   5.233 +
   5.234 +	if (strncmp(systab->signature, "SST_", 4) != 0)
   5.235 +		printk(KERN_ERR "bad signature in system table!");
   5.236 +
   5.237 +	check_versions(systab);
   5.238 +#ifdef CONFIG_SMP
   5.239 +	chk_nointroute_opt();
   5.240 +#endif
   5.241 +
   5.242 +	/* revisions are coded in BCD, so %x does the job for us */
   5.243 +	printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
   5.244 +			SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
   5.245 +			systab->oem_id, systab->product_id,
   5.246 +			systab->product_id[0] ? " " : "",
   5.247 +			SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
   5.248 +
   5.249 +	p = (char *) (systab + 1);
   5.250 +	for (i = 0; i < systab->entry_count; i++) {
   5.251 +		/*
   5.252 +		 * The first byte of each entry type contains the type
   5.253 +		 * descriptor.
   5.254 +		 */
   5.255 +		switch (*p) {
   5.256 +		case SAL_DESC_ENTRY_POINT:
   5.257 +			sal_desc_entry_point(p);
   5.258 +			break;
   5.259 +		case SAL_DESC_PLATFORM_FEATURE:
   5.260 +			sal_desc_platform_feature(p);
   5.261 +			break;
   5.262 +		case SAL_DESC_PTC:
   5.263 +			ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
   5.264 +			break;
   5.265 +		case SAL_DESC_AP_WAKEUP:
   5.266 +			sal_desc_ap_wakeup(p);
   5.267 +			break;
   5.268 +		}
   5.269 +		p += SAL_DESC_SIZE(*p);
   5.270 +	}
   5.271 +}
   5.272 +
   5.273 +int
   5.274 +ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
   5.275 +		 u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
   5.276 +{
   5.277 +	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   5.278 +		return -1;
   5.279 +	SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
   5.280 +	return 0;
   5.281 +}
   5.282 +EXPORT_SYMBOL(ia64_sal_oemcall);
   5.283 +
   5.284 +int
   5.285 +ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
   5.286 +			u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
   5.287 +			u64 arg7)
   5.288 +{
   5.289 +	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   5.290 +		return -1;
   5.291 +	SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
   5.292 +			arg7);
   5.293 +	return 0;
   5.294 +}
   5.295 +EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
   5.296 +
   5.297 +int
   5.298 +ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
   5.299 +			   u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
   5.300 +			   u64 arg6, u64 arg7)
   5.301 +{
   5.302 +	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   5.303 +		return -1;
   5.304 +	SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
   5.305 +			   arg7);
   5.306 +	return 0;
   5.307 +}
   5.308 +EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
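
sal.c is carried over from Linux essentially unmodified; the only Xen-specific addition is the <linux/smp.h> include. One detail worth a worked example is the "revisions are coded in BCD, so %x does the job" remark in ia64_sal_init(): each revision byte is binary-coded decimal, so printing the nibbles in hex reproduces the decimal digits. A small sketch (the value is illustrative, not taken from real firmware):

	/* A SAL revision of 3.20 arrives as the BCD bytes 0x03 and 0x20, so
	 * check_versions() packs it as 0x0320 and the hex format prints "3.20". */
	unsigned short rev = (0x03 << 8) | 0x20;
	printk("SAL %x.%x\n", SAL_MAJOR(rev), SAL_MINOR(rev));	/* -> "SAL 3.20" */
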
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/linux-xen/smp.c	Tue Aug 30 12:41:54 2005 -0600
     6.3 @@ -0,0 +1,427 @@
     6.4 +/*
     6.5 + * SMP Support
     6.6 + *
     6.7 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
     6.8 + * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
     6.9 + *
    6.10 + * Lots of stuff stolen from arch/alpha/kernel/smp.c
    6.11 + *
    6.12 + * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
    6.13 + * the existing code (on the lines of x86 port).
    6.14 + * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
    6.15 + * calibration on each CPU.
    6.16 + * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
    6.17 + * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
    6.18 + * & cpu_online_map now gets done here (instead of setup.c)
    6.19 + * 99/10/05 davidm	Update to bring it in sync with new command-line processing
    6.20 + *  scheme.
    6.21 + * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
    6.22 + *		smp_call_function_single to resend IPI on timeouts
    6.23 + */
    6.24 +#include <linux/module.h>
    6.25 +#include <linux/kernel.h>
    6.26 +#include <linux/sched.h>
    6.27 +#include <linux/init.h>
    6.28 +#include <linux/interrupt.h>
    6.29 +#include <linux/smp.h>
    6.30 +#include <linux/kernel_stat.h>
    6.31 +#include <linux/mm.h>
    6.32 +#include <linux/cache.h>
    6.33 +#include <linux/delay.h>
    6.34 +#include <linux/efi.h>
    6.35 +#include <linux/bitops.h>
    6.36 +
    6.37 +#include <asm/atomic.h>
    6.38 +#include <asm/current.h>
    6.39 +#include <asm/delay.h>
    6.40 +#include <asm/machvec.h>
    6.41 +#include <asm/io.h>
    6.42 +#include <asm/irq.h>
    6.43 +#include <asm/page.h>
    6.44 +#include <asm/pgalloc.h>
    6.45 +#include <asm/pgtable.h>
    6.46 +#include <asm/processor.h>
    6.47 +#include <asm/ptrace.h>
    6.48 +#include <asm/sal.h>
    6.49 +#include <asm/system.h>
    6.50 +#include <asm/tlbflush.h>
    6.51 +#include <asm/unistd.h>
    6.52 +#include <asm/mca.h>
    6.53 +#ifdef XEN
    6.54 +#include <asm/hw_irq.h>
    6.55 +#endif
    6.56 +
    6.57 +#ifdef XEN
    6.58 +// FIXME: MOVE ELSEWHERE
    6.59 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
    6.60 +void flush_tlb_mask(cpumask_t mask)
    6.61 +{
    6.62 +	dummy();
    6.63 +}
    6.64 +//#if CONFIG_SMP || IA64
    6.65 +#if CONFIG_SMP
    6.66 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
    6.67 +void smp_send_event_check_mask(cpumask_t mask)
    6.68 +{
    6.69 +	dummy();
    6.70 +	//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
    6.71 +}
    6.72 +
    6.73 +
    6.74 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
    6.75 +int try_flush_tlb_mask(cpumask_t mask)
    6.76 +{
    6.77 +	dummy();
    6.78 +	return 1;
    6.79 +}
    6.80 +#endif
    6.81 +#endif
    6.82 +
    6.83 +#ifdef CONFIG_SMP	/* ifdef XEN */
    6.84 +
    6.85 +/*
    6.86 + * Structure and data for smp_call_function(). This is designed to minimise static memory
    6.87 + * requirements. It also looks cleaner.
    6.88 + */
    6.89 +static  __cacheline_aligned DEFINE_SPINLOCK(call_lock);
    6.90 +
    6.91 +struct call_data_struct {
    6.92 +	void (*func) (void *info);
    6.93 +	void *info;
    6.94 +	long wait;
    6.95 +	atomic_t started;
    6.96 +	atomic_t finished;
    6.97 +};
    6.98 +
    6.99 +static volatile struct call_data_struct *call_data;
   6.100 +
   6.101 +#define IPI_CALL_FUNC		0
   6.102 +#define IPI_CPU_STOP		1
   6.103 +
   6.104 +/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
   6.105 +static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
   6.106 +
   6.107 +extern void cpu_halt (void);
   6.108 +
   6.109 +void
   6.110 +lock_ipi_calllock(void)
   6.111 +{
   6.112 +	spin_lock_irq(&call_lock);
   6.113 +}
   6.114 +
   6.115 +void
   6.116 +unlock_ipi_calllock(void)
   6.117 +{
   6.118 +	spin_unlock_irq(&call_lock);
   6.119 +}
   6.120 +
   6.121 +static void
   6.122 +stop_this_cpu (void)
   6.123 +{
   6.124 +	/*
   6.125 +	 * Remove this CPU:
   6.126 +	 */
   6.127 +	cpu_clear(smp_processor_id(), cpu_online_map);
   6.128 +	max_xtp();
   6.129 +	local_irq_disable();
   6.130 +#ifndef XEN
   6.131 +	cpu_halt();
   6.132 +#endif
   6.133 +}
   6.134 +
   6.135 +void
   6.136 +cpu_die(void)
   6.137 +{
   6.138 +	max_xtp();
   6.139 +	local_irq_disable();
   6.140 +#ifndef XEN
   6.141 +	cpu_halt();
   6.142 +#endif
   6.143 +	/* Should never be here */
   6.144 +	BUG();
   6.145 +	for (;;);
   6.146 +}
   6.147 +
   6.148 +irqreturn_t
   6.149 +handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
   6.150 +{
   6.151 +	int this_cpu = get_cpu();
   6.152 +	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
   6.153 +	unsigned long ops;
   6.154 +
   6.155 +	mb();	/* Order interrupt and bit testing. */
   6.156 +	while ((ops = xchg(pending_ipis, 0)) != 0) {
   6.157 +		mb();	/* Order bit clearing and data access. */
   6.158 +		do {
   6.159 +			unsigned long which;
   6.160 +
   6.161 +			which = ffz(~ops);
   6.162 +			ops &= ~(1 << which);
   6.163 +
   6.164 +			switch (which) {
   6.165 +			      case IPI_CALL_FUNC:
   6.166 +			      {
   6.167 +				      struct call_data_struct *data;
   6.168 +				      void (*func)(void *info);
   6.169 +				      void *info;
   6.170 +				      int wait;
   6.171 +
   6.172 +				      /* release the 'pointer lock' */
   6.173 +				      data = (struct call_data_struct *) call_data;
   6.174 +				      func = data->func;
   6.175 +				      info = data->info;
   6.176 +				      wait = data->wait;
   6.177 +
   6.178 +				      mb();
   6.179 +				      atomic_inc(&data->started);
   6.180 +				      /*
   6.181 +				       * At this point the structure may be gone unless
   6.182 +				       * wait is true.
   6.183 +				       */
   6.184 +				      (*func)(info);
   6.185 +
   6.186 +				      /* Notify the sending CPU that the task is done.  */
   6.187 +				      mb();
   6.188 +				      if (wait)
   6.189 +					      atomic_inc(&data->finished);
   6.190 +			      }
   6.191 +			      break;
   6.192 +
   6.193 +			      case IPI_CPU_STOP:
   6.194 +				stop_this_cpu();
   6.195 +				break;
   6.196 +
   6.197 +			      default:
   6.198 +				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
   6.199 +				break;
   6.200 +			}
   6.201 +		} while (ops);
   6.202 +		mb();	/* Order data access and bit testing. */
   6.203 +	}
   6.204 +	put_cpu();
   6.205 +	return IRQ_HANDLED;
   6.206 +}
   6.207 +
   6.208 +/*
    6.209 + * Called with preemption disabled.
   6.210 + */
   6.211 +static inline void
   6.212 +send_IPI_single (int dest_cpu, int op)
   6.213 +{
   6.214 +	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
   6.215 +	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
   6.216 +}
   6.217 +
   6.218 +/*
    6.219 + * Called with preemption disabled.
   6.220 + */
   6.221 +static inline void
   6.222 +send_IPI_allbutself (int op)
   6.223 +{
   6.224 +	unsigned int i;
   6.225 +
   6.226 +	for (i = 0; i < NR_CPUS; i++) {
   6.227 +		if (cpu_online(i) && i != smp_processor_id())
   6.228 +			send_IPI_single(i, op);
   6.229 +	}
   6.230 +}
   6.231 +
   6.232 +/*
    6.233 + * Called with preemption disabled.
   6.234 + */
   6.235 +static inline void
   6.236 +send_IPI_all (int op)
   6.237 +{
   6.238 +	int i;
   6.239 +
   6.240 +	for (i = 0; i < NR_CPUS; i++)
   6.241 +		if (cpu_online(i))
   6.242 +			send_IPI_single(i, op);
   6.243 +}
   6.244 +
   6.245 +/*
    6.246 + * Called with preemption disabled.
   6.247 + */
   6.248 +static inline void
   6.249 +send_IPI_self (int op)
   6.250 +{
   6.251 +	send_IPI_single(smp_processor_id(), op);
   6.252 +}
   6.253 +
   6.254 +/*
    6.255 + * Called with preemption disabled.
   6.256 + */
   6.257 +void
   6.258 +smp_send_reschedule (int cpu)
   6.259 +{
   6.260 +	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
   6.261 +}
   6.262 +
   6.263 +void
   6.264 +smp_flush_tlb_all (void)
   6.265 +{
   6.266 +	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
   6.267 +}
   6.268 +
   6.269 +void
   6.270 +smp_flush_tlb_mm (struct mm_struct *mm)
   6.271 +{
   6.272 +	preempt_disable();
   6.273 +	/* this happens for the common case of a single-threaded fork():  */
   6.274 +	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
   6.275 +	{
   6.276 +		local_finish_flush_tlb_mm(mm);
   6.277 +		preempt_enable();
   6.278 +		return;
   6.279 +	}
   6.280 +
   6.281 +	preempt_enable();
   6.282 +	/*
   6.283 +	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
   6.284 +	 * have been running in the address space.  It's not clear that this is worth the
   6.285 +	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
   6.286 +	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
   6.287 +	 * rather trivial.
   6.288 +	 */
   6.289 +	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
   6.290 +}
   6.291 +
   6.292 +/*
   6.293 + * Run a function on another CPU
   6.294 + *  <func>	The function to run. This must be fast and non-blocking.
   6.295 + *  <info>	An arbitrary pointer to pass to the function.
   6.296 + *  <nonatomic>	Currently unused.
   6.297 + *  <wait>	If true, wait until function has completed on other CPUs.
   6.298 + *  [RETURNS]   0 on success, else a negative status code.
   6.299 + *
   6.300 + * Does not return until the remote CPU is nearly ready to execute <func>
   6.301 + * or is or has executed.
   6.302 + */
   6.303 +
   6.304 +int
   6.305 +smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
   6.306 +			  int wait)
   6.307 +{
   6.308 +	struct call_data_struct data;
   6.309 +	int cpus = 1;
   6.310 +	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
   6.311 +
   6.312 +	if (cpuid == me) {
   6.313 +		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
   6.314 +		put_cpu();
   6.315 +		return -EBUSY;
   6.316 +	}
   6.317 +
   6.318 +	data.func = func;
   6.319 +	data.info = info;
   6.320 +	atomic_set(&data.started, 0);
   6.321 +	data.wait = wait;
   6.322 +	if (wait)
   6.323 +		atomic_set(&data.finished, 0);
   6.324 +
   6.325 +#ifdef XEN
   6.326 +	spin_lock(&call_lock);
   6.327 +#else
   6.328 +	spin_lock_bh(&call_lock);
   6.329 +#endif
   6.330 +
   6.331 +	call_data = &data;
   6.332 +	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
   6.333 +  	send_IPI_single(cpuid, IPI_CALL_FUNC);
   6.334 +
   6.335 +	/* Wait for response */
   6.336 +	while (atomic_read(&data.started) != cpus)
   6.337 +		cpu_relax();
   6.338 +
   6.339 +	if (wait)
   6.340 +		while (atomic_read(&data.finished) != cpus)
   6.341 +			cpu_relax();
   6.342 +	call_data = NULL;
   6.343 +
   6.344 +#ifdef XEN
   6.345 +	spin_unlock(&call_lock);
   6.346 +#else
   6.347 +	spin_unlock_bh(&call_lock);
   6.348 +#endif
   6.349 +	put_cpu();
   6.350 +	return 0;
   6.351 +}
   6.352 +EXPORT_SYMBOL(smp_call_function_single);
   6.353 +
   6.354 +/*
   6.355 + * this function sends a 'generic call function' IPI to all other CPUs
   6.356 + * in the system.
   6.357 + */
   6.358 +
   6.359 +/*
   6.360 + *  [SUMMARY]	Run a function on all other CPUs.
   6.361 + *  <func>	The function to run. This must be fast and non-blocking.
   6.362 + *  <info>	An arbitrary pointer to pass to the function.
   6.363 + *  <nonatomic>	currently unused.
   6.364 + *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
   6.365 + *  [RETURNS]   0 on success, else a negative status code.
   6.366 + *
    6.367 + * Does not return until the remote CPUs are nearly ready to execute <func>,
    6.368 + * or have already executed it.
   6.369 + *
   6.370 + * You must not call this function with disabled interrupts or from a
   6.371 + * hardware interrupt handler or from a bottom half handler.
   6.372 + */
   6.373 +int
   6.374 +smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
   6.375 +{
   6.376 +	struct call_data_struct data;
   6.377 +	int cpus = num_online_cpus()-1;
   6.378 +
   6.379 +	if (!cpus)
   6.380 +		return 0;
   6.381 +
   6.382 +	/* Can deadlock when called with interrupts disabled */
   6.383 +#ifdef XEN
   6.384 +	if (irqs_disabled()) panic("smp_call_function called with interrupts disabled\n");
   6.385 +#else
   6.386 +	WARN_ON(irqs_disabled());
   6.387 +#endif
   6.388 +
   6.389 +	data.func = func;
   6.390 +	data.info = info;
   6.391 +	atomic_set(&data.started, 0);
   6.392 +	data.wait = wait;
   6.393 +	if (wait)
   6.394 +		atomic_set(&data.finished, 0);
   6.395 +
   6.396 +	spin_lock(&call_lock);
   6.397 +
   6.398 +	call_data = &data;
   6.399 +	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
   6.400 +	send_IPI_allbutself(IPI_CALL_FUNC);
   6.401 +
   6.402 +	/* Wait for response */
   6.403 +	while (atomic_read(&data.started) != cpus)
   6.404 +		cpu_relax();
   6.405 +
   6.406 +	if (wait)
   6.407 +		while (atomic_read(&data.finished) != cpus)
   6.408 +			cpu_relax();
   6.409 +	call_data = NULL;
   6.410 +
   6.411 +	spin_unlock(&call_lock);
   6.412 +	return 0;
   6.413 +}
   6.414 +EXPORT_SYMBOL(smp_call_function);
   6.415 +
   6.416 +/*
   6.417 + * this function calls the 'stop' function on all other CPUs in the system.
   6.418 + */
   6.419 +void
   6.420 +smp_send_stop (void)
   6.421 +{
   6.422 +	send_IPI_allbutself(IPI_CPU_STOP);
   6.423 +}
   6.424 +
   6.425 +int __init
   6.426 +setup_profiling_timer (unsigned int multiplier)
   6.427 +{
   6.428 +	return -EINVAL;
   6.429 +}
   6.430 +#endif /* CONFIG_SMP ifdef XEN */
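
The new smp.c keeps the Linux call_data rendezvous: the sender publishes a call_data_struct, raises IPI_CALL_FUNC, and spins on the atomic started/finished counters, while handle_IPI() on each target runs the function and bumps those counters. The Xen-specific twists are the stub flush/event-check helpers at the top and taking call_lock with plain spin_lock() instead of spin_lock_bh(). A hedged sketch of a typical caller of this interface (illustrative only, not part of the changeset):

	/* Run a fast, non-sleeping function on every other online CPU, wait
	 * for all of them to finish, then do the same work locally. */
	static void flush_remote_state(void *unused)
	{
		local_flush_tlb_all();	/* runs in IPI context: must not sleep */
	}

	void flush_everywhere(void)
	{
		smp_call_function(flush_remote_state, NULL, 0 /*nonatomic*/, 1 /*wait*/);
		flush_remote_state(NULL);	/* smp_call_function skips the calling CPU */
	}
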
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/ia64/linux-xen/smpboot.c	Tue Aug 30 12:41:54 2005 -0600
     7.3 @@ -0,0 +1,903 @@
     7.4 +/*
     7.5 + * SMP boot-related support
     7.6 + *
     7.7 + * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
     7.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     7.9 + * Copyright (C) 2001, 2004-2005 Intel Corp
    7.10 + * 	Rohit Seth <rohit.seth@intel.com>
    7.11 + * 	Suresh Siddha <suresh.b.siddha@intel.com>
    7.12 + * 	Gordon Jin <gordon.jin@intel.com>
    7.13 + *	Ashok Raj  <ashok.raj@intel.com>
    7.14 + *
    7.15 + * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
    7.16 + * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
    7.17 + * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
    7.18 + *						smp_boot_cpus()/smp_commence() is replaced by
    7.19 + *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
    7.20 + * 04/06/21 Ashok Raj		<ashok.raj@intel.com> Added CPU Hotplug Support
    7.21 + * 04/12/26 Jin Gordon <gordon.jin@intel.com>
    7.22 + * 04/12/26 Rohit Seth <rohit.seth@intel.com>
    7.23 + *						Add multi-threading and multi-core detection
    7.24 + * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
    7.25 + *						Setup cpu_sibling_map and cpu_core_map
    7.26 + */
    7.27 +#include <linux/config.h>
    7.28 +
    7.29 +#include <linux/module.h>
    7.30 +#include <linux/acpi.h>
    7.31 +#include <linux/bootmem.h>
    7.32 +#include <linux/cpu.h>
    7.33 +#include <linux/delay.h>
    7.34 +#include <linux/init.h>
    7.35 +#include <linux/interrupt.h>
    7.36 +#include <linux/irq.h>
    7.37 +#include <linux/kernel.h>
    7.38 +#include <linux/kernel_stat.h>
    7.39 +#include <linux/mm.h>
    7.40 +#include <linux/notifier.h>	/* hg add me */
    7.41 +#include <linux/smp.h>
    7.42 +#include <linux/smp_lock.h>
    7.43 +#include <linux/spinlock.h>
    7.44 +#include <linux/efi.h>
    7.45 +#include <linux/percpu.h>
    7.46 +#include <linux/bitops.h>
    7.47 +
    7.48 +#include <asm/atomic.h>
    7.49 +#include <asm/cache.h>
    7.50 +#include <asm/current.h>
    7.51 +#include <asm/delay.h>
    7.52 +#include <asm/ia32.h>
    7.53 +#include <asm/io.h>
    7.54 +#include <asm/irq.h>
    7.55 +#include <asm/machvec.h>
    7.56 +#include <asm/mca.h>
    7.57 +#include <asm/page.h>
    7.58 +#include <asm/pgalloc.h>
    7.59 +#include <asm/pgtable.h>
    7.60 +#include <asm/processor.h>
    7.61 +#include <asm/ptrace.h>
    7.62 +#include <asm/sal.h>
    7.63 +#include <asm/system.h>
    7.64 +#include <asm/tlbflush.h>
    7.65 +#include <asm/unistd.h>
    7.66 +
    7.67 +#ifdef XEN
    7.68 +#include <asm/hw_irq.h>
    7.69 +int ht_per_core = 1;
    7.70 +#endif
    7.71 +
    7.72 +#ifdef CONFIG_SMP /* ifdef XEN */
    7.73 +
    7.74 +#define SMP_DEBUG 0
    7.75 +
    7.76 +#if SMP_DEBUG
    7.77 +#define Dprintk(x...)  printk(x)
    7.78 +#else
    7.79 +#define Dprintk(x...)
    7.80 +#endif
    7.81 +
    7.82 +#ifdef CONFIG_HOTPLUG_CPU
    7.83 +/*
     7.84 + * Store all idle threads; these can be reused instead of creating
     7.85 + * a new thread. This also avoids complicated thread-destroy functionality
     7.86 + * for idle threads.
    7.87 + */
    7.88 +struct task_struct *idle_thread_array[NR_CPUS];
    7.89 +
    7.90 +/*
    7.91 + * Global array allocated for NR_CPUS at boot time
    7.92 + */
    7.93 +struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
    7.94 +
    7.95 +/*
    7.96 + * start_ap in head.S uses this to store current booting cpu
    7.97 + * info.
    7.98 + */
    7.99 +struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
   7.100 +
   7.101 +#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
   7.102 +
   7.103 +#define get_idle_for_cpu(x)		(idle_thread_array[(x)])
   7.104 +#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
   7.105 +
   7.106 +#else
   7.107 +
   7.108 +#define get_idle_for_cpu(x)		(NULL)
   7.109 +#define set_idle_for_cpu(x,p)
   7.110 +#define set_brendez_area(x)
   7.111 +#endif
   7.112 +
   7.113 +
   7.114 +/*
   7.115 + * ITC synchronization related stuff:
   7.116 + */
   7.117 +#define MASTER	0
   7.118 +#define SLAVE	(SMP_CACHE_BYTES/8)
   7.119 +
   7.120 +#define NUM_ROUNDS	64	/* magic value */
   7.121 +#define NUM_ITERS	5	/* likewise */
   7.122 +
   7.123 +static DEFINE_SPINLOCK(itc_sync_lock);
   7.124 +static volatile unsigned long go[SLAVE + 1];
   7.125 +
   7.126 +#define DEBUG_ITC_SYNC	0
   7.127 +
   7.128 +extern void __devinit calibrate_delay (void);
   7.129 +extern void start_ap (void);
   7.130 +extern unsigned long ia64_iobase;
   7.131 +
   7.132 +task_t *task_for_booting_cpu;
   7.133 +
   7.134 +/*
   7.135 + * State for each CPU
   7.136 + */
   7.137 +DEFINE_PER_CPU(int, cpu_state);
   7.138 +
   7.139 +/* Bitmasks of currently online, and possible CPUs */
   7.140 +cpumask_t cpu_online_map;
   7.141 +EXPORT_SYMBOL(cpu_online_map);
   7.142 +cpumask_t cpu_possible_map;
   7.143 +EXPORT_SYMBOL(cpu_possible_map);
   7.144 +
   7.145 +cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
   7.146 +cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
   7.147 +int smp_num_siblings = 1;
   7.148 +int smp_num_cpucores = 1;
   7.149 +
   7.150 +/* which logical CPU number maps to which CPU (physical APIC ID) */
   7.151 +volatile int ia64_cpu_to_sapicid[NR_CPUS];
   7.152 +EXPORT_SYMBOL(ia64_cpu_to_sapicid);
   7.153 +
   7.154 +static volatile cpumask_t cpu_callin_map;
   7.155 +
   7.156 +struct smp_boot_data smp_boot_data __initdata;
   7.157 +
   7.158 +unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */
   7.159 +
   7.160 +char __initdata no_int_routing;
   7.161 +
   7.162 +unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
   7.163 +
   7.164 +static int __init
   7.165 +nointroute (char *str)
   7.166 +{
   7.167 +	no_int_routing = 1;
   7.168 +	printk ("no_int_routing on\n");
   7.169 +	return 1;
   7.170 +}
   7.171 +
   7.172 +__setup("nointroute", nointroute);
   7.173 +
   7.174 +void
   7.175 +sync_master (void *arg)
   7.176 +{
   7.177 +	unsigned long flags, i;
   7.178 +
   7.179 +	go[MASTER] = 0;
   7.180 +
   7.181 +	local_irq_save(flags);
   7.182 +	{
   7.183 +		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
   7.184 +			while (!go[MASTER])
   7.185 +				cpu_relax();
   7.186 +			go[MASTER] = 0;
   7.187 +			go[SLAVE] = ia64_get_itc();
   7.188 +		}
   7.189 +	}
   7.190 +	local_irq_restore(flags);
   7.191 +}
   7.192 +
   7.193 +/*
   7.194 + * Return the number of cycles by which our itc differs from the itc on the master
   7.195 + * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
   7.196 + * negative that it is behind.
   7.197 + */
   7.198 +static inline long
   7.199 +get_delta (long *rt, long *master)
   7.200 +{
   7.201 +	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
   7.202 +	unsigned long tcenter, t0, t1, tm;
   7.203 +	long i;
   7.204 +
   7.205 +	for (i = 0; i < NUM_ITERS; ++i) {
   7.206 +		t0 = ia64_get_itc();
   7.207 +		go[MASTER] = 1;
   7.208 +		while (!(tm = go[SLAVE]))
   7.209 +			cpu_relax();
   7.210 +		go[SLAVE] = 0;
   7.211 +		t1 = ia64_get_itc();
   7.212 +
   7.213 +		if (t1 - t0 < best_t1 - best_t0)
   7.214 +			best_t0 = t0, best_t1 = t1, best_tm = tm;
   7.215 +	}
   7.216 +
   7.217 +	*rt = best_t1 - best_t0;
   7.218 +	*master = best_tm - best_t0;
   7.219 +
   7.220 +	/* average best_t0 and best_t1 without overflow: */
   7.221 +	tcenter = (best_t0/2 + best_t1/2);
   7.222 +	if (best_t0 % 2 + best_t1 % 2 == 2)
   7.223 +		++tcenter;
   7.224 +	return tcenter - best_tm;
   7.225 +}
   7.226 +
   7.227 +/*
   7.228 + * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
   7.229 + * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
   7.230 + * unaccounted-for errors (such as getting a machine check in the middle of a calibration
   7.231 + * step).  The basic idea is for the slave to ask the master what itc value it has and to
   7.232 + * read its own itc before and after the master responds.  Each iteration gives us three
   7.233 + * timestamps:
   7.234 + *
   7.235 + *	slave		master
   7.236 + *
   7.237 + *	t0 ---\
   7.238 + *             ---\
   7.239 + *		   --->
   7.240 + *			tm
   7.241 + *		   /---
   7.242 + *	       /---
   7.243 + *	t1 <---
   7.244 + *
   7.245 + *
   7.246 + * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
   7.247 + * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
   7.248 + * between the slave and the master is symmetric.  Even if the interconnect were
   7.249 + * asymmetric, we would still know that the synchronization error is smaller than the
    7.250 + * roundtrip latency (t1 - t0).
   7.251 + *
   7.252 + * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
   7.253 + * within one or two cycles.  However, we can only *guarantee* that the synchronization is
   7.254 + * accurate to within a round-trip time, which is typically in the range of several
   7.255 + * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
   7.256 + * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
   7.257 + * than half a micro second or so.
   7.258 + */
   7.259 +void
   7.260 +ia64_sync_itc (unsigned int master)
   7.261 +{
   7.262 +	long i, delta, adj, adjust_latency = 0, done = 0;
   7.263 +	unsigned long flags, rt, master_time_stamp, bound;
   7.264 +#if DEBUG_ITC_SYNC
   7.265 +	struct {
   7.266 +		long rt;	/* roundtrip time */
   7.267 +		long master;	/* master's timestamp */
   7.268 +		long diff;	/* difference between midpoint and master's timestamp */
   7.269 +		long lat;	/* estimate of itc adjustment latency */
   7.270 +	} t[NUM_ROUNDS];
   7.271 +#endif
   7.272 +
   7.273 +	/*
   7.274 +	 * Make sure local timer ticks are disabled while we sync.  If
   7.275 +	 * they were enabled, we'd have to worry about nasty issues
   7.276 +	 * like setting the ITC ahead of (or a long time before) the
   7.277 +	 * next scheduled tick.
   7.278 +	 */
   7.279 +	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);
   7.280 +
   7.281 +	go[MASTER] = 1;
   7.282 +
   7.283 +	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
   7.284 +		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
   7.285 +		return;
   7.286 +	}
   7.287 +
   7.288 +	while (go[MASTER])
   7.289 +		cpu_relax();	/* wait for master to be ready */
   7.290 +
   7.291 +	spin_lock_irqsave(&itc_sync_lock, flags);
   7.292 +	{
   7.293 +		for (i = 0; i < NUM_ROUNDS; ++i) {
   7.294 +			delta = get_delta(&rt, &master_time_stamp);
   7.295 +			if (delta == 0) {
   7.296 +				done = 1;	/* let's lock on to this... */
   7.297 +				bound = rt;
   7.298 +			}
   7.299 +
   7.300 +			if (!done) {
   7.301 +				if (i > 0) {
   7.302 +					adjust_latency += -delta;
   7.303 +					adj = -delta + adjust_latency/4;
   7.304 +				} else
   7.305 +					adj = -delta;
   7.306 +
   7.307 +				ia64_set_itc(ia64_get_itc() + adj);
   7.308 +			}
   7.309 +#if DEBUG_ITC_SYNC
   7.310 +			t[i].rt = rt;
   7.311 +			t[i].master = master_time_stamp;
   7.312 +			t[i].diff = delta;
   7.313 +			t[i].lat = adjust_latency/4;
   7.314 +#endif
   7.315 +		}
   7.316 +	}
   7.317 +	spin_unlock_irqrestore(&itc_sync_lock, flags);
   7.318 +
   7.319 +#if DEBUG_ITC_SYNC
   7.320 +	for (i = 0; i < NUM_ROUNDS; ++i)
   7.321 +		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
   7.322 +		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
   7.323 +#endif
   7.324 +
   7.325 +	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
   7.326 +	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
   7.327 +}
   7.328 +
   7.329 +/*
   7.330 + * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
   7.331 + */
   7.332 +static inline void __devinit
   7.333 +smp_setup_percpu_timer (void)
   7.334 +{
   7.335 +}
   7.336 +
   7.337 +static void __devinit
   7.338 +smp_callin (void)
   7.339 +{
   7.340 +	int cpuid, phys_id;
   7.341 +	extern void ia64_init_itm(void);
   7.342 +
   7.343 +#ifdef CONFIG_PERFMON
   7.344 +	extern void pfm_init_percpu(void);
   7.345 +#endif
   7.346 +
   7.347 +	cpuid = smp_processor_id();
   7.348 +	phys_id = hard_smp_processor_id();
   7.349 +
   7.350 +	if (cpu_online(cpuid)) {
   7.351 +		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
   7.352 +		       phys_id, cpuid);
   7.353 +		BUG();
   7.354 +	}
   7.355 +
   7.356 +	lock_ipi_calllock();
   7.357 +	cpu_set(cpuid, cpu_online_map);
   7.358 +	unlock_ipi_calllock();
   7.359 +	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
   7.360 +
   7.361 +	smp_setup_percpu_timer();
   7.362 +
   7.363 +#ifndef XEN
   7.364 +	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */
   7.365 +#endif
   7.366 +
   7.367 +#ifdef CONFIG_PERFMON
   7.368 +	pfm_init_percpu();
   7.369 +#endif
   7.370 +
   7.371 +	local_irq_enable();
   7.372 +
   7.373 +	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
   7.374 +		/*
   7.375 +		 * Synchronize the ITC with the BP.  Need to do this after irqs are
   7.376 +		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
   7.377 +		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
    7.378 +		 * calls spin_unlock_bh(), which calls
   7.379 +		 */
   7.380 +		Dprintk("Going to syncup ITC with BP.\n");
   7.381 +		ia64_sync_itc(0);
   7.382 +	}
   7.383 +
   7.384 +	/*
   7.385 +	 * Get our bogomips.
   7.386 +	 */
   7.387 +	ia64_init_itm();
   7.388 +#ifndef XEN
   7.389 +	calibrate_delay();
   7.390 +#endif
   7.391 +	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
   7.392 +
   7.393 +#ifdef CONFIG_IA32_SUPPORT
   7.394 +	ia32_gdt_init();
   7.395 +#endif
   7.396 +
   7.397 +	/*
   7.398 +	 * Allow the master to continue.
   7.399 +	 */
   7.400 +	cpu_set(cpuid, cpu_callin_map);
   7.401 +	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
   7.402 +}
   7.403 +
   7.404 +
   7.405 +/*
   7.406 + * Activate a secondary processor.  head.S calls this.
   7.407 + */
   7.408 +int __devinit
   7.409 +start_secondary (void *unused)
   7.410 +{
   7.411 +	/* Early console may use I/O ports */
   7.412 +	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
   7.413 +	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
   7.414 +	efi_map_pal_code();
   7.415 +	cpu_init();
   7.416 +	smp_callin();
   7.417 +
   7.418 +#ifdef XEN
   7.419 +	startup_cpu_idle_loop();
   7.420 +#else
   7.421 +	cpu_idle();
   7.422 +#endif
   7.423 +	return 0;
   7.424 +}
   7.425 +
   7.426 +struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
   7.427 +{
   7.428 +	return NULL;
   7.429 +}
   7.430 +
   7.431 +#ifndef XEN
   7.432 +struct create_idle {
   7.433 +	struct task_struct *idle;
   7.434 +	struct completion done;
   7.435 +	int cpu;
   7.436 +};
   7.437 +
   7.438 +void
   7.439 +do_fork_idle(void *_c_idle)
   7.440 +{
   7.441 +	struct create_idle *c_idle = _c_idle;
   7.442 +
   7.443 +	c_idle->idle = fork_idle(c_idle->cpu);
   7.444 +	complete(&c_idle->done);
   7.445 +}
   7.446 +#endif
   7.447 +
   7.448 +static int __devinit
   7.449 +do_boot_cpu (int sapicid, int cpu)
   7.450 +{
   7.451 +	int timeout;
   7.452 +#ifndef XEN
   7.453 +	struct create_idle c_idle = {
   7.454 +		.cpu	= cpu,
   7.455 +		.done	= COMPLETION_INITIALIZER(c_idle.done),
   7.456 +	};
   7.457 +	DECLARE_WORK(work, do_fork_idle, &c_idle);
   7.458 +
   7.459 + 	c_idle.idle = get_idle_for_cpu(cpu);
   7.460 + 	if (c_idle.idle) {
   7.461 +		init_idle(c_idle.idle, cpu);
   7.462 + 		goto do_rest;
   7.463 +	}
   7.464 +
   7.465 +	/*
    7.466 +	 * We can't use kernel_thread since we must avoid rescheduling the child.
   7.467 +	 */
   7.468 +	if (!keventd_up() || current_is_keventd())
   7.469 +		work.func(work.data);
   7.470 +	else {
   7.471 +		schedule_work(&work);
   7.472 +		wait_for_completion(&c_idle.done);
   7.473 +	}
   7.474 +
   7.475 +	if (IS_ERR(c_idle.idle))
   7.476 +		panic("failed fork for CPU %d", cpu);
   7.477 +
   7.478 +	set_idle_for_cpu(cpu, c_idle.idle);
   7.479 +
   7.480 +do_rest:
   7.481 +	task_for_booting_cpu = c_idle.idle;
   7.482 +#endif
   7.483 +
   7.484 +	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
   7.485 +
   7.486 +	set_brendez_area(cpu);
   7.487 +	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
   7.488 +
   7.489 +	/*
   7.490 +	 * Wait 10s total for the AP to start
   7.491 +	 */
   7.492 +	Dprintk("Waiting on callin_map ...");
   7.493 +	for (timeout = 0; timeout < 100000; timeout++) {
   7.494 +		if (cpu_isset(cpu, cpu_callin_map))
   7.495 +			break;  /* It has booted */
   7.496 +		udelay(100);
   7.497 +	}
   7.498 +	Dprintk("\n");
   7.499 +
   7.500 +	if (!cpu_isset(cpu, cpu_callin_map)) {
   7.501 +		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
   7.502 +		ia64_cpu_to_sapicid[cpu] = -1;
   7.503 +		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
   7.504 +		return -EINVAL;
   7.505 +	}
   7.506 +	return 0;
   7.507 +}
   7.508 +
   7.509 +static int __init
   7.510 +decay (char *str)
   7.511 +{
   7.512 +	int ticks;
   7.513 +	get_option (&str, &ticks);
   7.514 +	return 1;
   7.515 +}
   7.516 +
   7.517 +__setup("decay=", decay);
   7.518 +
   7.519 +/*
   7.520 + * Initialize the logical CPU number to SAPICID mapping
   7.521 + */
   7.522 +void __init
   7.523 +smp_build_cpu_map (void)
   7.524 +{
   7.525 +	int sapicid, cpu, i;
   7.526 +	int boot_cpu_id = hard_smp_processor_id();
   7.527 +
   7.528 +	for (cpu = 0; cpu < NR_CPUS; cpu++) {
   7.529 +		ia64_cpu_to_sapicid[cpu] = -1;
   7.530 +#ifdef CONFIG_HOTPLUG_CPU
   7.531 +		cpu_set(cpu, cpu_possible_map);
   7.532 +#endif
   7.533 +	}
   7.534 +
   7.535 +	ia64_cpu_to_sapicid[0] = boot_cpu_id;
   7.536 +	cpus_clear(cpu_present_map);
   7.537 +	cpu_set(0, cpu_present_map);
   7.538 +	cpu_set(0, cpu_possible_map);
   7.539 +	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
   7.540 +		sapicid = smp_boot_data.cpu_phys_id[i];
   7.541 +		if (sapicid == boot_cpu_id)
   7.542 +			continue;
   7.543 +		cpu_set(cpu, cpu_present_map);
   7.544 +		cpu_set(cpu, cpu_possible_map);
   7.545 +		ia64_cpu_to_sapicid[cpu] = sapicid;
   7.546 +		cpu++;
   7.547 +	}
   7.548 +}
   7.549 +
   7.550 +/*
   7.551 + * Cycle through the APs sending Wakeup IPIs to boot each.
   7.552 + */
   7.553 +void __init
   7.554 +smp_prepare_cpus (unsigned int max_cpus)
   7.555 +{
   7.556 +	int boot_cpu_id = hard_smp_processor_id();
   7.557 +
   7.558 +	/*
   7.559 +	 * Initialize the per-CPU profiling counter/multiplier
   7.560 +	 */
   7.561 +
   7.562 +	smp_setup_percpu_timer();
   7.563 +
   7.564 +	/*
   7.565 +	 * We have the boot CPU online for sure.
   7.566 +	 */
   7.567 +	cpu_set(0, cpu_online_map);
   7.568 +	cpu_set(0, cpu_callin_map);
   7.569 +
   7.570 +	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
   7.571 +	ia64_cpu_to_sapicid[0] = boot_cpu_id;
   7.572 +
   7.573 +	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
   7.574 +
   7.575 +	current_thread_info()->cpu = 0;
   7.576 +
   7.577 +	/*
   7.578 +	 * If SMP should be disabled, then really disable it!
   7.579 +	 */
   7.580 +	if (!max_cpus) {
   7.581 +		printk(KERN_INFO "SMP mode deactivated.\n");
   7.582 +		cpus_clear(cpu_online_map);
   7.583 +		cpus_clear(cpu_present_map);
   7.584 +		cpus_clear(cpu_possible_map);
   7.585 +		cpu_set(0, cpu_online_map);
   7.586 +		cpu_set(0, cpu_present_map);
   7.587 +		cpu_set(0, cpu_possible_map);
   7.588 +		return;
   7.589 +	}
   7.590 +}
   7.591 +
   7.592 +void __devinit smp_prepare_boot_cpu(void)
   7.593 +{
   7.594 +	cpu_set(smp_processor_id(), cpu_online_map);
   7.595 +	cpu_set(smp_processor_id(), cpu_callin_map);
   7.596 +	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
   7.597 +}
   7.598 +
   7.599 +/*
   7.600 + * mt_info[] is a temporary store for all info returned by
   7.601 + * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
    7.602 + * specific cpu comes online.
   7.603 + */
   7.604 +static struct {
   7.605 +	__u32   socket_id;
   7.606 +	__u16   core_id;
   7.607 +	__u16   thread_id;
   7.608 +	__u16   proc_fixed_addr;
   7.609 +	__u8    valid;
   7.610 +} mt_info[NR_CPUS] __devinitdata;
   7.611 +
   7.612 +#ifdef CONFIG_HOTPLUG_CPU
   7.613 +static inline void
   7.614 +remove_from_mtinfo(int cpu)
   7.615 +{
   7.616 +	int i;
   7.617 +
   7.618 +	for_each_cpu(i)
   7.619 +		if (mt_info[i].valid &&  mt_info[i].socket_id ==
   7.620 +		    				cpu_data(cpu)->socket_id)
   7.621 +			mt_info[i].valid = 0;
   7.622 +}
   7.623 +
   7.624 +static inline void
   7.625 +clear_cpu_sibling_map(int cpu)
   7.626 +{
   7.627 +	int i;
   7.628 +
   7.629 +	for_each_cpu_mask(i, cpu_sibling_map[cpu])
   7.630 +		cpu_clear(cpu, cpu_sibling_map[i]);
   7.631 +	for_each_cpu_mask(i, cpu_core_map[cpu])
   7.632 +		cpu_clear(cpu, cpu_core_map[i]);
   7.633 +
   7.634 +	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
   7.635 +}
   7.636 +
   7.637 +static void
   7.638 +remove_siblinginfo(int cpu)
   7.639 +{
   7.640 +	int last = 0;
   7.641 +
   7.642 +	if (cpu_data(cpu)->threads_per_core == 1 &&
   7.643 +	    cpu_data(cpu)->cores_per_socket == 1) {
   7.644 +		cpu_clear(cpu, cpu_core_map[cpu]);
   7.645 +		cpu_clear(cpu, cpu_sibling_map[cpu]);
   7.646 +		return;
   7.647 +	}
   7.648 +
   7.649 +	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
   7.650 +
    7.651 +	/* remove it from all sibling maps */
   7.652 +	clear_cpu_sibling_map(cpu);
   7.653 +
   7.654 +	/* if this cpu is the last in the core group, remove all its info 
   7.655 +	 * from mt_info structure
   7.656 +	 */
   7.657 +	if (last)
   7.658 +		remove_from_mtinfo(cpu);
   7.659 +}
   7.660 +
   7.661 +extern void fixup_irqs(void);
   7.662 +/* must be called with cpucontrol mutex held */
   7.663 +int __cpu_disable(void)
   7.664 +{
   7.665 +	int cpu = smp_processor_id();
   7.666 +
   7.667 +	/*
    7.668 +	 * don't permit the boot processor for now
   7.669 +	 */
   7.670 +	if (cpu == 0)
   7.671 +		return -EBUSY;
   7.672 +
   7.673 +	remove_siblinginfo(cpu);
   7.674 +	cpu_clear(cpu, cpu_online_map);
   7.675 +	fixup_irqs();
   7.676 +	local_flush_tlb_all();
   7.677 +	cpu_clear(cpu, cpu_callin_map);
   7.678 +	return 0;
   7.679 +}
   7.680 +
   7.681 +void __cpu_die(unsigned int cpu)
   7.682 +{
   7.683 +	unsigned int i;
   7.684 +
   7.685 +	for (i = 0; i < 100; i++) {
   7.686 +		/* They ack this in play_dead by setting CPU_DEAD */
   7.687 +		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
   7.688 +		{
   7.689 +			printk ("CPU %d is now offline\n", cpu);
   7.690 +			return;
   7.691 +		}
   7.692 +		msleep(100);
   7.693 +	}
   7.694 + 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
   7.695 +}
   7.696 +#else /* !CONFIG_HOTPLUG_CPU */
   7.697 +int __cpu_disable(void)
   7.698 +{
   7.699 +	return -ENOSYS;
   7.700 +}
   7.701 +
   7.702 +void __cpu_die(unsigned int cpu)
   7.703 +{
   7.704 +	/* We said "no" in __cpu_disable */
   7.705 +	BUG();
   7.706 +}
   7.707 +#endif /* CONFIG_HOTPLUG_CPU */
   7.708 +
   7.709 +void
   7.710 +smp_cpus_done (unsigned int dummy)
   7.711 +{
   7.712 +	int cpu;
   7.713 +	unsigned long bogosum = 0;
   7.714 +
   7.715 +	/*
   7.716 +	 * Allow the user to impress friends.
   7.717 +	 */
   7.718 +
   7.719 +	for (cpu = 0; cpu < NR_CPUS; cpu++)
   7.720 +		if (cpu_online(cpu))
   7.721 +			bogosum += cpu_data(cpu)->loops_per_jiffy;
   7.722 +
   7.723 +	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
   7.724 +	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
   7.725 +}
   7.726 +
   7.727 +static inline void __devinit
   7.728 +set_cpu_sibling_map(int cpu)
   7.729 +{
   7.730 +	int i;
   7.731 +
   7.732 +	for_each_online_cpu(i) {
   7.733 +		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
   7.734 +			cpu_set(i, cpu_core_map[cpu]);
   7.735 +			cpu_set(cpu, cpu_core_map[i]);
   7.736 +			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
   7.737 +				cpu_set(i, cpu_sibling_map[cpu]);
   7.738 +				cpu_set(cpu, cpu_sibling_map[i]);
   7.739 +			}
   7.740 +		}
   7.741 +	}
   7.742 +}
   7.743 +
   7.744 +int __devinit
   7.745 +__cpu_up (unsigned int cpu)
   7.746 +{
   7.747 +	int ret;
   7.748 +	int sapicid;
   7.749 +
   7.750 +	sapicid = ia64_cpu_to_sapicid[cpu];
   7.751 +	if (sapicid == -1)
   7.752 +		return -EINVAL;
   7.753 +
   7.754 +	/*
    7.755 +	 * Already booted cpu? Not valid anymore since we don't
    7.756 +	 * do the idle-loop tightspin anymore.
   7.757 +	 */
   7.758 +	if (cpu_isset(cpu, cpu_callin_map))
   7.759 +		return -EINVAL;
   7.760 +
   7.761 +	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
   7.762 +	/* Processor goes to start_secondary(), sets online flag */
   7.763 +	ret = do_boot_cpu(sapicid, cpu);
   7.764 +	if (ret < 0)
   7.765 +		return ret;
   7.766 +
   7.767 +	if (cpu_data(cpu)->threads_per_core == 1 &&
   7.768 +	    cpu_data(cpu)->cores_per_socket == 1) {
   7.769 +		cpu_set(cpu, cpu_sibling_map[cpu]);
   7.770 +		cpu_set(cpu, cpu_core_map[cpu]);
   7.771 +		return 0;
   7.772 +	}
   7.773 +
   7.774 +	set_cpu_sibling_map(cpu);
   7.775 +
   7.776 +	return 0;
   7.777 +}
   7.778 +
   7.779 +/*
    7.780 + * Assume that CPUs have been discovered by some platform-dependent interface.  For
   7.781 + * SoftSDV/Lion, that would be ACPI.
   7.782 + *
   7.783 + * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
   7.784 + */
   7.785 +void __init
   7.786 +init_smp_config(void)
   7.787 +{
   7.788 +	struct fptr {
   7.789 +		unsigned long fp;
   7.790 +		unsigned long gp;
   7.791 +	} *ap_startup;
   7.792 +	long sal_ret;
   7.793 +
    7.794 +	/* Tell SAL where to drop the APs.  */
   7.795 +	ap_startup = (struct fptr *) start_ap;
   7.796 +	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
   7.797 +				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
   7.798 +	if (sal_ret < 0)
   7.799 +		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
   7.800 +		       ia64_sal_strerror(sal_ret));
   7.801 +}
   7.802 +
   7.803 +static inline int __devinit
   7.804 +check_for_mtinfo_index(void)
   7.805 +{
   7.806 +	int i;
   7.807 +	
   7.808 +	for_each_cpu(i)
   7.809 +		if (!mt_info[i].valid)
   7.810 +			return i;
   7.811 +
   7.812 +	return -1;
   7.813 +}
   7.814 +
   7.815 +/*
   7.816 + * Search the mt_info to find out if this socket's cid/tid information is
   7.817 + * cached or not. If the socket exists, fill in the core_id and thread_id 
   7.818 + * in cpuinfo
   7.819 + */
   7.820 +static int __devinit
   7.821 +check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
   7.822 +{
   7.823 +	int i;
   7.824 +	__u32 sid = c->socket_id;
   7.825 +
   7.826 +	for_each_cpu(i) {
   7.827 +		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
   7.828 +		    && mt_info[i].socket_id == sid) {
   7.829 +			c->core_id = mt_info[i].core_id;
   7.830 +			c->thread_id = mt_info[i].thread_id;
   7.831 +			return 1; /* not a new socket */
   7.832 +		}
   7.833 +	}
   7.834 +	return 0;
   7.835 +}
   7.836 +
   7.837 +/*
    7.838 + * identify_siblings(cpu) gets called from identify_cpu.  This populates the
    7.839 + * information related to logical execution units in the per_cpu_data structure.
   7.840 + */
   7.841 +void __devinit
   7.842 +identify_siblings(struct cpuinfo_ia64 *c)
   7.843 +{
   7.844 +	s64 status;
   7.845 +	u16 pltid;
   7.846 +	u64 proc_fixed_addr;
   7.847 +	int count, i;
   7.848 +	pal_logical_to_physical_t info;
   7.849 +
   7.850 +	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
   7.851 +		return;
   7.852 +
   7.853 +	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
   7.854 +		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
   7.855 +		       status);
   7.856 +		return;
   7.857 +	}
   7.858 +	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
   7.859 +		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
   7.860 +		return;
   7.861 +	}
   7.862 +	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
   7.863 +		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
   7.864 +		return;
   7.865 +	}
   7.866 +
   7.867 +	c->socket_id =  (pltid << 8) | info.overview_ppid;
   7.868 +	c->cores_per_socket = info.overview_cpp;
   7.869 +	c->threads_per_core = info.overview_tpc;
   7.870 +	count = c->num_log = info.overview_num_log;
   7.871 +
   7.872 +	/* If the thread and core id information is already cached, then
   7.873 +	 * we will simply update cpu_info and return. Otherwise, we will
   7.874 +	 * do the PAL calls and cache core and thread id's of all the siblings.
   7.875 +	 */
   7.876 +	if (check_for_new_socket(proc_fixed_addr, c))
   7.877 +		return;
   7.878 +
   7.879 +	for (i = 0; i < count; i++) {
   7.880 +		int index;
   7.881 +
   7.882 +		if (i && (status = ia64_pal_logical_to_phys(i, &info))
   7.883 +			  != PAL_STATUS_SUCCESS) {
    7.884 +			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
    7.885 +					" with %ld\n", status);
    7.886 +			return;
   7.887 +		}
   7.888 +		if (info.log2_la == proc_fixed_addr) {
   7.889 +			c->core_id = info.log1_cid;
   7.890 +			c->thread_id = info.log1_tid;
   7.891 +		}
   7.892 +
   7.893 +		index = check_for_mtinfo_index();
   7.894 +		/* We will not do the mt_info caching optimization in this case.
   7.895 +		 */
   7.896 +		if (index < 0)
   7.897 +			continue;
   7.898 +
   7.899 +		mt_info[index].valid = 1;
   7.900 +		mt_info[index].socket_id = c->socket_id;
   7.901 +		mt_info[index].core_id = info.log1_cid;
   7.902 +		mt_info[index].thread_id = info.log1_tid;
   7.903 +		mt_info[index].proc_fixed_addr = info.log2_la;
   7.904 +	}
   7.905 +}
   7.906 +#endif /* CONFIG_SMP ifdef XEN */
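
Illustrative stand-alone sketch, not part of this changeset: identify_siblings() above packs the SAL platform id and the PAL physical-processor id into cpuinfo's socket_id as (pltid << 8) | ppid. The sample values below are made up; only the bit packing mirrors the added code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t pltid = 2;	/* from ia64_sal_physical_id_info() (made-up value) */
	uint8_t  ppid  = 5;	/* overview_ppid from PAL (made-up value) */
	uint32_t socket_id = ((uint32_t)pltid << 8) | ppid;

	printf("socket_id=0x%x -> pltid=%u ppid=%u\n",
	       (unsigned)socket_id, (unsigned)(socket_id >> 8),
	       (unsigned)(socket_id & 0xff));
	return 0;
}
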
     8.1 --- a/xen/arch/ia64/linux/sal.c	Fri Aug 26 13:06:49 2005 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,302 +0,0 @@
     8.4 -/*
     8.5 - * System Abstraction Layer (SAL) interface routines.
     8.6 - *
     8.7 - * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
     8.8 - *	David Mosberger-Tang <davidm@hpl.hp.com>
     8.9 - * Copyright (C) 1999 VA Linux Systems
    8.10 - * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
    8.11 - */
    8.12 -#include <linux/config.h>
    8.13 -
    8.14 -#include <linux/kernel.h>
    8.15 -#include <linux/init.h>
    8.16 -#include <linux/module.h>
    8.17 -#include <linux/spinlock.h>
    8.18 -#include <linux/string.h>
    8.19 -
    8.20 -#include <asm/page.h>
    8.21 -#include <asm/sal.h>
    8.22 -#include <asm/pal.h>
    8.23 -
    8.24 - __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
    8.25 -unsigned long sal_platform_features;
    8.26 -
    8.27 -unsigned short sal_revision;
    8.28 -unsigned short sal_version;
    8.29 -
    8.30 -#define SAL_MAJOR(x) ((x) >> 8)
    8.31 -#define SAL_MINOR(x) ((x) & 0xff)
    8.32 -
    8.33 -static struct {
    8.34 -	void *addr;	/* function entry point */
    8.35 -	void *gpval;	/* gp value to use */
    8.36 -} pdesc;
    8.37 -
    8.38 -static long
    8.39 -default_handler (void)
    8.40 -{
    8.41 -	return -1;
    8.42 -}
    8.43 -
    8.44 -ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler;
    8.45 -ia64_sal_desc_ptc_t *ia64_ptc_domain_info;
    8.46 -
    8.47 -const char *
    8.48 -ia64_sal_strerror (long status)
    8.49 -{
    8.50 -	const char *str;
    8.51 -	switch (status) {
    8.52 -	      case 0: str = "Call completed without error"; break;
    8.53 -	      case 1: str = "Effect a warm boot of the system to complete "
    8.54 -			      "the update"; break;
    8.55 -	      case -1: str = "Not implemented"; break;
    8.56 -	      case -2: str = "Invalid argument"; break;
    8.57 -	      case -3: str = "Call completed with error"; break;
    8.58 -	      case -4: str = "Virtual address not registered"; break;
    8.59 -	      case -5: str = "No information available"; break;
    8.60 -	      case -6: str = "Insufficient space to add the entry"; break;
    8.61 -	      case -7: str = "Invalid entry_addr value"; break;
    8.62 -	      case -8: str = "Invalid interrupt vector"; break;
    8.63 -	      case -9: str = "Requested memory not available"; break;
    8.64 -	      case -10: str = "Unable to write to the NVM device"; break;
    8.65 -	      case -11: str = "Invalid partition type specified"; break;
    8.66 -	      case -12: str = "Invalid NVM_Object id specified"; break;
    8.67 -	      case -13: str = "NVM_Object already has the maximum number "
    8.68 -				"of partitions"; break;
    8.69 -	      case -14: str = "Insufficient space in partition for the "
    8.70 -				"requested write sub-function"; break;
    8.71 -	      case -15: str = "Insufficient data buffer space for the "
    8.72 -				"requested read record sub-function"; break;
    8.73 -	      case -16: str = "Scratch buffer required for the write/delete "
    8.74 -				"sub-function"; break;
    8.75 -	      case -17: str = "Insufficient space in the NVM_Object for the "
    8.76 -				"requested create sub-function"; break;
    8.77 -	      case -18: str = "Invalid value specified in the partition_rec "
    8.78 -				"argument"; break;
    8.79 -	      case -19: str = "Record oriented I/O not supported for this "
    8.80 -				"partition"; break;
    8.81 -	      case -20: str = "Bad format of record to be written or "
    8.82 -				"required keyword variable not "
    8.83 -				"specified"; break;
    8.84 -	      default: str = "Unknown SAL status code"; break;
    8.85 -	}
    8.86 -	return str;
    8.87 -}
    8.88 -
    8.89 -void __init
    8.90 -ia64_sal_handler_init (void *entry_point, void *gpval)
    8.91 -{
    8.92 -	/* fill in the SAL procedure descriptor and point ia64_sal to it: */
    8.93 -	pdesc.addr = entry_point;
    8.94 -	pdesc.gpval = gpval;
    8.95 -	ia64_sal = (ia64_sal_handler) &pdesc;
    8.96 -}
    8.97 -
    8.98 -static void __init
    8.99 -check_versions (struct ia64_sal_systab *systab)
   8.100 -{
   8.101 -	sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
   8.102 -	sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;
   8.103 -
   8.104 -	/* Check for broken firmware */
   8.105 -	if ((sal_revision == SAL_VERSION_CODE(49, 29))
   8.106 -	    && (sal_version == SAL_VERSION_CODE(49, 29)))
   8.107 -	{
   8.108 -		/*
   8.109 -		 * Old firmware for zx2000 prototypes have this weird version number,
   8.110 -		 * reset it to something sane.
   8.111 -		 */
   8.112 -		sal_revision = SAL_VERSION_CODE(2, 8);
   8.113 -		sal_version = SAL_VERSION_CODE(0, 0);
   8.114 -	}
   8.115 -}
   8.116 -
   8.117 -static void __init
   8.118 -sal_desc_entry_point (void *p)
   8.119 -{
   8.120 -	struct ia64_sal_desc_entry_point *ep = p;
   8.121 -	ia64_pal_handler_init(__va(ep->pal_proc));
   8.122 -	ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp));
   8.123 -}
   8.124 -
   8.125 -#ifdef CONFIG_SMP
   8.126 -static void __init
   8.127 -set_smp_redirect (int flag)
   8.128 -{
   8.129 -#ifndef CONFIG_HOTPLUG_CPU
   8.130 -	if (no_int_routing)
   8.131 -		smp_int_redirect &= ~flag;
   8.132 -	else
   8.133 -		smp_int_redirect |= flag;
   8.134 -#else
   8.135 -	/*
   8.136 -	 * For CPU Hotplug we dont want to do any chipset supported
   8.137 -	 * interrupt redirection. The reason is this would require that
   8.138 -	 * All interrupts be stopped and hard bind the irq to a cpu.
   8.139 -	 * Later when the interrupt is fired we need to set the redir hint
   8.140 -	 * on again in the vector. This is combersome for something that the
   8.141 -	 * user mode irq balancer will solve anyways.
   8.142 -	 */
   8.143 -	no_int_routing=1;
   8.144 -	smp_int_redirect &= ~flag;
   8.145 -#endif
   8.146 -}
   8.147 -#else
   8.148 -#define set_smp_redirect(flag)	do { } while (0)
   8.149 -#endif
   8.150 -
   8.151 -static void __init
   8.152 -sal_desc_platform_feature (void *p)
   8.153 -{
   8.154 -	struct ia64_sal_desc_platform_feature *pf = p;
   8.155 -	sal_platform_features = pf->feature_mask;
   8.156 -
   8.157 -	printk(KERN_INFO "SAL Platform features:");
   8.158 -	if (!sal_platform_features) {
   8.159 -		printk(" None\n");
   8.160 -		return;
   8.161 -	}
   8.162 -
   8.163 -	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK)
   8.164 -		printk(" BusLock");
   8.165 -	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) {
   8.166 -		printk(" IRQ_Redirection");
   8.167 -		set_smp_redirect(SMP_IRQ_REDIRECTION);
   8.168 -	}
   8.169 -	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) {
   8.170 -		printk(" IPI_Redirection");
   8.171 -		set_smp_redirect(SMP_IPI_REDIRECTION);
   8.172 -	}
   8.173 -	if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)
   8.174 -		printk(" ITC_Drift");
   8.175 -	printk("\n");
   8.176 -}
   8.177 -
   8.178 -#ifdef CONFIG_SMP
   8.179 -static void __init
   8.180 -sal_desc_ap_wakeup (void *p)
   8.181 -{
   8.182 -	struct ia64_sal_desc_ap_wakeup *ap = p;
   8.183 -
   8.184 -	switch (ap->mechanism) {
   8.185 -	case IA64_SAL_AP_EXTERNAL_INT:
   8.186 -		ap_wakeup_vector = ap->vector;
   8.187 -		printk(KERN_INFO "SAL: AP wakeup using external interrupt "
   8.188 -				"vector 0x%lx\n", ap_wakeup_vector);
   8.189 -		break;
   8.190 -	default:
   8.191 -		printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n");
   8.192 -		break;
   8.193 -	}
   8.194 -}
   8.195 -
   8.196 -static void __init
   8.197 -chk_nointroute_opt(void)
   8.198 -{
   8.199 -	char *cp;
   8.200 -	extern char saved_command_line[];
   8.201 -
   8.202 -	for (cp = saved_command_line; *cp; ) {
   8.203 -		if (memcmp(cp, "nointroute", 10) == 0) {
   8.204 -			no_int_routing = 1;
   8.205 -			printk ("no_int_routing on\n");
   8.206 -			break;
   8.207 -		} else {
   8.208 -			while (*cp != ' ' && *cp)
   8.209 -				++cp;
   8.210 -			while (*cp == ' ')
   8.211 -				++cp;
   8.212 -		}
   8.213 -	}
   8.214 -}
   8.215 -
   8.216 -#else
   8.217 -static void __init sal_desc_ap_wakeup(void *p) { }
   8.218 -#endif
   8.219 -
   8.220 -void __init
   8.221 -ia64_sal_init (struct ia64_sal_systab *systab)
   8.222 -{
   8.223 -	char *p;
   8.224 -	int i;
   8.225 -
   8.226 -	if (!systab) {
   8.227 -		printk(KERN_WARNING "Hmm, no SAL System Table.\n");
   8.228 -		return;
   8.229 -	}
   8.230 -
   8.231 -	if (strncmp(systab->signature, "SST_", 4) != 0)
   8.232 -		printk(KERN_ERR "bad signature in system table!");
   8.233 -
   8.234 -	check_versions(systab);
   8.235 -#ifdef CONFIG_SMP
   8.236 -	chk_nointroute_opt();
   8.237 -#endif
   8.238 -
   8.239 -	/* revisions are coded in BCD, so %x does the job for us */
   8.240 -	printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n",
   8.241 -			SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision),
   8.242 -			systab->oem_id, systab->product_id,
   8.243 -			systab->product_id[0] ? " " : "",
   8.244 -			SAL_MAJOR(sal_version), SAL_MINOR(sal_version));
   8.245 -
   8.246 -	p = (char *) (systab + 1);
   8.247 -	for (i = 0; i < systab->entry_count; i++) {
   8.248 -		/*
   8.249 -		 * The first byte of each entry type contains the type
   8.250 -		 * descriptor.
   8.251 -		 */
   8.252 -		switch (*p) {
   8.253 -		case SAL_DESC_ENTRY_POINT:
   8.254 -			sal_desc_entry_point(p);
   8.255 -			break;
   8.256 -		case SAL_DESC_PLATFORM_FEATURE:
   8.257 -			sal_desc_platform_feature(p);
   8.258 -			break;
   8.259 -		case SAL_DESC_PTC:
   8.260 -			ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
   8.261 -			break;
   8.262 -		case SAL_DESC_AP_WAKEUP:
   8.263 -			sal_desc_ap_wakeup(p);
   8.264 -			break;
   8.265 -		}
   8.266 -		p += SAL_DESC_SIZE(*p);
   8.267 -	}
   8.268 -}
   8.269 -
   8.270 -int
   8.271 -ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
   8.272 -		 u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
   8.273 -{
   8.274 -	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   8.275 -		return -1;
   8.276 -	SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
   8.277 -	return 0;
   8.278 -}
   8.279 -EXPORT_SYMBOL(ia64_sal_oemcall);
   8.280 -
   8.281 -int
   8.282 -ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1,
   8.283 -			u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
   8.284 -			u64 arg7)
   8.285 -{
   8.286 -	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   8.287 -		return -1;
   8.288 -	SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
   8.289 -			arg7);
   8.290 -	return 0;
   8.291 -}
   8.292 -EXPORT_SYMBOL(ia64_sal_oemcall_nolock);
   8.293 -
   8.294 -int
   8.295 -ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
   8.296 -			   u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5,
   8.297 -			   u64 arg6, u64 arg7)
   8.298 -{
   8.299 -	if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX)
   8.300 -		return -1;
   8.301 -	SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6,
   8.302 -			   arg7);
   8.303 -	return 0;
   8.304 -}
   8.305 -EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
     9.1 --- a/xen/arch/ia64/process.c	Fri Aug 26 13:06:49 2005 +0000
     9.2 +++ b/xen/arch/ia64/process.c	Tue Aug 30 12:41:54 2005 -0600
     9.3 @@ -224,7 +224,7 @@ panic_domain(regs,"psr.ic off, deliverin
     9.4  	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
     9.5  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
     9.6  #ifdef CONFIG_SMP
     9.7 -#error "sharedinfo doesn't handle smp yet"
     9.8 +#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
     9.9  #endif
    9.10  	regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
    9.11  
    10.1 --- a/xen/arch/ia64/smp.c	Fri Aug 26 13:06:49 2005 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,43 +0,0 @@
    10.4 -/*
    10.5 - *	Intel SMP support routines.
    10.6 - *
    10.7 - *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
    10.8 - *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
    10.9 - *
   10.10 - *	This code is released under the GNU General Public License version 2 or
   10.11 - *	later.
   10.12 - */
   10.13 -
   10.14 -//#include <xen/irq.h>
   10.15 -#include <xen/sched.h>
   10.16 -#include <xen/delay.h>
   10.17 -#include <xen/spinlock.h>
   10.18 -#include <asm/smp.h>
   10.19 -//#include <asm/mc146818rtc.h>
   10.20 -#include <asm/pgalloc.h>
   10.21 -//#include <asm/smpboot.h>
   10.22 -#include <asm/hardirq.h>
   10.23 -
   10.24 -
   10.25 -//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   10.26 -void flush_tlb_mask(cpumask_t mask)
   10.27 -{
   10.28 -	dummy();
   10.29 -}
   10.30 -//#if CONFIG_SMP || IA64
   10.31 -#if CONFIG_SMP
   10.32 -//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   10.33 -void smp_send_event_check_mask(cpumask_t mask)
   10.34 -{
   10.35 -	dummy();
   10.36 -	//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
   10.37 -}
   10.38 -
   10.39 -
   10.40 -//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   10.41 -int try_flush_tlb_mask(cpumask_t mask)
   10.42 -{
   10.43 -	dummy();
   10.44 -	return 1;
   10.45 -}
   10.46 -#endif
    11.1 --- a/xen/arch/ia64/smpboot.c	Fri Aug 26 13:06:49 2005 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,2 +0,0 @@
    11.4 -// expand later
    11.5 -int ht_per_core = 1;
    12.1 --- a/xen/arch/ia64/xensetup.c	Fri Aug 26 13:06:49 2005 +0000
    12.2 +++ b/xen/arch/ia64/xensetup.c	Tue Aug 30 12:41:54 2005 -0600
    12.3 @@ -27,6 +27,8 @@ char saved_command_line[COMMAND_LINE_SIZ
    12.4  
    12.5  struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
    12.6  
    12.7 +cpumask_t cpu_present_map;
    12.8 +
    12.9  #ifdef CLONE_DOMAIN0
   12.10  struct domain *clones[CLONE_DOMAIN0];
   12.11  #endif
    13.1 --- a/xen/arch/ia64/xentime.c	Fri Aug 26 13:06:49 2005 +0000
    13.2 +++ b/xen/arch/ia64/xentime.c	Tue Aug 30 12:41:54 2005 -0600
    13.3 @@ -32,6 +32,10 @@
    13.4  #endif
    13.5  #include <xen/softirq.h>
    13.6  
    13.7 +#ifdef XEN
    13.8 +seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
    13.9 +#endif
   13.10 +
   13.11  #define TIME_KEEPER_ID  0
   13.12  extern unsigned long wall_jiffies;
   13.13  
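
Illustrative stand-alone sketch, not part of this changeset: xtime_lock above is a seqlock, i.e. a sequence counter that is odd while the time keeper is writing; readers retry if the counter changed or was odd. This toy single-threaded model (made-up names, no real locking or barriers) shows only the retry rule.

#include <stdio.h>

static unsigned seq;		/* even = quiescent, odd = write in progress */
static unsigned long ticks;	/* stand-in for the protected time state */

static void write_tick(void)
{
	seq++;			/* begin write: counter becomes odd */
	ticks++;
	seq++;			/* end write: counter becomes even again */
}

static unsigned long read_time(void)
{
	unsigned start;
	unsigned long v;

	do {
		start = seq;	/* cf. read_seqbegin() */
		v = ticks;
	} while ((start & 1) || seq != start);	/* cf. read_seqretry() */
	return v;
}

int main(void)
{
	write_tick();
	printf("ticks=%lu\n", read_time());
	return 0;
}
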
    14.1 --- a/xen/include/asm-ia64/config.h	Fri Aug 26 13:06:49 2005 +0000
    14.2 +++ b/xen/include/asm-ia64/config.h	Tue Aug 30 12:41:54 2005 -0600
    14.3 @@ -21,6 +21,22 @@
    14.4  #define CONFIG_EFI_PCDP
    14.5  #define CONFIG_SERIAL_SGI_L1_CONSOLE
    14.6  
    14.7 +#undef CONFIG_XEN_SMP
    14.8 +
    14.9 +#ifdef CONFIG_XEN_SMP
   14.10 +#define CONFIG_SMP 1
   14.11 +#define NR_CPUS 2
   14.12 +#define CONFIG_NR_CPUS 2
   14.13 +#else
   14.14 +#undef CONFIG_SMP
   14.15 +#define NR_CPUS 1
   14.16 +#define CONFIG_NR_CPUS 1
   14.17 +#endif
   14.18 +//#define NR_CPUS 16
   14.19 +//#define CONFIG_NR_CPUS 16
   14.20 +//leave SMP for a later time
   14.21 +//#undef CONFIG_SMP
   14.22 +
   14.23  #ifndef __ASSEMBLY__
   14.24  
   14.25  // can't find where this typedef was before?!?
   14.26 @@ -75,13 +91,16 @@ extern char _end[]; /* standard ELF symb
   14.27  //#define __cond_lock(x) (x)
   14.28  #define __must_check
   14.29  #define __deprecated
   14.30 +#ifndef RELOC_HIDE
   14.31 +# define RELOC_HIDE(ptr, off)					\
   14.32 +  ({ unsigned long __ptr;					\
   14.33 +     __ptr = (unsigned long) (ptr);				\
   14.34 +    (typeof(ptr)) (__ptr + (off)); })
   14.35 +#endif
   14.36  
   14.37  // xen/include/asm/config.h
   14.38  #define HZ 100
   14.39 -// leave SMP for a later time
   14.40 -#define NR_CPUS 1
   14.41 -//#define NR_CPUS 16
   14.42 -//#define CONFIG_NR_CPUS 16
   14.43 +// FIXME SMP: leave SMP for a later time
   14.44  #define barrier() __asm__ __volatile__("": : :"memory")
   14.45  
   14.46  ///////////////////////////////////////////////////////////////
   14.47 @@ -99,13 +118,18 @@ extern char _end[]; /* standard ELF symb
   14.48  
   14.49  // from include/asm-ia64/smp.h
   14.50  #ifdef CONFIG_SMP
   14.51 -#error "Lots of things to fix to enable CONFIG_SMP!"
   14.52 +#warning "Lots of things to fix to enable CONFIG_SMP!"
   14.53  #endif
   14.54 +// FIXME SMP
   14.55  #define	get_cpu()	0
   14.56  #define put_cpu()	do {} while(0)
   14.57  
   14.58  // needed for common/dom0_ops.c until hyperthreading is supported
   14.59 +#ifdef CONFIG_SMP
   14.60 +extern int smp_num_siblings;
   14.61 +#else
   14.62  #define smp_num_siblings 1
   14.63 +#endif
   14.64  
   14.65  // from linux/include/linux/mm.h
   14.66  struct page;
   14.67 @@ -253,10 +277,6 @@ extern int ht_per_core;
   14.68  
   14.69  #define CONFIG_MCKINLEY
   14.70  
   14.71 -//#define CONFIG_SMP 1
   14.72 -//#define CONFIG_NR_CPUS 2
   14.73 -//leave SMP for a later time
   14.74 -#undef CONFIG_SMP
   14.75  #undef CONFIG_X86_LOCAL_APIC
   14.76  #undef CONFIG_X86_IO_APIC
   14.77  #undef CONFIG_X86_L1_CACHE_SHIFT
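
Illustrative stand-alone sketch, not part of this changeset: the RELOC_HIDE() fallback added above offsets a pointer by an arbitrary displacement while hiding the arithmetic from the compiler (per-CPU accessors typically use it this way). It requires GCC statement expressions and typeof; the array and offset below are made up.

#include <stdio.h>

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

int main(void)
{
	static int counter[2] = { 10, 20 };	/* pretend per-CPU copies */
	unsigned long cpu1_off = sizeof(int);	/* made-up per-CPU offset */
	int *p = RELOC_HIDE(&counter[0], cpu1_off);

	printf("%d\n", *p);	/* prints the CPU 1 copy: 20 */
	return 0;
}
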
    15.1 --- a/xen/include/asm-ia64/linux-xen/asm/pal.h	Fri Aug 26 13:06:49 2005 +0000
    15.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pal.h	Tue Aug 30 12:41:54 2005 -0600
    15.3 @@ -67,6 +67,7 @@
    15.4  #define PAL_REGISTER_INFO	39	/* return AR and CR register information*/
    15.5  #define PAL_SHUTDOWN		40	/* enter processor shutdown state */
    15.6  #define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
    15.7 +#define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
    15.8  
    15.9  #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
   15.10  #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
   15.11 @@ -1559,7 +1560,76 @@ ia64_pal_prefetch_visibility (s64 trans_
   15.12  	return iprv.status;
   15.13  }
   15.14  
   15.15 +/* data structure for getting information on logical to physical mappings */
   15.16 +typedef union pal_log_overview_u {
   15.17 +	struct {
   15.18 +		u64	num_log		:16,	/* Total number of logical
   15.19 +						 * processors on this die
   15.20 +						 */
   15.21 +			tpc		:8,	/* Threads per core */
   15.22 +			reserved3	:8,	/* Reserved */
   15.23 +			cpp		:8,	/* Cores per processor */
   15.24 +			reserved2	:8,	/* Reserved */
   15.25 +			ppid		:8,	/* Physical processor ID */
   15.26 +			reserved1	:8;	/* Reserved */
   15.27 +	} overview_bits;
   15.28 +	u64 overview_data;
   15.29 +} pal_log_overview_t;
   15.30 +
   15.31 +typedef union pal_proc_n_log_info1_u{
   15.32 +	struct {
   15.33 +		u64	tid		:16,	/* Thread id */
   15.34 +			reserved2	:16,	/* Reserved */
   15.35 +			cid		:16,	/* Core id */
   15.36 +			reserved1	:16;	/* Reserved */
   15.37 +	} ppli1_bits;
   15.38 +	u64	ppli1_data;
   15.39 +} pal_proc_n_log_info1_t;
   15.40 +
   15.41 +typedef union pal_proc_n_log_info2_u {
   15.42 +	struct {
   15.43 +		u64	la		:16,	/* Logical address */
   15.44 +			reserved	:48;	/* Reserved */
   15.45 +	} ppli2_bits;
   15.46 +	u64	ppli2_data;
   15.47 +} pal_proc_n_log_info2_t;
   15.48 +
   15.49 +typedef struct pal_logical_to_physical_s
   15.50 +{
   15.51 +	pal_log_overview_t overview;
   15.52 +	pal_proc_n_log_info1_t ppli1;
   15.53 +	pal_proc_n_log_info2_t ppli2;
   15.54 +} pal_logical_to_physical_t;
   15.55 +
   15.56 +#define overview_num_log	overview.overview_bits.num_log
   15.57 +#define overview_tpc		overview.overview_bits.tpc
   15.58 +#define overview_cpp		overview.overview_bits.cpp
   15.59 +#define overview_ppid		overview.overview_bits.ppid
   15.60 +#define log1_tid		ppli1.ppli1_bits.tid
   15.61 +#define log1_cid		ppli1.ppli1_bits.cid
   15.62 +#define log2_la			ppli2.ppli2_bits.la
   15.63 +
   15.64 +/* Get information on logical to physical processor mappings. */
   15.65 +static inline s64
   15.66 +ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
   15.67 +{
   15.68 +	struct ia64_pal_retval iprv;
   15.69 +
   15.70 +	PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
   15.71 +
   15.72 +	if (iprv.status == PAL_STATUS_SUCCESS)
   15.73 +	{
   15.74 +		if (proc_number == 0)
   15.75 +			mapping->overview.overview_data = iprv.v0;
   15.76 +		mapping->ppli1.ppli1_data = iprv.v1;
   15.77 +		mapping->ppli2.ppli2_data = iprv.v2;
   15.78 +	}
   15.79 +
   15.80 +	return iprv.status;
   15.81 +}
   15.82 +#ifdef XEN
   15.83  #include <asm/vmx_pal.h>
   15.84 +#endif
   15.85  #endif /* __ASSEMBLY__ */
   15.86  
   15.87  #endif /* _ASM_IA64_PAL_H */
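
Illustrative stand-alone sketch, not part of this changeset: unpacking the PAL_LOGICAL_TO_PHYSICAL overview word (iprv.v0) into the fields the pal_log_overview_t bitfields above describe, using explicit shifts. The shifts assume the ia64/GCC layout where the first bitfield member occupies the low-order bits; the sample value is fabricated.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t v0 = 0x0001000200020004ULL;	/* fabricated overview_data */
	unsigned num_log = (unsigned)(v0 & 0xffff);	 /* logical CPUs on the die */
	unsigned tpc	 = (unsigned)((v0 >> 16) & 0xff); /* threads per core */
	unsigned cpp	 = (unsigned)((v0 >> 32) & 0xff); /* cores per processor */
	unsigned ppid	 = (unsigned)((v0 >> 48) & 0xff); /* physical processor id */

	printf("num_log=%u tpc=%u cpp=%u ppid=%u\n", num_log, tpc, cpp, ppid);
	return 0;
}
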
    16.1 --- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Fri Aug 26 13:06:49 2005 +0000
    16.2 +++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Tue Aug 30 12:41:54 2005 -0600
    16.3 @@ -164,6 +164,13 @@ struct cpuinfo_ia64 {
    16.4  #ifdef CONFIG_SMP
    16.5  	__u64 loops_per_jiffy;
    16.6  	int cpu;
    16.7 +	__u32 socket_id;	/* physical processor socket id */
    16.8 +	__u16 core_id;		/* core id */
    16.9 +	__u16 thread_id;	/* thread id */
   16.10 +	__u16 num_log;		/* Total number of logical processors on
   16.11 +				 * this socket that were successfully booted */
   16.12 +	__u8  cores_per_socket;	/* Cores per processor socket */
   16.13 +	__u8  threads_per_core;	/* Threads per core */
   16.14  #endif
   16.15  
   16.16  	/* CPUID-derived information: */
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Tue Aug 30 12:41:54 2005 -0600
    17.3 @@ -0,0 +1,241 @@
    17.4 +#ifndef _ASM_IA64_SPINLOCK_H
    17.5 +#define _ASM_IA64_SPINLOCK_H
    17.6 +
    17.7 +/*
    17.8 + * Copyright (C) 1998-2003 Hewlett-Packard Co
    17.9 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   17.10 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
   17.11 + *
   17.12 + * This file is used for SMP configurations only.
   17.13 + */
   17.14 +
   17.15 +#include <linux/compiler.h>
   17.16 +#include <linux/kernel.h>
   17.17 +
   17.18 +#include <asm/atomic.h>
   17.19 +#include <asm/bitops.h>
   17.20 +#include <asm/intrinsics.h>
   17.21 +#include <asm/system.h>
   17.22 +
   17.23 +typedef struct {
   17.24 +	volatile unsigned int lock;
   17.25 +#ifdef CONFIG_PREEMPT
   17.26 +	unsigned int break_lock;
   17.27 +#endif
   17.28 +#ifdef XEN
   17.29 +	unsigned char recurse_cpu;
   17.30 +	unsigned char recurse_cnt;
   17.31 +#endif
   17.32 +} spinlock_t;
   17.33 +
   17.34 +#define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
   17.35 +#define spin_lock_init(x)			((x)->lock = 0)
   17.36 +
   17.37 +#ifdef ASM_SUPPORTED
   17.38 +/*
   17.39 + * Try to get the lock.  If we fail to get the lock, make a non-standard call to
   17.40 + * ia64_spinlock_contention().  We do not use a normal call because that would force all
   17.41 + * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
   17.42 + * carefully coded to touch only those registers that spin_lock() marks "clobbered".
   17.43 + */
   17.44 +
   17.45 +#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
   17.46 +
   17.47 +static inline void
   17.48 +_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
   17.49 +{
   17.50 +	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
   17.51 +
   17.52 +#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
   17.53 +# ifdef CONFIG_ITANIUM
   17.54 +	/* don't use brl on Itanium... */
   17.55 +	asm volatile ("{\n\t"
   17.56 +		      "  mov ar.ccv = r0\n\t"
   17.57 +		      "  mov r28 = ip\n\t"
   17.58 +		      "  mov r30 = 1;;\n\t"
   17.59 +		      "}\n\t"
   17.60 +		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
   17.61 +		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
   17.62 +		      "cmp4.ne p14, p0 = r30, r0\n\t"
   17.63 +		      "mov b6 = r29;;\n\t"
   17.64 +		      "mov r27=%2\n\t"
   17.65 +		      "(p14) br.cond.spnt.many b6"
   17.66 +		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   17.67 +# else
   17.68 +	asm volatile ("{\n\t"
   17.69 +		      "  mov ar.ccv = r0\n\t"
   17.70 +		      "  mov r28 = ip\n\t"
   17.71 +		      "  mov r30 = 1;;\n\t"
   17.72 +		      "}\n\t"
   17.73 +		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
   17.74 +		      "cmp4.ne p14, p0 = r30, r0\n\t"
   17.75 +		      "mov r27=%2\n\t"
   17.76 +		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
   17.77 +		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   17.78 +# endif /* CONFIG_MCKINLEY */
   17.79 +#else
   17.80 +# ifdef CONFIG_ITANIUM
   17.81 +	/* don't use brl on Itanium... */
    17.82 +	/* mis-declare, so we get the entry-point, not its function descriptor: */
   17.83 +	asm volatile ("mov r30 = 1\n\t"
   17.84 +		      "mov r27=%2\n\t"
   17.85 +		      "mov ar.ccv = r0;;\n\t"
   17.86 +		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
   17.87 +		      "movl r29 = ia64_spinlock_contention;;\n\t"
   17.88 +		      "cmp4.ne p14, p0 = r30, r0\n\t"
   17.89 +		      "mov b6 = r29;;\n\t"
   17.90 +		      "(p14) br.call.spnt.many b6 = b6"
   17.91 +		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   17.92 +# else
   17.93 +	asm volatile ("mov r30 = 1\n\t"
   17.94 +		      "mov r27=%2\n\t"
   17.95 +		      "mov ar.ccv = r0;;\n\t"
   17.96 +		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
   17.97 +		      "cmp4.ne p14, p0 = r30, r0\n\t"
   17.98 +		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
   17.99 +		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
  17.100 +# endif /* CONFIG_MCKINLEY */
  17.101 +#endif
  17.102 +}
  17.103 +#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
  17.104 +#else /* !ASM_SUPPORTED */
  17.105 +#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
  17.106 +# define _raw_spin_lock(x)								\
  17.107 +do {											\
  17.108 +	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
  17.109 +	__u64 ia64_spinlock_val;							\
  17.110 +	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
  17.111 +	if (unlikely(ia64_spinlock_val)) {						\
  17.112 +		do {									\
  17.113 +			while (*ia64_spinlock_ptr)					\
  17.114 +				ia64_barrier();						\
  17.115 +			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
  17.116 +		} while (ia64_spinlock_val);						\
  17.117 +	}										\
  17.118 +} while (0)
  17.119 +#endif /* !ASM_SUPPORTED */
  17.120 +
  17.121 +#define spin_is_locked(x)	((x)->lock != 0)
  17.122 +#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
  17.123 +#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
  17.124 +#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
  17.125 +
  17.126 +#ifdef XEN
  17.127 +/*
  17.128 + * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
  17.129 + * reentered recursively on the same CPU. All critical regions that may form
  17.130 + * part of a recursively-nested set must be protected by these forms. If there
  17.131 + * are any critical regions that cannot form part of such a set, they can use
  17.132 + * standard spin_[un]lock().
  17.133 + */
  17.134 +#define _raw_spin_lock_recursive(_lock)            \
  17.135 +    do {                                           \
  17.136 +        int cpu = smp_processor_id();              \
  17.137 +        if ( likely((_lock)->recurse_cpu != cpu) ) \
  17.138 +        {                                          \
  17.139 +            spin_lock(_lock);                      \
  17.140 +            (_lock)->recurse_cpu = cpu;            \
  17.141 +        }                                          \
  17.142 +        (_lock)->recurse_cnt++;                    \
  17.143 +    } while ( 0 )
  17.144 +
  17.145 +#define _raw_spin_unlock_recursive(_lock)          \
  17.146 +    do {                                           \
  17.147 +        if ( likely(--(_lock)->recurse_cnt == 0) ) \
  17.148 +        {                                          \
  17.149 +            (_lock)->recurse_cpu = -1;             \
  17.150 +            spin_unlock(_lock);                    \
  17.151 +        }                                          \
  17.152 +    } while ( 0 )
  17.153 +#endif
  17.154 +
  17.155 +typedef struct {
  17.156 +	volatile unsigned int read_counter	: 31;
  17.157 +	volatile unsigned int write_lock	:  1;
  17.158 +#ifdef CONFIG_PREEMPT
  17.159 +	unsigned int break_lock;
  17.160 +#endif
  17.161 +} rwlock_t;
  17.162 +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
  17.163 +
  17.164 +#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
  17.165 +#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
  17.166 +#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
  17.167 +
  17.168 +#define _raw_read_lock(rw)								\
  17.169 +do {											\
  17.170 +	rwlock_t *__read_lock_ptr = (rw);						\
  17.171 +											\
  17.172 +	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
  17.173 +		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
  17.174 +		while (*(volatile int *)__read_lock_ptr < 0)				\
  17.175 +			cpu_relax();							\
  17.176 +	}										\
  17.177 +} while (0)
  17.178 +
  17.179 +#define _raw_read_unlock(rw)					\
  17.180 +do {								\
  17.181 +	rwlock_t *__read_lock_ptr = (rw);			\
  17.182 +	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
  17.183 +} while (0)
  17.184 +
  17.185 +#ifdef ASM_SUPPORTED
  17.186 +#define _raw_write_lock(rw)							\
  17.187 +do {										\
  17.188 + 	__asm__ __volatile__ (							\
  17.189 +		"mov ar.ccv = r0\n"						\
  17.190 +		"dep r29 = -1, r0, 31, 1;;\n"					\
  17.191 +		"1:\n"								\
  17.192 +		"ld4 r2 = [%0];;\n"						\
  17.193 +		"cmp4.eq p0,p7 = r0,r2\n"					\
  17.194 +		"(p7) br.cond.spnt.few 1b \n"					\
  17.195 +		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
  17.196 +		"cmp4.eq p0,p7 = r0, r2\n"					\
  17.197 +		"(p7) br.cond.spnt.few 1b;;\n"					\
  17.198 +		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
  17.199 +} while(0)
  17.200 +
  17.201 +#define _raw_write_trylock(rw)							\
  17.202 +({										\
  17.203 +	register long result;							\
  17.204 +										\
  17.205 +	__asm__ __volatile__ (							\
  17.206 +		"mov ar.ccv = r0\n"						\
  17.207 +		"dep r29 = -1, r0, 31, 1;;\n"					\
  17.208 +		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
  17.209 +		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
  17.210 +	(result == 0);								\
  17.211 +})
  17.212 +
  17.213 +#else /* !ASM_SUPPORTED */
  17.214 +
  17.215 +#define _raw_write_lock(l)								\
  17.216 +({											\
  17.217 +	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
  17.218 +	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
  17.219 +	do {										\
  17.220 +		while (*ia64_write_lock_ptr)						\
  17.221 +			ia64_barrier();							\
  17.222 +		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
  17.223 +	} while (ia64_val);								\
  17.224 +})
  17.225 +
  17.226 +#define _raw_write_trylock(rw)						\
  17.227 +({									\
  17.228 +	__u64 ia64_val;							\
  17.229 +	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
  17.230 +	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
  17.231 +	(ia64_val == 0);						\
  17.232 +})
  17.233 +
  17.234 +#endif /* !ASM_SUPPORTED */
  17.235 +
  17.236 +#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
  17.237 +
  17.238 +#define _raw_write_unlock(x)								\
  17.239 +({											\
  17.240 +	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
  17.241 +	clear_bit(31, (x));								\
  17.242 +})
  17.243 +
  17.244 +#endif /*  _ASM_IA64_SPINLOCK_H */
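
Illustrative stand-alone sketch, not part of this changeset: the XEN-only recurse_cpu/recurse_cnt fields above let the same CPU re-enter a lock it already holds; only the outermost acquire and the matching final release touch the underlying spinlock. This single-threaded model stubs out smp_processor_id() and the real lock, so it shows only the re-entry accounting.

#include <stdio.h>

struct rec_lock {
	int locked;			/* stand-in for the real spinlock */
	int recurse_cpu;
	unsigned char recurse_cnt;
};

static int smp_processor_id(void) { return 0; }	/* stub */

static void lock_recursive(struct rec_lock *l)
{
	int cpu = smp_processor_id();

	if (l->recurse_cpu != cpu) {	/* first acquire on this CPU */
		l->locked = 1;		/* real code: spin_lock(l) */
		l->recurse_cpu = cpu;
	}
	l->recurse_cnt++;
}

static void unlock_recursive(struct rec_lock *l)
{
	if (--l->recurse_cnt == 0) {	/* outermost release drops the lock */
		l->recurse_cpu = -1;
		l->locked = 0;		/* real code: spin_unlock(l) */
	}
}

int main(void)
{
	struct rec_lock l = { 0, -1, 0 };

	lock_recursive(&l);
	lock_recursive(&l);		/* nested acquire: count only */
	unlock_recursive(&l);
	unlock_recursive(&l);		/* lock actually released here */
	printf("locked=%d cnt=%d\n", l.locked, l.recurse_cnt);
	return 0;
}
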
    18.1 --- a/xen/include/asm-ia64/linux-xen/asm/system.h	Fri Aug 26 13:06:49 2005 +0000
    18.2 +++ b/xen/include/asm-ia64/linux-xen/asm/system.h	Tue Aug 30 12:41:54 2005 -0600
    18.3 @@ -247,9 +247,9 @@ extern void ia64_load_extra (struct task
    18.4   */
    18.5  # define switch_to(prev,next,last) do {						\
    18.6  	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {				\
    18.7 -		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
    18.8 -		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
    18.9 -		__ia64_save_fpu((prev)->thread.fph);				\
   18.10 +		/* ia64_psr(ia64_task_regs(prev))->mfh = 0; */			\
   18.11 +		/* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */			\
   18.12 +		/* __ia64_save_fpu((prev)->thread.fph); */				\
   18.13  	}									\
   18.14  	__switch_to(prev, next, last);						\
   18.15  } while (0)
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Tue Aug 30 12:41:54 2005 -0600
    19.3 @@ -0,0 +1,105 @@
    19.4 +#ifndef _ASM_IA64_TLBFLUSH_H
    19.5 +#define _ASM_IA64_TLBFLUSH_H
    19.6 +
    19.7 +/*
    19.8 + * Copyright (C) 2002 Hewlett-Packard Co
    19.9 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   19.10 + */
   19.11 +
   19.12 +#include <linux/config.h>
   19.13 +
   19.14 +#include <linux/mm.h>
   19.15 +
   19.16 +#include <asm/intrinsics.h>
   19.17 +#include <asm/mmu_context.h>
   19.18 +#include <asm/page.h>
   19.19 +
   19.20 +/*
   19.21 + * Now for some TLB flushing routines.  This is the kind of stuff that
   19.22 + * can be very expensive, so try to avoid them whenever possible.
   19.23 + */
   19.24 +
   19.25 +/*
   19.26 + * Flush everything (kernel mapping may also have changed due to
   19.27 + * vmalloc/vfree).
   19.28 + */
   19.29 +extern void local_flush_tlb_all (void);
   19.30 +
   19.31 +#ifdef CONFIG_SMP
   19.32 +  extern void smp_flush_tlb_all (void);
   19.33 +  extern void smp_flush_tlb_mm (struct mm_struct *mm);
   19.34 +# define flush_tlb_all()	smp_flush_tlb_all()
   19.35 +#else
   19.36 +# define flush_tlb_all()	local_flush_tlb_all()
   19.37 +#endif
   19.38 +
   19.39 +static inline void
   19.40 +local_finish_flush_tlb_mm (struct mm_struct *mm)
   19.41 +{
   19.42 +#ifndef XEN
   19.43 +	if (mm == current->active_mm)
   19.44 +		activate_context(mm);
   19.45 +#endif
   19.46 +}
   19.47 +
   19.48 +/*
   19.49 + * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
   19.50 + * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
   19.51 + * the PTEs of the parent task.
   19.52 + */
   19.53 +static inline void
   19.54 +flush_tlb_mm (struct mm_struct *mm)
   19.55 +{
   19.56 +	if (!mm)
   19.57 +		return;
   19.58 +
   19.59 +#ifndef XEN
   19.60 +	mm->context = 0;
   19.61 +#endif
   19.62 +
   19.63 +	if (atomic_read(&mm->mm_users) == 0)
   19.64 +		return;		/* happens as a result of exit_mmap() */
   19.65 +
   19.66 +#ifdef CONFIG_SMP
   19.67 +	smp_flush_tlb_mm(mm);
   19.68 +#else
   19.69 +	local_finish_flush_tlb_mm(mm);
   19.70 +#endif
   19.71 +}
   19.72 +
   19.73 +extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
   19.74 +
   19.75 +/*
   19.76 + * Page-granular tlb flush.
   19.77 + */
   19.78 +static inline void
   19.79 +flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
   19.80 +{
   19.81 +#ifdef CONFIG_SMP
   19.82 +	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
   19.83 +#else
   19.84 +	if (vma->vm_mm == current->active_mm)
   19.85 +		ia64_ptcl(addr, (PAGE_SHIFT << 2));
   19.86 +#ifndef XEN
   19.87 +	else
   19.88 +		vma->vm_mm->context = 0;
   19.89 +#endif
   19.90 +#endif
   19.91 +}
   19.92 +
   19.93 +/*
   19.94 + * Flush the TLB entries mapping the virtually mapped linear page
   19.95 + * table corresponding to address range [START-END).
   19.96 + */
   19.97 +static inline void
   19.98 +flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
   19.99 +{
  19.100 +	/*
  19.101 +	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
  19.102 +	 * interface (see tlb.h).
  19.103 +	 */
  19.104 +}
  19.105 +
  19.106 +#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
  19.107 +
  19.108 +#endif /* _ASM_IA64_TLBFLUSH_H */
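
Illustrative stand-alone sketch, not part of this changeset: in the SMP case flush_tlb_page() above flushes exactly the page containing addr by rounding with PAGE_MASK. The PAGE_SHIFT value and the address below are made up.

#include <stdio.h>

#define PAGE_SHIFT	14		/* e.g. 16KB pages; made-up choice */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr  = 0x4000abcdUL;	/* made-up faulting address */
	unsigned long start = addr & PAGE_MASK;
	unsigned long end   = start + PAGE_SIZE;

	printf("flush [%#lx, %#lx)\n", start, end);
	return 0;
}
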
    20.1 --- a/xen/include/asm-ia64/linux/asm/sal.h	Fri Aug 26 13:06:49 2005 +0000
    20.2 +++ b/xen/include/asm-ia64/linux/asm/sal.h	Tue Aug 30 12:41:54 2005 -0600
    20.3 @@ -91,6 +91,7 @@ extern spinlock_t sal_lock;
    20.4  #define SAL_PCI_CONFIG_READ		0x01000010
    20.5  #define SAL_PCI_CONFIG_WRITE		0x01000011
    20.6  #define SAL_FREQ_BASE			0x01000012
    20.7 +#define SAL_PHYSICAL_ID_INFO		0x01000013
    20.8  
    20.9  #define SAL_UPDATE_PAL			0x01000020
   20.10  
   20.11 @@ -815,6 +816,17 @@ ia64_sal_update_pal (u64 param_buf, u64 
   20.12  	return isrv.status;
   20.13  }
   20.14  
   20.15 +/* Get physical processor die mapping in the platform. */
   20.16 +static inline s64
   20.17 +ia64_sal_physical_id_info(u16 *splid)
   20.18 +{
   20.19 +	struct ia64_sal_retval isrv;
   20.20 +	SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
   20.21 +	if (splid)
   20.22 +		*splid = isrv.v0;
   20.23 +	return isrv.status;
   20.24 +}
   20.25 +
   20.26  extern unsigned long sal_platform_features;
   20.27  
   20.28  extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
   20.29 @@ -832,6 +844,44 @@ extern int ia64_sal_oemcall_nolock(struc
   20.30  				   u64, u64, u64, u64, u64);
   20.31  extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
   20.32  				      u64, u64, u64, u64, u64);
   20.33 +#ifdef CONFIG_HOTPLUG_CPU
   20.34 +/*
   20.35 + * System Abstraction Layer Specification
   20.36 + * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
   20.37 + * Note: region regs are stored first in head.S _start. Hence they must
   20.38 + * stay up front.
   20.39 + */
   20.40 +struct sal_to_os_boot {
   20.41 +	u64 rr[8];		/* Region Registers */
   20.42 +	u64	br[6];		/* br0: return addr into SAL boot rendez routine */
   20.43 +	u64 gr1;		/* SAL:GP */
   20.44 +	u64 gr12;		/* SAL:SP */
   20.45 +	u64 gr13;		/* SAL: Task Pointer */
   20.46 +	u64 fpsr;
   20.47 +	u64	pfs;
   20.48 +	u64 rnat;
   20.49 +	u64 unat;
   20.50 +	u64 bspstore;
   20.51 +	u64 dcr;		/* Default Control Register */
   20.52 +	u64 iva;
   20.53 +	u64 pta;
   20.54 +	u64 itv;
   20.55 +	u64 pmv;
   20.56 +	u64 cmcv;
   20.57 +	u64 lrr[2];
   20.58 +	u64 gr[4];
   20.59 +	u64 pr;			/* Predicate registers */
   20.60 +	u64 lc;			/* Loop Count */
   20.61 +	struct ia64_fpreg fp[20];
   20.62 +};
   20.63 +
   20.64 +/*
   20.65 + * Global array allocated for NR_CPUS at boot time
   20.66 + */
   20.67 +extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
   20.68 +
   20.69 +extern void ia64_jump_to_sal(struct sal_to_os_boot *);
   20.70 +#endif
   20.71  
   20.72  extern void ia64_sal_handler_init(void *entry_point, void *gpval);
   20.73  
    21.1 --- a/xen/include/asm-ia64/linux/asm/spinlock.h	Fri Aug 26 13:06:49 2005 +0000
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,208 +0,0 @@
    21.4 -#ifndef _ASM_IA64_SPINLOCK_H
    21.5 -#define _ASM_IA64_SPINLOCK_H
    21.6 -
    21.7 -/*
    21.8 - * Copyright (C) 1998-2003 Hewlett-Packard Co
    21.9 - *	David Mosberger-Tang <davidm@hpl.hp.com>
   21.10 - * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
   21.11 - *
   21.12 - * This file is used for SMP configurations only.
   21.13 - */
   21.14 -
   21.15 -#include <linux/compiler.h>
   21.16 -#include <linux/kernel.h>
   21.17 -
   21.18 -#include <asm/atomic.h>
   21.19 -#include <asm/bitops.h>
   21.20 -#include <asm/intrinsics.h>
   21.21 -#include <asm/system.h>
   21.22 -
   21.23 -typedef struct {
   21.24 -	volatile unsigned int lock;
   21.25 -#ifdef CONFIG_PREEMPT
   21.26 -	unsigned int break_lock;
   21.27 -#endif
   21.28 -} spinlock_t;
   21.29 -
   21.30 -#define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
   21.31 -#define spin_lock_init(x)			((x)->lock = 0)
   21.32 -
   21.33 -#ifdef ASM_SUPPORTED
   21.34 -/*
   21.35 - * Try to get the lock.  If we fail to get the lock, make a non-standard call to
   21.36 - * ia64_spinlock_contention().  We do not use a normal call because that would force all
   21.37 - * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
   21.38 - * carefully coded to touch only those registers that spin_lock() marks "clobbered".
   21.39 - */
   21.40 -
   21.41 -#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
   21.42 -
   21.43 -static inline void
   21.44 -_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
   21.45 -{
   21.46 -	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
   21.47 -
   21.48 -#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
   21.49 -# ifdef CONFIG_ITANIUM
   21.50 -	/* don't use brl on Itanium... */
   21.51 -	asm volatile ("{\n\t"
   21.52 -		      "  mov ar.ccv = r0\n\t"
   21.53 -		      "  mov r28 = ip\n\t"
   21.54 -		      "  mov r30 = 1;;\n\t"
   21.55 -		      "}\n\t"
   21.56 -		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
   21.57 -		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
   21.58 -		      "cmp4.ne p14, p0 = r30, r0\n\t"
   21.59 -		      "mov b6 = r29;;\n\t"
   21.60 -		      "mov r27=%2\n\t"
   21.61 -		      "(p14) br.cond.spnt.many b6"
   21.62 -		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   21.63 -# else
   21.64 -	asm volatile ("{\n\t"
   21.65 -		      "  mov ar.ccv = r0\n\t"
   21.66 -		      "  mov r28 = ip\n\t"
   21.67 -		      "  mov r30 = 1;;\n\t"
   21.68 -		      "}\n\t"
   21.69 -		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
   21.70 -		      "cmp4.ne p14, p0 = r30, r0\n\t"
   21.71 -		      "mov r27=%2\n\t"
   21.72 -		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
   21.73 -		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   21.74 -# endif /* CONFIG_MCKINLEY */
   21.75 -#else
   21.76 -# ifdef CONFIG_ITANIUM
   21.77 -	/* don't use brl on Itanium... */
   21.78 -	/* mis-declare, so we get the entry-point, not it's function descriptor: */
   21.79 -	asm volatile ("mov r30 = 1\n\t"
   21.80 -		      "mov r27=%2\n\t"
   21.81 -		      "mov ar.ccv = r0;;\n\t"
   21.82 -		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
   21.83 -		      "movl r29 = ia64_spinlock_contention;;\n\t"
   21.84 -		      "cmp4.ne p14, p0 = r30, r0\n\t"
   21.85 -		      "mov b6 = r29;;\n\t"
   21.86 -		      "(p14) br.call.spnt.many b6 = b6"
   21.87 -		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   21.88 -# else
   21.89 -	asm volatile ("mov r30 = 1\n\t"
   21.90 -		      "mov r27=%2\n\t"
   21.91 -		      "mov ar.ccv = r0;;\n\t"
   21.92 -		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
   21.93 -		      "cmp4.ne p14, p0 = r30, r0\n\t"
   21.94 -		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
   21.95 -		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
   21.96 -# endif /* CONFIG_MCKINLEY */
   21.97 -#endif
   21.98 -}
   21.99 -#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
  21.100 -#else /* !ASM_SUPPORTED */
  21.101 -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
  21.102 -# define _raw_spin_lock(x)								\
  21.103 -do {											\
  21.104 -	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
  21.105 -	__u64 ia64_spinlock_val;							\
  21.106 -	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
  21.107 -	if (unlikely(ia64_spinlock_val)) {						\
  21.108 -		do {									\
  21.109 -			while (*ia64_spinlock_ptr)					\
  21.110 -				ia64_barrier();						\
  21.111 -			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
  21.112 -		} while (ia64_spinlock_val);						\
  21.113 -	}										\
  21.114 -} while (0)
  21.115 -#endif /* !ASM_SUPPORTED */
  21.116 -
  21.117 -#define spin_is_locked(x)	((x)->lock != 0)
  21.118 -#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
  21.119 -#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
  21.120 -#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
  21.121 -
  21.122 -typedef struct {
  21.123 -	volatile unsigned int read_counter	: 31;
  21.124 -	volatile unsigned int write_lock	:  1;
  21.125 -#ifdef CONFIG_PREEMPT
  21.126 -	unsigned int break_lock;
  21.127 -#endif
  21.128 -} rwlock_t;
  21.129 -#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
  21.130 -
  21.131 -#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
  21.132 -#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
  21.133 -#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
  21.134 -
  21.135 -#define _raw_read_lock(rw)								\
  21.136 -do {											\
  21.137 -	rwlock_t *__read_lock_ptr = (rw);						\
  21.138 -											\
  21.139 -	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
  21.140 -		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
  21.141 -		while (*(volatile int *)__read_lock_ptr < 0)				\
  21.142 -			cpu_relax();							\
  21.143 -	}										\
  21.144 -} while (0)
  21.145 -
  21.146 -#define _raw_read_unlock(rw)					\
  21.147 -do {								\
  21.148 -	rwlock_t *__read_lock_ptr = (rw);			\
  21.149 -	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
  21.150 -} while (0)
  21.151 -
  21.152 -#ifdef ASM_SUPPORTED
  21.153 -#define _raw_write_lock(rw)							\
  21.154 -do {										\
  21.155 - 	__asm__ __volatile__ (							\
  21.156 -		"mov ar.ccv = r0\n"						\
  21.157 -		"dep r29 = -1, r0, 31, 1;;\n"					\
  21.158 -		"1:\n"								\
  21.159 -		"ld4 r2 = [%0];;\n"						\
  21.160 -		"cmp4.eq p0,p7 = r0,r2\n"					\
  21.161 -		"(p7) br.cond.spnt.few 1b \n"					\
  21.162 -		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
  21.163 -		"cmp4.eq p0,p7 = r0, r2\n"					\
  21.164 -		"(p7) br.cond.spnt.few 1b;;\n"					\
  21.165 -		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
  21.166 -} while(0)
  21.167 -
  21.168 -#define _raw_write_trylock(rw)							\
  21.169 -({										\
  21.170 -	register long result;							\
  21.171 -										\
  21.172 -	__asm__ __volatile__ (							\
  21.173 -		"mov ar.ccv = r0\n"						\
  21.174 -		"dep r29 = -1, r0, 31, 1;;\n"					\
  21.175 -		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
  21.176 -		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
  21.177 -	(result == 0);								\
  21.178 -})
  21.179 -
  21.180 -#else /* !ASM_SUPPORTED */
  21.181 -
  21.182 -#define _raw_write_lock(l)								\
  21.183 -({											\
  21.184 -	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
  21.185 -	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
  21.186 -	do {										\
  21.187 -		while (*ia64_write_lock_ptr)						\
  21.188 -			ia64_barrier();							\
  21.189 -		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
  21.190 -	} while (ia64_val);								\
  21.191 -})
  21.192 -
  21.193 -#define _raw_write_trylock(rw)						\
  21.194 -({									\
  21.195 -	__u64 ia64_val;							\
  21.196 -	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
  21.197 -	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
  21.198 -	(ia64_val == 0);						\
  21.199 -})
  21.200 -
  21.201 -#endif /* !ASM_SUPPORTED */
  21.202 -
  21.203 -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
  21.204 -
  21.205 -#define _raw_write_unlock(x)								\
  21.206 -({											\
  21.207 -	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
  21.208 -	clear_bit(31, (x));								\
  21.209 -})
  21.210 -
  21.211 -#endif /*  _ASM_IA64_SPINLOCK_H */
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xen/include/asm-ia64/linux/notifier.h	Tue Aug 30 12:41:54 2005 -0600
    23.3 @@ -0,0 +1,76 @@
    23.4 +/*
    23.5 + *	Routines to manage notifier chains for passing status changes to any
    23.6 + *	interested routines. We need this instead of hard coded call lists so
    23.7 + *	that modules can poke their nose into the innards. The network devices
    23.8 + *	needed them so here they are for the rest of you.
    23.9 + *
   23.10 + *				Alan Cox <Alan.Cox@linux.org>
   23.11 + */
   23.12 + 
   23.13 +#ifndef _LINUX_NOTIFIER_H
   23.14 +#define _LINUX_NOTIFIER_H
   23.15 +#include <linux/errno.h>
   23.16 +
   23.17 +struct notifier_block
   23.18 +{
   23.19 +	int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
   23.20 +	struct notifier_block *next;
   23.21 +	int priority;
   23.22 +};
   23.23 +
   23.24 +
   23.25 +#ifdef __KERNEL__
   23.26 +
   23.27 +extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
   23.28 +extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
   23.29 +extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
   23.30 +
   23.31 +#define NOTIFY_DONE		0x0000		/* Don't care */
   23.32 +#define NOTIFY_OK		0x0001		/* Suits me */
   23.33 +#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
   23.34 +#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)	/* Bad/Veto action	*/
   23.35 +/*
   23.36 + * Clean way to return from the notifier and stop further calls.
   23.37 + */
   23.38 +#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)
   23.39 +
   23.40 +/*
   23.41 + *	Declared notifiers so far. I can imagine quite a few more chains
   23.42 + *	over time (eg laptop power reset chains, reboot chain (to clean 
   23.43 + *	device units up), device [un]mount chain, module load/unload chain,
   23.44 + *	low memory chain, screenblank chain (for plug in modular screenblankers) 
   23.45 + *	VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
   23.46 + */
   23.47 + 
   23.48 +/* netdevice notifier chain */
   23.49 +#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
   23.50 +#define NETDEV_DOWN	0x0002
   23.51 +#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
   23.52 +				   detected a hardware crash and restarted
   23.53 +				   - we can use this eg to kick tcp sessions
   23.54 +				   once done */
   23.55 +#define NETDEV_CHANGE	0x0004	/* Notify device state change */
   23.56 +#define NETDEV_REGISTER 0x0005
   23.57 +#define NETDEV_UNREGISTER	0x0006
   23.58 +#define NETDEV_CHANGEMTU	0x0007
   23.59 +#define NETDEV_CHANGEADDR	0x0008
   23.60 +#define NETDEV_GOING_DOWN	0x0009
   23.61 +#define NETDEV_CHANGENAME	0x000A
   23.62 +#define NETDEV_FEAT_CHANGE	0x000B
   23.63 +
   23.64 +#define SYS_DOWN	0x0001	/* Notify of system down */
   23.65 +#define SYS_RESTART	SYS_DOWN
   23.66 +#define SYS_HALT	0x0002	/* Notify of system halt */
   23.67 +#define SYS_POWER_OFF	0x0003	/* Notify of system power off */
   23.68 +
   23.69 +#define NETLINK_URELEASE	0x0001	/* Unicast netlink socket released */
   23.70 +
   23.71 +#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
   23.72 +#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
   23.73 +#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
   23.74 +#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
   23.75 +#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
   23.76 +#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
   23.77 +
   23.78 +#endif /* __KERNEL__ */
   23.79 +#endif /* _LINUX_NOTIFIER_H */
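
Illustrative stand-alone sketch, not part of this changeset: the header above only declares the notifier-chain API; the chain itself is a priority-ordered singly linked list that is walked until a callback returns a value with NOTIFY_STOP_MASK set. The register/call helpers and the sample callback below are local re-implementations for illustration, not the kernel's.

#include <stdio.h>
#include <stddef.h>

#define NOTIFY_DONE		0x0000
#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000

struct notifier_block {
	int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

static int chain_register(struct notifier_block **list, struct notifier_block *n)
{
	while (*list && (*list)->priority > n->priority)
		list = &(*list)->next;
	n->next = *list;
	*list = n;
	return 0;
}

static int chain_call(struct notifier_block **list, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *list;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			break;
		nb = nb->next;
	}
	return ret;
}

static int report_cpu_event(struct notifier_block *self, unsigned long val, void *v)
{
	(void)self;
	printf("cpu event 0x%lx for cpu %lu\n", val, (unsigned long)v);
	return NOTIFY_OK;
}

int main(void)
{
	static struct notifier_block nb = { report_cpu_event, NULL, 0 };
	struct notifier_block *chain = NULL;

	chain_register(&chain, &nb);
	chain_call(&chain, 0x0002 /* CPU_ONLINE */, (void *)1UL);
	return 0;
}
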
    24.1 --- a/xen/include/asm-ia64/vhpt.h	Fri Aug 26 13:06:49 2005 +0000
    24.2 +++ b/xen/include/asm-ia64/vhpt.h	Tue Aug 30 12:41:54 2005 -0600
    24.3 @@ -129,7 +129,7 @@ struct vhpt_lf_entry {
    24.4  #define VHPT_CCHAIN_LOOKUP(Name, i_or_d)
    24.5  #else
    24.6  #ifdef CONFIG_SMP
    24.7 -#error "VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
    24.8 +#warning "FIXME SMP: VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
    24.9  #endif
   24.10  
   24.11  // VHPT_CCHAIN_LOOKUP is intended to run with psr.i+ic off