direct-io.hg

changeset 7529:fdea4a967bc7

Merge
author djm@kirby.fc.hp.com
date Mon Oct 24 09:08:13 2005 -0600 (2005-10-24)
parents ff7c5a791ed5 98c6c36ac444
children 813e504dc716
files tools/libxc/xc_ia64_stubs.c tools/xenstore/xsls.c xen/Rules.mk xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/xen/domain.c xen/include/public/arch-ia64.h xen/include/public/io/ioreq.h
line diff
     1.1 --- a/buildconfigs/Rules.mk	Fri Oct 21 13:58:39 2005 -0600
     1.2 +++ b/buildconfigs/Rules.mk	Mon Oct 24 09:08:13 2005 -0600
     1.3 @@ -80,10 +80,12 @@ clean::
     1.4  	rm -f patches/*/.makedep
     1.5  
     1.6  ref-%/.valid-ref: pristine-%/.valid-pristine
     1.7 +	set -e
     1.8  	rm -rf $(@D)
     1.9  	cp -al $(<D) $(@D)
    1.10 -	([ -d patches/$* ] && \
    1.11 -	  for i in patches/$*/*.patch ; do ( cd $(@D) ; patch -p1 <../$$i || exit 1 ) ; done) || true
    1.12 +	if [ -d patches/$* ] ; then \
    1.13 +	    for i in patches/$*/*.patch ; do ( cd $(@D) ; patch -p1 <../$$i || exit 1 ) ; done ; \
    1.14 +	fi
    1.15  	touch $@ # update timestamp to avoid rebuild
    1.16  endif
    1.17  
     2.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Fri Oct 21 13:58:39 2005 -0600
     2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Mon Oct 24 09:08:13 2005 -0600
     2.3 @@ -94,7 +94,7 @@ void notify_remote_via_irq(int irq)
     2.4  
     2.5  irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
     2.6  {
     2.7 -    u32            l1, l2;
     2.8 +    unsigned long  l1, l2;
     2.9      unsigned int   l1i, l2i, port;
    2.10      irqreturn_t (*handler)(int, void *, struct pt_regs *);
    2.11      shared_info_t *s = HYPERVISOR_shared_info;
    2.12 @@ -108,14 +108,14 @@ irqreturn_t evtchn_interrupt(int irq, vo
    2.13      while ( l1 != 0 )
    2.14      {
    2.15          l1i = __ffs(l1);
    2.16 -        l1 &= ~(1 << l1i);
    2.17 +        l1 &= ~(1UL << l1i);
    2.18  
    2.19          while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
    2.20          {
    2.21              l2i = __ffs(l2);
    2.22 -            l2 &= ~(1 << l2i);
    2.23 +            l2 &= ~(1UL << l2i);
    2.24  
    2.25 -            port = (l1i << 5) + l2i;
    2.26 +            port = (l1i * BITS_PER_LONG) + l2i;
    2.27              if ( (handler = evtchns[port].handler) != NULL )
    2.28  	    {
    2.29  		clear_evtchn(port);
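
The evtchn_ia64.c hunk above fixes a word-size bug: on ia64 the selector and pending/mask words are 64-bit, so the old "1 << l1i" (an int shift) and the hard-coded "l1i << 5" port arithmetic silently lose events on ports 32 and up. Below is a minimal sketch of the corrected scan, assuming a 64-bit unsigned long; pending[], mask[] and dispatch() are hypothetical stand-ins for s->evtchn_pending[], s->evtchn_mask[] and the evtchns[port].handler call, and clearing the pending bit locally plays the role of clear_evtchn(port).

    /*
     * Sketch of the corrected event-channel scan (not the driver itself).
     * pending[], mask[] and dispatch() stand in for s->evtchn_pending[],
     * s->evtchn_mask[] and the evtchns[port].handler call.
     */
    #include <limits.h>

    #define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))

    static void scan_event_channels(unsigned long sel,
                                    unsigned long *pending,
                                    const unsigned long *mask,
                                    void (*dispatch)(unsigned int port))
    {
        unsigned long l1 = sel, l2;
        unsigned int l1i, l2i, port;

        while (l1 != 0) {
            l1i = __builtin_ctzl(l1);     /* like __ffs(): lowest set bit */
            l1 &= ~(1UL << l1i);          /* 1UL, not 1: l1i can be 32..63 */

            while ((l2 = pending[l1i] & ~mask[l1i]) != 0) {
                l2i = __builtin_ctzl(l2);
                pending[l1i] &= ~(1UL << l2i);   /* clear_evtchn(port) */

                /* (l1i << 5) baked in 32 bits per word; this does not. */
                port = l1i * BITS_PER_LONG + l2i;
                dispatch(port);
            }
        }
    }
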
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Fri Oct 21 13:58:39 2005 -0600
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Mon Oct 24 09:08:13 2005 -0600
     3.3 @@ -1,7 +1,7 @@
     3.4  #
     3.5  # Automatically generated make config: don't edit
     3.6 -# Linux kernel version: 2.6.12-xen
     3.7 -# Wed Aug  3 10:04:25 2005
     3.8 +# Linux kernel version: 2.6.12-xen0
     3.9 +# Sat Oct 15 00:13:28 2005
    3.10  #
    3.11  CONFIG_XEN=y
    3.12  CONFIG_ARCH_XEN=y
    3.13 @@ -151,17 +151,15 @@ CONFIG_HAVE_DEC_LOCK=y
    3.14  # CONFIG_REGPARM is not set
    3.15  CONFIG_X86_LOCAL_APIC=y
    3.16  CONFIG_X86_IO_APIC=y
    3.17 -# CONFIG_HOTPLUG_CPU is not set
    3.18 +CONFIG_HOTPLUG_CPU=y
    3.19  
    3.20  #
    3.21  # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
    3.22  #
    3.23  CONFIG_PCI=y
    3.24 -# CONFIG_PCI_GOBIOS is not set
    3.25  # CONFIG_PCI_GOMMCONFIG is not set
    3.26  # CONFIG_PCI_GODIRECT is not set
    3.27  CONFIG_PCI_GOANY=y
    3.28 -CONFIG_PCI_BIOS=y
    3.29  CONFIG_PCI_DIRECT=y
    3.30  CONFIG_PCI_MMCONFIG=y
    3.31  # CONFIG_PCIEPORTBUS is not set
    3.32 @@ -199,8 +197,6 @@ CONFIG_PCCARD_NONSTATIC=m
    3.33  #
    3.34  CONFIG_HOTPLUG_PCI=m
    3.35  CONFIG_HOTPLUG_PCI_FAKE=m
    3.36 -# CONFIG_HOTPLUG_PCI_COMPAQ is not set
    3.37 -# CONFIG_HOTPLUG_PCI_IBM is not set
    3.38  # CONFIG_HOTPLUG_PCI_ACPI is not set
    3.39  CONFIG_HOTPLUG_PCI_CPCI=y
    3.40  CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
    3.41 @@ -2667,6 +2663,7 @@ CONFIG_ACPI_BUTTON=m
    3.42  CONFIG_ACPI_VIDEO=m
    3.43  CONFIG_ACPI_FAN=m
    3.44  CONFIG_ACPI_PROCESSOR=m
    3.45 +# CONFIG_ACPI_HOTPLUG_CPU is not set
    3.46  CONFIG_ACPI_THERMAL=m
    3.47  CONFIG_ACPI_ASUS=m
    3.48  CONFIG_ACPI_IBM=m
    3.49 @@ -2694,7 +2691,7 @@ CONFIG_EXT3_FS_POSIX_ACL=y
    3.50  CONFIG_EXT3_FS_SECURITY=y
    3.51  CONFIG_JBD=m
    3.52  # CONFIG_JBD_DEBUG is not set
    3.53 -CONFIG_FS_MBCACHE=m
    3.54 +CONFIG_FS_MBCACHE=y
    3.55  CONFIG_REISERFS_FS=m
    3.56  # CONFIG_REISERFS_CHECK is not set
    3.57  # CONFIG_REISERFS_PROC_INFO is not set
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile	Fri Oct 21 13:58:39 2005 -0600
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile	Mon Oct 24 09:08:13 2005 -0600
     4.3 @@ -27,7 +27,7 @@ c-obj-$(CONFIG_X86_MSR)		+= msr.o
     4.4  c-obj-$(CONFIG_X86_CPUID)	+= cpuid.o
     4.5  obj-$(CONFIG_MICROCODE)		+= microcode.o
     4.6  c-obj-$(CONFIG_APM)		+= apm.o
     4.7 -obj-$(CONFIG_X86_SMP)		+= smp.o smpboot.o
     4.8 +obj-$(CONFIG_X86_SMP)		+= smp.o
     4.9  #obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
    4.10  obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
    4.11  obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S	Fri Oct 21 13:58:39 2005 -0600
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S	Mon Oct 24 09:08:13 2005 -0600
     5.3 @@ -40,7 +40,7 @@
     5.4  ENTRY(startup_32)
     5.5  	movl %esi,xen_start_info
     5.6  
     5.7 -#ifdef CONFIG_SMP
     5.8 +#if 0
     5.9  ENTRY(startup_32_smp)
    5.10  #endif /* CONFIG_SMP */
    5.11  
    5.12 @@ -78,7 +78,7 @@ checkCPUtype:
    5.13  	movl %eax,%gs
    5.14  	cld			# gcc2 wants the direction flag cleared at all times
    5.15  
    5.16 -#ifdef CONFIG_SMP
    5.17 +#if 0
    5.18  	movb ready, %cl	
    5.19  	cmpb $1,%cl
    5.20  	je 1f			# the first CPU calls start_kernel
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c	Fri Oct 21 13:58:39 2005 -0600
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c	Mon Oct 24 09:08:13 2005 -0600
     6.3 @@ -136,9 +136,6 @@ EXPORT_SYMBOL(cpu_sibling_map);
     6.4  #endif
     6.5  
     6.6  #ifdef CONFIG_SMP
     6.7 -EXPORT_SYMBOL(cpu_data);
     6.8 -EXPORT_SYMBOL(cpu_online_map);
     6.9 -EXPORT_SYMBOL(cpu_callout_map);
    6.10  EXPORT_SYMBOL(__write_lock_failed);
    6.11  EXPORT_SYMBOL(__read_lock_failed);
    6.12  
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Fri Oct 21 13:58:39 2005 -0600
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Mon Oct 24 09:08:13 2005 -0600
     7.3 @@ -112,44 +112,6 @@ void xen_idle(void)
     7.4  	}
     7.5  }
     7.6  
     7.7 -#ifdef CONFIG_HOTPLUG_CPU
     7.8 -#include <asm/nmi.h>
     7.9 -#ifdef CONFIG_SMP
    7.10 -extern void smp_suspend(void);
    7.11 -extern void smp_resume(void);
    7.12 -#endif
    7.13 -/* We don't actually take CPU down, just spin without interrupts. */
    7.14 -static inline void play_dead(void)
    7.15 -{
    7.16 -	/* Death loop */
    7.17 -	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
    7.18 -		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
    7.19 -
    7.20 -	__flush_tlb_all();
    7.21 -   /* 
    7.22 -    * Restore IPI/IRQ mappings before marking online to prevent 
    7.23 -    * race between pending interrupts and restoration of handler. 
    7.24 -    */
    7.25 -#ifdef CONFIG_SMP
    7.26 -	local_irq_enable(); /* XXX Needed for smp_resume(). Clean me up. */
    7.27 -	smp_resume();
    7.28 -#endif
    7.29 -	cpu_set(smp_processor_id(), cpu_online_map);
    7.30 -}
    7.31 -#else
    7.32 -static inline void play_dead(void)
    7.33 -{
    7.34 -	BUG();
    7.35 -}
    7.36 -#endif /* CONFIG_HOTPLUG_CPU */
    7.37 -
    7.38 -void cpu_restore(void)
    7.39 -{
    7.40 -	play_dead();
    7.41 -	local_irq_enable();
    7.42 -	cpu_idle();
    7.43 -}
    7.44 -
    7.45  /*
    7.46   * The idle thread. There's no useful work to be
    7.47   * done, so just try to conserve power and have a
    7.48 @@ -158,7 +120,9 @@ void cpu_restore(void)
    7.49   */
    7.50  void cpu_idle (void)
    7.51  {
    7.52 +#if defined(CONFIG_HOTPLUG_CPU)
    7.53  	int cpu = _smp_processor_id();
    7.54 +#endif
    7.55  
    7.56  	/* endless idle loop with no priority at all */
    7.57  	while (1) {
    7.58 @@ -168,23 +132,12 @@ void cpu_idle (void)
    7.59  				__get_cpu_var(cpu_idle_state) = 0;
    7.60  			rmb();
    7.61  
    7.62 +#if defined(CONFIG_HOTPLUG_CPU)
    7.63  			if (cpu_is_offline(cpu)) {
    7.64 -				local_irq_disable();
    7.65 -#ifdef CONFIG_SMP
    7.66 -				smp_suspend();
    7.67 -#endif
    7.68 -#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
    7.69 -				/* Ack it.  From this point on until
    7.70 -				   we get woken up, we're not allowed
    7.71 -				   to take any locks.  In particular,
    7.72 -				   don't printk. */
    7.73 -				__get_cpu_var(cpu_state) = CPU_DEAD;
    7.74 -				/* Tell hypervisor to take vcpu down. */
    7.75  				HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
    7.76 -#endif
    7.77 -				play_dead();
    7.78  				local_irq_enable();
    7.79  			}
    7.80 +#endif
    7.81  
    7.82  			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
    7.83  			xen_idle();
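
The process.c hunk above strips out the open-coded play_dead()/cpu_restore() machinery: an offlined VCPU no longer spins in a local death loop, it simply asks the hypervisor to take it down, and execution resumes at the same point when a later VCPUOP_up revives it. A sketch of the resulting idle-loop shape follows; the extern functions are hypothetical stand-ins for the real kernel and hypervisor interfaces named in the diff, not the actual driver code.

    /* Idle-loop shape after the hunk above (a sketch, not the driver code). */
    extern int  current_cpu_id(void);        /* _smp_processor_id()          */
    extern int  cpu_is_offline_now(int cpu); /* cpu_is_offline(cpu)          */
    extern int  resched_needed(void);        /* need_resched()               */
    extern void vcpu_down(int cpu);   /* HYPERVISOR_vcpu_op(VCPUOP_down,..)  */
    extern void irqs_on(void);               /* local_irq_enable()           */
    extern void xen_idle_once(void);         /* xen_idle()                   */
    extern void run_scheduler(void);         /* schedule()                   */

    void idle_loop(void)
    {
    #if defined(CONFIG_HOTPLUG_CPU)
        int cpu = current_cpu_id();
    #endif
        for (;;) {
            while (!resched_needed()) {
    #if defined(CONFIG_HOTPLUG_CPU)
                if (cpu_is_offline_now(cpu)) {
                    /* The hypervisor parks this VCPU until a VCPUOP_up;
                     * we resume right here, so just re-enable interrupts. */
                    vcpu_down(cpu);
                    irqs_on();
                }
    #endif
                xen_idle_once();
            }
            run_scheduler();
        }
    }
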
     8.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c	Fri Oct 21 13:58:39 2005 -0600
     8.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c	Mon Oct 24 09:08:13 2005 -0600
     8.3 @@ -939,6 +939,8 @@ void __init find_max_pfn(void)
     8.4  	if ( xen_override_max_pfn < xen_start_info->nr_pages )
     8.5  		xen_override_max_pfn = xen_start_info->nr_pages;
     8.6  	max_pfn = xen_override_max_pfn;
     8.7 +	/* 8MB slack, to make up for address space allocations in backends. */
     8.8 +	max_pfn += 8 << (20 - PAGE_SHIFT);
     8.9  }
    8.10  #endif /* XEN */
    8.11  
    8.12 @@ -1638,39 +1640,17 @@ void __init setup_arch(char **cmdline_p)
    8.13  #endif
    8.14  
    8.15  	/* Make sure we have a correctly sized P->M table. */
    8.16 -	if (max_pfn != xen_start_info->nr_pages) {
    8.17 -		phys_to_machine_mapping = alloc_bootmem_low_pages(
    8.18 -			max_pfn * sizeof(unsigned long));
    8.19 -
    8.20 -		if (max_pfn > xen_start_info->nr_pages) {
    8.21 -			/* set to INVALID_P2M_ENTRY */
    8.22 -			memset(phys_to_machine_mapping, ~0,
    8.23 -				max_pfn * sizeof(unsigned long));
    8.24 -			memcpy(phys_to_machine_mapping,
    8.25 -				(unsigned long *)xen_start_info->mfn_list,
    8.26 -				xen_start_info->nr_pages * sizeof(unsigned long));
    8.27 -		} else {
    8.28 -			struct xen_memory_reservation reservation = {
    8.29 -				.extent_start = (unsigned long *)xen_start_info->mfn_list + max_pfn,
    8.30 -				.nr_extents   = xen_start_info->nr_pages - max_pfn,
    8.31 -				.extent_order = 0,
    8.32 -				.domid        = DOMID_SELF
    8.33 -			};
    8.34 -
    8.35 -			memcpy(phys_to_machine_mapping,
    8.36 -				(unsigned long *)xen_start_info->mfn_list,
    8.37 -				max_pfn * sizeof(unsigned long));
    8.38 -			BUG_ON(HYPERVISOR_memory_op(
    8.39 -				XENMEM_decrease_reservation,
    8.40 -				&reservation) !=
    8.41 -			    (xen_start_info->nr_pages - max_pfn));
    8.42 -		}
    8.43 -		free_bootmem(
    8.44 -			__pa(xen_start_info->mfn_list), 
    8.45 -			PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
    8.46 -			sizeof(unsigned long))));
    8.47 -	}
    8.48 -
    8.49 +	phys_to_machine_mapping = alloc_bootmem_low_pages(
    8.50 +		max_pfn * sizeof(unsigned long));
    8.51 +	memset(phys_to_machine_mapping, ~0,
    8.52 +		max_pfn * sizeof(unsigned long));
    8.53 +	memcpy(phys_to_machine_mapping,
    8.54 +		(unsigned long *)xen_start_info->mfn_list,
    8.55 +		xen_start_info->nr_pages * sizeof(unsigned long));
    8.56 +	free_bootmem(
    8.57 +		__pa(xen_start_info->mfn_list), 
    8.58 +		PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
    8.59 +		sizeof(unsigned long))));
    8.60  
    8.61  	/* 
    8.62  	 * Initialise the list of the frames that specify the list of 
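
Two related simplifications sit in the setup.c hunk above: find_max_pfn() now pads max_pfn with 8MB of slack (8 << (20 - PAGE_SHIFT) frames) to cover address-space allocations in backends, and the phys-to-machine table is therefore always allocated at the padded size, filled with INVALID_P2M_ENTRY (all ones) and then overlaid with the nr_pages entries from the domain builder; the old decrease-reservation branch disappears. A sketch of that arithmetic, assuming no mem= override (so max_pfn starts at nr_pages) and a hypothetical boot_alloc() in place of alloc_bootmem_low_pages():

    /*
     * Sketch of the simplified P->M table setup from the hunk above.
     * boot_alloc() and mfn_list stand in for alloc_bootmem_low_pages()
     * and xen_start_info->mfn_list; a 4kB page (PAGE_SHIFT 12) is assumed.
     */
    #include <string.h>

    #define PAGE_SHIFT 12

    extern void *boot_alloc(unsigned long bytes);

    unsigned long *setup_p2m_table(unsigned long nr_pages,
                                   const unsigned long *mfn_list,
                                   unsigned long *max_pfn_out)
    {
        /* 8MB of slack: 8 << (20 - PAGE_SHIFT) == 2048 extra 4kB frames. */
        unsigned long max_pfn = nr_pages + (8UL << (20 - PAGE_SHIFT));
        unsigned long *p2m = boot_alloc(max_pfn * sizeof(unsigned long));

        /* Every slot starts out invalid (~0UL, i.e. INVALID_P2M_ENTRY)... */
        memset(p2m, ~0, max_pfn * sizeof(unsigned long));
        /* ...then the frames this domain really owns are copied in. */
        memcpy(p2m, mfn_list, nr_pages * sizeof(unsigned long));

        *max_pfn_out = max_pfn;
        return p2m;
    }
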
     9.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Fri Oct 21 13:58:39 2005 -0600
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,1625 +0,0 @@
     9.4 -/*
     9.5 - *	x86 SMP booting functions
     9.6 - *
     9.7 - *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
     9.8 - *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
     9.9 - *
    9.10 - *	Much of the core SMP work is based on previous work by Thomas Radke, to
    9.11 - *	whom a great many thanks are extended.
    9.12 - *
    9.13 - *	Thanks to Intel for making available several different Pentium,
    9.14 - *	Pentium Pro and Pentium-II/Xeon MP machines.
    9.15 - *	Original development of Linux SMP code supported by Caldera.
    9.16 - *
    9.17 - *	This code is released under the GNU General Public License version 2 or
    9.18 - *	later.
    9.19 - *
    9.20 - *	Fixes
    9.21 - *		Felix Koop	:	NR_CPUS used properly
    9.22 - *		Jose Renau	:	Handle single CPU case.
    9.23 - *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
    9.24 - *		Greg Wright	:	Fix for kernel stacks panic.
    9.25 - *		Erich Boleyn	:	MP v1.4 and additional changes.
    9.26 - *	Matthias Sattler	:	Changes for 2.1 kernel map.
    9.27 - *	Michel Lespinasse	:	Changes for 2.1 kernel map.
    9.28 - *	Michael Chastain	:	Change trampoline.S to gnu as.
    9.29 - *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
    9.30 - *		Ingo Molnar	:	Added APIC timers, based on code
    9.31 - *					from Jose Renau
    9.32 - *		Ingo Molnar	:	various cleanups and rewrites
    9.33 - *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
    9.34 - *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
    9.35 - *		Martin J. Bligh	: 	Added support for multi-quad systems
    9.36 - *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
    9.37 -*		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
    9.38 -
    9.39 -#include <linux/module.h>
    9.40 -#include <linux/config.h>
    9.41 -#include <linux/init.h>
    9.42 -#include <linux/kernel.h>
    9.43 -
    9.44 -#include <linux/mm.h>
    9.45 -#include <linux/sched.h>
    9.46 -#include <linux/kernel_stat.h>
    9.47 -#include <linux/smp_lock.h>
    9.48 -#include <linux/irq.h>
    9.49 -#include <linux/bootmem.h>
    9.50 -#include <linux/notifier.h>
    9.51 -#include <linux/cpu.h>
    9.52 -#include <linux/percpu.h>
    9.53 -
    9.54 -#include <linux/delay.h>
    9.55 -#include <linux/mc146818rtc.h>
    9.56 -#include <asm/tlbflush.h>
    9.57 -#include <asm/desc.h>
    9.58 -#include <asm/arch_hooks.h>
    9.59 -
    9.60 -#include <asm/smp_alt.h>
    9.61 -
    9.62 -#ifndef CONFIG_X86_IO_APIC
    9.63 -#define Dprintk(args...)
    9.64 -#endif
    9.65 -#include <mach_wakecpu.h>
    9.66 -#include <smpboot_hooks.h>
    9.67 -
    9.68 -#include <asm-xen/evtchn.h>
    9.69 -#include <asm-xen/xen-public/vcpu.h>
    9.70 -
    9.71 -/* Set if we find a B stepping CPU */
    9.72 -static int __initdata smp_b_stepping;
    9.73 -
    9.74 -/* Number of siblings per CPU package */
    9.75 -int smp_num_siblings = 1;
    9.76 -int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
    9.77 -EXPORT_SYMBOL(phys_proc_id);
    9.78 -int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
    9.79 -EXPORT_SYMBOL(cpu_core_id);
    9.80 -
    9.81 -/* bitmap of online cpus */
    9.82 -cpumask_t cpu_online_map;
    9.83 -
    9.84 -cpumask_t cpu_callin_map;
    9.85 -cpumask_t cpu_callout_map;
    9.86 -static cpumask_t smp_commenced_mask;
    9.87 -
    9.88 -/* Per CPU bogomips and other parameters */
    9.89 -struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
    9.90 -
    9.91 -u8 x86_cpu_to_apicid[NR_CPUS] =
    9.92 -			{ [0 ... NR_CPUS-1] = 0xff };
    9.93 -EXPORT_SYMBOL(x86_cpu_to_apicid);
    9.94 -
    9.95 -#if 0
    9.96 -/*
    9.97 - * Trampoline 80x86 program as an array.
    9.98 - */
    9.99 -
   9.100 -extern unsigned char trampoline_data [];
   9.101 -extern unsigned char trampoline_end  [];
   9.102 -static unsigned char *trampoline_base;
   9.103 -static int trampoline_exec;
   9.104 -#endif
   9.105 -
   9.106 -#ifdef CONFIG_HOTPLUG_CPU
   9.107 -/* State of each CPU. */
   9.108 -DEFINE_PER_CPU(int, cpu_state) = { 0 };
   9.109 -#endif
   9.110 -
   9.111 -static DEFINE_PER_CPU(int, resched_irq);
   9.112 -static DEFINE_PER_CPU(int, callfunc_irq);
   9.113 -static char resched_name[NR_CPUS][15];
   9.114 -static char callfunc_name[NR_CPUS][15];
   9.115 -
   9.116 -#if 0
   9.117 -/*
   9.118 - * Currently trivial. Write the real->protected mode
   9.119 - * bootstrap into the page concerned. The caller
   9.120 - * has made sure it's suitably aligned.
   9.121 - */
   9.122 -
   9.123 -static unsigned long __init setup_trampoline(void)
   9.124 -{
   9.125 -	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
   9.126 -	return virt_to_phys(trampoline_base);
   9.127 -}
   9.128 -#endif
   9.129 -
   9.130 -static void map_cpu_to_logical_apicid(void);
   9.131 -
   9.132 -/*
   9.133 - * We are called very early to get the low memory for the
   9.134 - * SMP bootup trampoline page.
   9.135 - */
   9.136 -void __init smp_alloc_memory(void)
   9.137 -{
   9.138 -#if 0
   9.139 -	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
   9.140 -	/*
   9.141 -	 * Has to be in very low memory so we can execute
   9.142 -	 * real-mode AP code.
   9.143 -	 */
   9.144 -	if (__pa(trampoline_base) >= 0x9F000)
   9.145 -		BUG();
   9.146 -	/*
   9.147 -	 * Make the SMP trampoline executable:
   9.148 -	 */
   9.149 -	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
   9.150 -#endif
   9.151 -}
   9.152 -
   9.153 -/*
   9.154 - * The bootstrap kernel entry code has set these up. Save them for
   9.155 - * a given CPU
   9.156 - */
   9.157 -
   9.158 -static void __init smp_store_cpu_info(int id)
   9.159 -{
   9.160 -	struct cpuinfo_x86 *c = cpu_data + id;
   9.161 -
   9.162 -	*c = boot_cpu_data;
   9.163 -	if (id!=0)
   9.164 -		identify_cpu(c);
   9.165 -	/*
   9.166 -	 * Mask B, Pentium, but not Pentium MMX
   9.167 -	 */
   9.168 -	if (c->x86_vendor == X86_VENDOR_INTEL &&
   9.169 -	    c->x86 == 5 &&
   9.170 -	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
   9.171 -	    c->x86_model <= 3)
   9.172 -		/*
   9.173 -		 * Remember we have B step Pentia with bugs
   9.174 -		 */
   9.175 -		smp_b_stepping = 1;
   9.176 -
   9.177 -	/*
   9.178 -	 * Certain Athlons might work (for various values of 'work') in SMP
   9.179 -	 * but they are not certified as MP capable.
   9.180 -	 */
   9.181 -	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
   9.182 -
   9.183 -		/* Athlon 660/661 is valid. */	
   9.184 -		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
   9.185 -			goto valid_k7;
   9.186 -
   9.187 -		/* Duron 670 is valid */
   9.188 -		if ((c->x86_model==7) && (c->x86_mask==0))
   9.189 -			goto valid_k7;
   9.190 -
   9.191 -		/*
   9.192 -		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
   9.193 -		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
   9.194 -		 * have the MP bit set.
   9.195 -		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
   9.196 -		 */
   9.197 -		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
   9.198 -		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
   9.199 -		     (c->x86_model> 7))
   9.200 -			if (cpu_has_mp)
   9.201 -				goto valid_k7;
   9.202 -
   9.203 -		/* If we get here, it's not a certified SMP capable AMD system. */
   9.204 -		tainted |= TAINT_UNSAFE_SMP;
   9.205 -	}
   9.206 -
   9.207 -valid_k7:
   9.208 -	;
   9.209 -}
   9.210 -
   9.211 -#if 0
   9.212 -/*
   9.213 - * TSC synchronization.
   9.214 - *
   9.215 - * We first check whether all CPUs have their TSC's synchronized,
   9.216 - * then we print a warning if not, and always resync.
   9.217 - */
   9.218 -
   9.219 -static atomic_t tsc_start_flag = ATOMIC_INIT(0);
   9.220 -static atomic_t tsc_count_start = ATOMIC_INIT(0);
   9.221 -static atomic_t tsc_count_stop = ATOMIC_INIT(0);
   9.222 -static unsigned long long tsc_values[NR_CPUS];
   9.223 -
   9.224 -#define NR_LOOPS 5
   9.225 -
   9.226 -static void __init synchronize_tsc_bp (void)
   9.227 -{
   9.228 -	int i;
   9.229 -	unsigned long long t0;
   9.230 -	unsigned long long sum, avg;
   9.231 -	long long delta;
   9.232 -	unsigned long one_usec;
   9.233 -	int buggy = 0;
   9.234 -
   9.235 -	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
   9.236 -
   9.237 -	/* convert from kcyc/sec to cyc/usec */
   9.238 -	one_usec = cpu_khz / 1000;
   9.239 -
   9.240 -	atomic_set(&tsc_start_flag, 1);
   9.241 -	wmb();
   9.242 -
   9.243 -	/*
   9.244 -	 * We loop a few times to get a primed instruction cache,
   9.245 -	 * then the last pass is more or less synchronized and
   9.246 -	 * the BP and APs set their cycle counters to zero all at
   9.247 -	 * once. This reduces the chance of having random offsets
   9.248 -	 * between the processors, and guarantees that the maximum
   9.249 -	 * delay between the cycle counters is never bigger than
   9.250 -	 * the latency of information-passing (cachelines) between
   9.251 -	 * two CPUs.
   9.252 -	 */
   9.253 -	for (i = 0; i < NR_LOOPS; i++) {
   9.254 -		/*
   9.255 -		 * all APs synchronize but they loop on '== num_cpus'
   9.256 -		 */
   9.257 -		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
   9.258 -			mb();
   9.259 -		atomic_set(&tsc_count_stop, 0);
   9.260 -		wmb();
   9.261 -		/*
   9.262 -		 * this lets the APs save their current TSC:
   9.263 -		 */
   9.264 -		atomic_inc(&tsc_count_start);
   9.265 -
   9.266 -		rdtscll(tsc_values[smp_processor_id()]);
   9.267 -		/*
   9.268 -		 * We clear the TSC in the last loop:
   9.269 -		 */
   9.270 -		if (i == NR_LOOPS-1)
   9.271 -			write_tsc(0, 0);
   9.272 -
   9.273 -		/*
   9.274 -		 * Wait for all APs to leave the synchronization point:
   9.275 -		 */
   9.276 -		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
   9.277 -			mb();
   9.278 -		atomic_set(&tsc_count_start, 0);
   9.279 -		wmb();
   9.280 -		atomic_inc(&tsc_count_stop);
   9.281 -	}
   9.282 -
   9.283 -	sum = 0;
   9.284 -	for (i = 0; i < NR_CPUS; i++) {
   9.285 -		if (cpu_isset(i, cpu_callout_map)) {
   9.286 -			t0 = tsc_values[i];
   9.287 -			sum += t0;
   9.288 -		}
   9.289 -	}
   9.290 -	avg = sum;
   9.291 -	do_div(avg, num_booting_cpus());
   9.292 -
   9.293 -	sum = 0;
   9.294 -	for (i = 0; i < NR_CPUS; i++) {
   9.295 -		if (!cpu_isset(i, cpu_callout_map))
   9.296 -			continue;
   9.297 -		delta = tsc_values[i] - avg;
   9.298 -		if (delta < 0)
   9.299 -			delta = -delta;
   9.300 -		/*
   9.301 -		 * We report bigger than 2 microseconds clock differences.
   9.302 -		 */
   9.303 -		if (delta > 2*one_usec) {
   9.304 -			long realdelta;
   9.305 -			if (!buggy) {
   9.306 -				buggy = 1;
   9.307 -				printk("\n");
   9.308 -			}
   9.309 -			realdelta = delta;
   9.310 -			do_div(realdelta, one_usec);
   9.311 -			if (tsc_values[i] < avg)
   9.312 -				realdelta = -realdelta;
   9.313 -
   9.314 -			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
   9.315 -		}
   9.316 -
   9.317 -		sum += delta;
   9.318 -	}
   9.319 -	if (!buggy)
   9.320 -		printk("passed.\n");
   9.321 -}
   9.322 -
   9.323 -static void __init synchronize_tsc_ap (void)
   9.324 -{
   9.325 -	int i;
   9.326 -
   9.327 -	/*
   9.328 -	 * Not every cpu is online at the time
   9.329 -	 * this gets called, so we first wait for the BP to
   9.330 -	 * finish SMP initialization:
   9.331 -	 */
   9.332 -	while (!atomic_read(&tsc_start_flag)) mb();
   9.333 -
   9.334 -	for (i = 0; i < NR_LOOPS; i++) {
   9.335 -		atomic_inc(&tsc_count_start);
   9.336 -		while (atomic_read(&tsc_count_start) != num_booting_cpus())
   9.337 -			mb();
   9.338 -
   9.339 -		rdtscll(tsc_values[smp_processor_id()]);
   9.340 -		if (i == NR_LOOPS-1)
   9.341 -			write_tsc(0, 0);
   9.342 -
   9.343 -		atomic_inc(&tsc_count_stop);
   9.344 -		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
   9.345 -	}
   9.346 -}
   9.347 -#undef NR_LOOPS
   9.348 -#endif
   9.349 -
   9.350 -extern void calibrate_delay(void);
   9.351 -
   9.352 -static atomic_t init_deasserted;
   9.353 -
   9.354 -static void __init smp_callin(void)
   9.355 -{
   9.356 -	int cpuid, phys_id;
   9.357 -	unsigned long timeout;
   9.358 -
   9.359 -#if 0
   9.360 -	/*
   9.361 -	 * If waken up by an INIT in an 82489DX configuration
   9.362 -	 * we may get here before an INIT-deassert IPI reaches
   9.363 -	 * our local APIC.  We have to wait for the IPI or we'll
   9.364 -	 * lock up on an APIC access.
   9.365 -	 */
   9.366 -	wait_for_init_deassert(&init_deasserted);
   9.367 -#endif
   9.368 -
   9.369 -	/*
   9.370 -	 * (This works even if the APIC is not enabled.)
   9.371 -	 */
   9.372 -	phys_id = smp_processor_id();
   9.373 -	cpuid = smp_processor_id();
   9.374 -	if (cpu_isset(cpuid, cpu_callin_map)) {
   9.375 -		printk("huh, phys CPU#%d, CPU#%d already present??\n",
   9.376 -					phys_id, cpuid);
   9.377 -		BUG();
   9.378 -	}
   9.379 -	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
   9.380 -
   9.381 -	/*
   9.382 -	 * STARTUP IPIs are fragile beasts as they might sometimes
   9.383 -	 * trigger some glue motherboard logic. Complete APIC bus
   9.384 -	 * silence for 1 second, this overestimates the time the
   9.385 -	 * boot CPU is spending to send the up to 2 STARTUP IPIs
   9.386 -	 * by a factor of two. This should be enough.
   9.387 -	 */
   9.388 -
   9.389 -	/*
   9.390 -	 * Waiting 2s total for startup (udelay is not yet working)
   9.391 -	 */
   9.392 -	timeout = jiffies + 2*HZ;
   9.393 -	while (time_before(jiffies, timeout)) {
   9.394 -		/*
   9.395 -		 * Has the boot CPU finished it's STARTUP sequence?
   9.396 -		 */
   9.397 -		if (cpu_isset(cpuid, cpu_callout_map))
   9.398 -			break;
   9.399 -		rep_nop();
   9.400 -	}
   9.401 -
   9.402 -	if (!time_before(jiffies, timeout)) {
   9.403 -		printk("BUG: CPU%d started up but did not get a callout!\n",
   9.404 -			cpuid);
   9.405 -		BUG();
   9.406 -	}
   9.407 -
   9.408 -#if 0
   9.409 -	/*
   9.410 -	 * the boot CPU has finished the init stage and is spinning
   9.411 -	 * on callin_map until we finish. We are free to set up this
   9.412 -	 * CPU, first the APIC. (this is probably redundant on most
   9.413 -	 * boards)
   9.414 -	 */
   9.415 -
   9.416 -	Dprintk("CALLIN, before setup_local_APIC().\n");
   9.417 -	smp_callin_clear_local_apic();
   9.418 -	setup_local_APIC();
   9.419 -#endif
   9.420 -	map_cpu_to_logical_apicid();
   9.421 -
   9.422 -	/*
   9.423 -	 * Get our bogomips.
   9.424 -	 */
   9.425 -	calibrate_delay();
   9.426 -	Dprintk("Stack at about %p\n",&cpuid);
   9.427 -
   9.428 -	/*
   9.429 -	 * Save our processor parameters
   9.430 -	 */
   9.431 - 	smp_store_cpu_info(cpuid);
   9.432 -
   9.433 -#if 0
   9.434 -	disable_APIC_timer();
   9.435 -#endif
   9.436 -
   9.437 -	/*
   9.438 -	 * Allow the master to continue.
   9.439 -	 */
   9.440 -	cpu_set(cpuid, cpu_callin_map);
   9.441 -
   9.442 -#if 0
   9.443 -	/*
   9.444 -	 *      Synchronize the TSC with the BP
   9.445 -	 */
   9.446 -	if (cpu_has_tsc && cpu_khz)
   9.447 -		synchronize_tsc_ap();
   9.448 -#endif
   9.449 -}
   9.450 -
   9.451 -static int cpucount;
   9.452 -
   9.453 -extern void local_setup_timer(void);
   9.454 -
   9.455 -/*
   9.456 - * Activate a secondary processor.
   9.457 - */
   9.458 -static void __init start_secondary(void *unused)
   9.459 -{
   9.460 -	/*
   9.461 -	 * Dont put anything before smp_callin(), SMP
   9.462 -	 * booting is too fragile that we want to limit the
   9.463 -	 * things done here to the most necessary things.
   9.464 -	 */
   9.465 -	cpu_init();
   9.466 -	smp_callin();
   9.467 -	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
   9.468 -		rep_nop();
   9.469 -	local_setup_timer();
   9.470 -	smp_intr_init();
   9.471 -	local_irq_enable();
   9.472 -	/*
   9.473 -	 * low-memory mappings have been cleared, flush them from
   9.474 -	 * the local TLBs too.
   9.475 -	 */
   9.476 -	local_flush_tlb();
   9.477 -	cpu_set(smp_processor_id(), cpu_online_map);
   9.478 -
   9.479 -	/* We can take interrupts now: we're officially "up". */
   9.480 -	local_irq_enable();
   9.481 -
   9.482 -	wmb();
   9.483 -	cpu_idle();
   9.484 -}
   9.485 -
   9.486 -/*
   9.487 - * Everything has been set up for the secondary
   9.488 - * CPUs - they just need to reload everything
   9.489 - * from the task structure
   9.490 - * This function must not return.
   9.491 - */
   9.492 -void __init initialize_secondary(void)
   9.493 -{
   9.494 -	/*
   9.495 -	 * We don't actually need to load the full TSS,
   9.496 -	 * basically just the stack pointer and the eip.
   9.497 -	 */
   9.498 -
   9.499 -	asm volatile(
   9.500 -		"movl %0,%%esp\n\t"
   9.501 -		"jmp *%1"
   9.502 -		:
   9.503 -		:"r" (current->thread.esp),"r" (current->thread.eip));
   9.504 -}
   9.505 -
   9.506 -extern struct {
   9.507 -	void * esp;
   9.508 -	unsigned short ss;
   9.509 -} stack_start;
   9.510 -
   9.511 -#ifdef CONFIG_NUMA
   9.512 -
   9.513 -/* which logical CPUs are on which nodes */
   9.514 -cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
   9.515 -				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
   9.516 -/* which node each logical CPU is on */
   9.517 -int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
   9.518 -EXPORT_SYMBOL(cpu_2_node);
   9.519 -
   9.520 -/* set up a mapping between cpu and node. */
   9.521 -static inline void map_cpu_to_node(int cpu, int node)
   9.522 -{
   9.523 -	printk("Mapping cpu %d to node %d\n", cpu, node);
   9.524 -	cpu_set(cpu, node_2_cpu_mask[node]);
   9.525 -	cpu_2_node[cpu] = node;
   9.526 -}
   9.527 -
   9.528 -/* undo a mapping between cpu and node. */
   9.529 -static inline void unmap_cpu_to_node(int cpu)
   9.530 -{
   9.531 -	int node;
   9.532 -
   9.533 -	printk("Unmapping cpu %d from all nodes\n", cpu);
   9.534 -	for (node = 0; node < MAX_NUMNODES; node ++)
   9.535 -		cpu_clear(cpu, node_2_cpu_mask[node]);
   9.536 -	cpu_2_node[cpu] = 0;
   9.537 -}
   9.538 -#else /* !CONFIG_NUMA */
   9.539 -
   9.540 -#define map_cpu_to_node(cpu, node)	({})
   9.541 -#define unmap_cpu_to_node(cpu)	({})
   9.542 -
   9.543 -#endif /* CONFIG_NUMA */
   9.544 -
   9.545 -u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
   9.546 -
   9.547 -static void map_cpu_to_logical_apicid(void)
   9.548 -{
   9.549 -	int cpu = smp_processor_id();
   9.550 -	int apicid = smp_processor_id();
   9.551 -
   9.552 -	cpu_2_logical_apicid[cpu] = apicid;
   9.553 -	map_cpu_to_node(cpu, apicid_to_node(apicid));
   9.554 -}
   9.555 -
   9.556 -static void unmap_cpu_to_logical_apicid(int cpu)
   9.557 -{
   9.558 -	cpu_2_logical_apicid[cpu] = BAD_APICID;
   9.559 -	unmap_cpu_to_node(cpu);
   9.560 -}
   9.561 -
   9.562 -#if APIC_DEBUG
   9.563 -static inline void __inquire_remote_apic(int apicid)
   9.564 -{
   9.565 -	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
   9.566 -	char *names[] = { "ID", "VERSION", "SPIV" };
   9.567 -	int timeout, status;
   9.568 -
   9.569 -	printk("Inquiring remote APIC #%d...\n", apicid);
   9.570 -
   9.571 -	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
   9.572 -		printk("... APIC #%d %s: ", apicid, names[i]);
   9.573 -
   9.574 -		/*
   9.575 -		 * Wait for idle.
   9.576 -		 */
   9.577 -		apic_wait_icr_idle();
   9.578 -
   9.579 -		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
   9.580 -		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
   9.581 -
   9.582 -		timeout = 0;
   9.583 -		do {
   9.584 -			udelay(100);
   9.585 -			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
   9.586 -		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
   9.587 -
   9.588 -		switch (status) {
   9.589 -		case APIC_ICR_RR_VALID:
   9.590 -			status = apic_read(APIC_RRR);
   9.591 -			printk("%08x\n", status);
   9.592 -			break;
   9.593 -		default:
   9.594 -			printk("failed\n");
   9.595 -		}
   9.596 -	}
   9.597 -}
   9.598 -#endif
   9.599 -
   9.600 -#if 0
   9.601 -#ifdef WAKE_SECONDARY_VIA_NMI
   9.602 -/* 
   9.603 - * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
   9.604 - * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
   9.605 - * won't ... remember to clear down the APIC, etc later.
   9.606 - */
   9.607 -static int __init
   9.608 -wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
   9.609 -{
   9.610 -	unsigned long send_status = 0, accept_status = 0;
   9.611 -	int timeout, maxlvt;
   9.612 -
   9.613 -	/* Target chip */
   9.614 -	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
   9.615 -
   9.616 -	/* Boot on the stack */
   9.617 -	/* Kick the second */
   9.618 -	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
   9.619 -
   9.620 -	Dprintk("Waiting for send to finish...\n");
   9.621 -	timeout = 0;
   9.622 -	do {
   9.623 -		Dprintk("+");
   9.624 -		udelay(100);
   9.625 -		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
   9.626 -	} while (send_status && (timeout++ < 1000));
   9.627 -
   9.628 -	/*
   9.629 -	 * Give the other CPU some time to accept the IPI.
   9.630 -	 */
   9.631 -	udelay(200);
   9.632 -	/*
   9.633 -	 * Due to the Pentium erratum 3AP.
   9.634 -	 */
   9.635 -	maxlvt = get_maxlvt();
   9.636 -	if (maxlvt > 3) {
   9.637 -		apic_read_around(APIC_SPIV);
   9.638 -		apic_write(APIC_ESR, 0);
   9.639 -	}
   9.640 -	accept_status = (apic_read(APIC_ESR) & 0xEF);
   9.641 -	Dprintk("NMI sent.\n");
   9.642 -
   9.643 -	if (send_status)
   9.644 -		printk("APIC never delivered???\n");
   9.645 -	if (accept_status)
   9.646 -		printk("APIC delivery error (%lx).\n", accept_status);
   9.647 -
   9.648 -	return (send_status | accept_status);
   9.649 -}
   9.650 -#endif	/* WAKE_SECONDARY_VIA_NMI */
   9.651 -
   9.652 -#ifdef WAKE_SECONDARY_VIA_INIT
   9.653 -static int __init
   9.654 -wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
   9.655 -{
   9.656 -	unsigned long send_status = 0, accept_status = 0;
   9.657 -	int maxlvt, timeout, num_starts, j;
   9.658 -
   9.659 -	/*
   9.660 -	 * Be paranoid about clearing APIC errors.
   9.661 -	 */
   9.662 -	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
   9.663 -		apic_read_around(APIC_SPIV);
   9.664 -		apic_write(APIC_ESR, 0);
   9.665 -		apic_read(APIC_ESR);
   9.666 -	}
   9.667 -
   9.668 -	Dprintk("Asserting INIT.\n");
   9.669 -
   9.670 -	/*
   9.671 -	 * Turn INIT on target chip
   9.672 -	 */
   9.673 -	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
   9.674 -
   9.675 -	/*
   9.676 -	 * Send IPI
   9.677 -	 */
   9.678 -	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
   9.679 -				| APIC_DM_INIT);
   9.680 -
   9.681 -	Dprintk("Waiting for send to finish...\n");
   9.682 -	timeout = 0;
   9.683 -	do {
   9.684 -		Dprintk("+");
   9.685 -		udelay(100);
   9.686 -		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
   9.687 -	} while (send_status && (timeout++ < 1000));
   9.688 -
   9.689 -	mdelay(10);
   9.690 -
   9.691 -	Dprintk("Deasserting INIT.\n");
   9.692 -
   9.693 -	/* Target chip */
   9.694 -	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
   9.695 -
   9.696 -	/* Send IPI */
   9.697 -	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
   9.698 -
   9.699 -	Dprintk("Waiting for send to finish...\n");
   9.700 -	timeout = 0;
   9.701 -	do {
   9.702 -		Dprintk("+");
   9.703 -		udelay(100);
   9.704 -		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
   9.705 -	} while (send_status && (timeout++ < 1000));
   9.706 -
   9.707 -	atomic_set(&init_deasserted, 1);
   9.708 -
   9.709 -	/*
   9.710 -	 * Should we send STARTUP IPIs ?
   9.711 -	 *
   9.712 -	 * Determine this based on the APIC version.
   9.713 -	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
   9.714 -	 */
   9.715 -	if (APIC_INTEGRATED(apic_version[phys_apicid]))
   9.716 -		num_starts = 2;
   9.717 -	else
   9.718 -		num_starts = 0;
   9.719 -
   9.720 -	/*
   9.721 -	 * Run STARTUP IPI loop.
   9.722 -	 */
   9.723 -	Dprintk("#startup loops: %d.\n", num_starts);
   9.724 -
   9.725 -	maxlvt = get_maxlvt();
   9.726 -
   9.727 -	for (j = 1; j <= num_starts; j++) {
   9.728 -		Dprintk("Sending STARTUP #%d.\n",j);
   9.729 -		apic_read_around(APIC_SPIV);
   9.730 -		apic_write(APIC_ESR, 0);
   9.731 -		apic_read(APIC_ESR);
   9.732 -		Dprintk("After apic_write.\n");
   9.733 -
   9.734 -		/*
   9.735 -		 * STARTUP IPI
   9.736 -		 */
   9.737 -
   9.738 -		/* Target chip */
   9.739 -		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
   9.740 -
   9.741 -		/* Boot on the stack */
   9.742 -		/* Kick the second */
   9.743 -		apic_write_around(APIC_ICR, APIC_DM_STARTUP
   9.744 -					| (start_eip >> 12));
   9.745 -
   9.746 -		/*
   9.747 -		 * Give the other CPU some time to accept the IPI.
   9.748 -		 */
   9.749 -		udelay(300);
   9.750 -
   9.751 -		Dprintk("Startup point 1.\n");
   9.752 -
   9.753 -		Dprintk("Waiting for send to finish...\n");
   9.754 -		timeout = 0;
   9.755 -		do {
   9.756 -			Dprintk("+");
   9.757 -			udelay(100);
   9.758 -			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
   9.759 -		} while (send_status && (timeout++ < 1000));
   9.760 -
   9.761 -		/*
   9.762 -		 * Give the other CPU some time to accept the IPI.
   9.763 -		 */
   9.764 -		udelay(200);
   9.765 -		/*
   9.766 -		 * Due to the Pentium erratum 3AP.
   9.767 -		 */
   9.768 -		if (maxlvt > 3) {
   9.769 -			apic_read_around(APIC_SPIV);
   9.770 -			apic_write(APIC_ESR, 0);
   9.771 -		}
   9.772 -		accept_status = (apic_read(APIC_ESR) & 0xEF);
   9.773 -		if (send_status || accept_status)
   9.774 -			break;
   9.775 -	}
   9.776 -	Dprintk("After Startup.\n");
   9.777 -
   9.778 -	if (send_status)
   9.779 -		printk("APIC never delivered???\n");
   9.780 -	if (accept_status)
   9.781 -		printk("APIC delivery error (%lx).\n", accept_status);
   9.782 -
   9.783 -	return (send_status | accept_status);
   9.784 -}
   9.785 -#endif	/* WAKE_SECONDARY_VIA_INIT */
   9.786 -#endif
   9.787 -
   9.788 -extern cpumask_t cpu_initialized;
   9.789 -
   9.790 -static int __init do_boot_cpu(int apicid)
   9.791 -/*
   9.792 - * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
   9.793 - * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
   9.794 - * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
   9.795 - */
   9.796 -{
   9.797 -	struct task_struct *idle;
   9.798 -	unsigned long boot_error;
   9.799 -	int timeout, cpu;
   9.800 -	unsigned long start_eip;
   9.801 -#if 0
   9.802 -	unsigned short nmi_high = 0, nmi_low = 0;
   9.803 -#endif
   9.804 -	vcpu_guest_context_t ctxt;
   9.805 -	extern void startup_32_smp(void);
   9.806 -	extern void hypervisor_callback(void);
   9.807 -	extern void failsafe_callback(void);
   9.808 -	extern void smp_trap_init(trap_info_t *);
   9.809 -
   9.810 -	cpu = ++cpucount;
   9.811 -	/*
   9.812 -	 * We can't use kernel_thread since we must avoid to
   9.813 -	 * reschedule the child.
   9.814 -	 */
   9.815 -	idle = fork_idle(cpu);
   9.816 -	if (IS_ERR(idle))
   9.817 -		panic("failed fork for CPU %d", cpu);
   9.818 -	idle->thread.eip = (unsigned long) start_secondary;
   9.819 -	/* start_eip had better be page-aligned! */
   9.820 -	start_eip = (unsigned long)startup_32_smp;
   9.821 -
   9.822 -	/* So we see what's up   */
   9.823 -	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
   9.824 -	/* Stack for startup_32 can be just as for start_secondary onwards */
   9.825 -	stack_start.esp = (void *) idle->thread.esp;
   9.826 -
   9.827 -	irq_ctx_init(cpu);
   9.828 -
   9.829 -	/*
   9.830 -	 * This grunge runs the startup process for
   9.831 -	 * the targeted processor.
   9.832 -	 */
   9.833 -
   9.834 -	atomic_set(&init_deasserted, 0);
   9.835 -
   9.836 -#if 1
   9.837 -	cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
   9.838 -	BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
   9.839 -	cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
   9.840 -	memcpy((void *)cpu_gdt_descr[cpu].address,
   9.841 -	       (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
   9.842 -
   9.843 -	memset(&ctxt, 0, sizeof(ctxt));
   9.844 -
   9.845 -	ctxt.user_regs.ds = __USER_DS;
   9.846 -	ctxt.user_regs.es = __USER_DS;
   9.847 -	ctxt.user_regs.fs = 0;
   9.848 -	ctxt.user_regs.gs = 0;
   9.849 -	ctxt.user_regs.ss = __KERNEL_DS;
   9.850 -	ctxt.user_regs.cs = __KERNEL_CS;
   9.851 -	ctxt.user_regs.eip = start_eip;
   9.852 -	ctxt.user_regs.esp = idle->thread.esp;
   9.853 -#define X86_EFLAGS_IOPL_RING1 0x1000
   9.854 -	ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
   9.855 -
   9.856 -	/* FPU is set up to default initial state. */
   9.857 -	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
   9.858 -
   9.859 -	smp_trap_init(ctxt.trap_ctxt);
   9.860 -
   9.861 -	/* No LDT. */
   9.862 -	ctxt.ldt_ents = 0;
   9.863 -
   9.864 -	{
   9.865 -		unsigned long va;
   9.866 -		int f;
   9.867 -
   9.868 -		for (va = cpu_gdt_descr[cpu].address, f = 0;
   9.869 -		     va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
   9.870 -		     va += PAGE_SIZE, f++) {
   9.871 -			ctxt.gdt_frames[f] = virt_to_mfn(va);
   9.872 -			make_page_readonly((void *)va);
   9.873 -		}
   9.874 -		ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
   9.875 -	}
   9.876 -
   9.877 -	/* Ring 1 stack is the initial stack. */
   9.878 -	ctxt.kernel_ss = __KERNEL_DS;
   9.879 -	ctxt.kernel_sp = idle->thread.esp;
   9.880 -
   9.881 -	/* Callback handlers. */
   9.882 -	ctxt.event_callback_cs     = __KERNEL_CS;
   9.883 -	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
   9.884 -	ctxt.failsafe_callback_cs  = __KERNEL_CS;
   9.885 -	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
   9.886 -
   9.887 -	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
   9.888 -
   9.889 -	boot_error = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
   9.890 -	if (boot_error)
   9.891 -		printk("boot error: %ld\n", boot_error);
   9.892 -
   9.893 -	if (!boot_error) {
   9.894 -		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
   9.895 -
   9.896 -		/*
   9.897 -		 * allow APs to start initializing.
   9.898 -		 */
   9.899 -		Dprintk("Before Callout %d.\n", cpu);
   9.900 -		cpu_set(cpu, cpu_callout_map);
   9.901 -		Dprintk("After Callout %d.\n", cpu);
   9.902 -
   9.903 -		/*
   9.904 -		 * Wait 5s total for a response
   9.905 -		 */
   9.906 -		for (timeout = 0; timeout < 50000; timeout++) {
   9.907 -			if (cpu_isset(cpu, cpu_callin_map))
   9.908 -				break;	/* It has booted */
   9.909 -			udelay(100);
   9.910 -		}
   9.911 -
   9.912 -		if (cpu_isset(cpu, cpu_callin_map)) {
   9.913 -			/* number CPUs logically, starting from 1 (BSP is 0) */
   9.914 -			Dprintk("OK.\n");
   9.915 -			printk("CPU%d: ", cpu);
   9.916 -			print_cpu_info(&cpu_data[cpu]);
   9.917 -			Dprintk("CPU has booted.\n");
   9.918 -		} else {
   9.919 -			boot_error= 1;
   9.920 -		}
   9.921 -	}
   9.922 -	x86_cpu_to_apicid[cpu] = apicid;
   9.923 -	if (boot_error) {
   9.924 -		/* Try to put things back the way they were before ... */
   9.925 -		unmap_cpu_to_logical_apicid(cpu);
   9.926 -		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
   9.927 -		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
   9.928 -		cpucount--;
   9.929 -	}
   9.930 -
   9.931 -#else
   9.932 -	Dprintk("Setting warm reset code and vector.\n");
   9.933 -
   9.934 -	store_NMI_vector(&nmi_high, &nmi_low);
   9.935 -
   9.936 -	smpboot_setup_warm_reset_vector(start_eip);
   9.937 -
   9.938 -	/*
   9.939 -	 * Starting actual IPI sequence...
   9.940 -	 */
   9.941 -	boot_error = wakeup_secondary_cpu(apicid, start_eip);
   9.942 -
   9.943 -	if (!boot_error) {
   9.944 -		/*
   9.945 -		 * allow APs to start initializing.
   9.946 -		 */
   9.947 -		Dprintk("Before Callout %d.\n", cpu);
   9.948 -		cpu_set(cpu, cpu_callout_map);
   9.949 -		Dprintk("After Callout %d.\n", cpu);
   9.950 -
   9.951 -		/*
   9.952 -		 * Wait 5s total for a response
   9.953 -		 */
   9.954 -		for (timeout = 0; timeout < 50000; timeout++) {
   9.955 -			if (cpu_isset(cpu, cpu_callin_map))
   9.956 -				break;	/* It has booted */
   9.957 -			udelay(100);
   9.958 -		}
   9.959 -
   9.960 -		if (cpu_isset(cpu, cpu_callin_map)) {
   9.961 -			/* number CPUs logically, starting from 1 (BSP is 0) */
   9.962 -			Dprintk("OK.\n");
   9.963 -			printk("CPU%d: ", cpu);
   9.964 -			print_cpu_info(&cpu_data[cpu]);
   9.965 -			Dprintk("CPU has booted.\n");
   9.966 -		} else {
   9.967 -			boot_error= 1;
   9.968 -			if (*((volatile unsigned char *)trampoline_base)
   9.969 -					== 0xA5)
   9.970 -				/* trampoline started but...? */
   9.971 -				printk("Stuck ??\n");
   9.972 -			else
   9.973 -				/* trampoline code not run */
   9.974 -				printk("Not responding.\n");
   9.975 -			inquire_remote_apic(apicid);
   9.976 -		}
   9.977 -	}
   9.978 -	x86_cpu_to_apicid[cpu] = apicid;
   9.979 -	if (boot_error) {
   9.980 -		/* Try to put things back the way they were before ... */
   9.981 -		unmap_cpu_to_logical_apicid(cpu);
   9.982 -		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
   9.983 -		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
   9.984 -		cpucount--;
   9.985 -	}
   9.986 -
   9.987 -	/* mark "stuck" area as not stuck */
   9.988 -	*((volatile unsigned long *)trampoline_base) = 0;
   9.989 -#endif
   9.990 -
   9.991 -	return boot_error;
   9.992 -}
   9.993 -
   9.994 -static void smp_tune_scheduling (void)
   9.995 -{
   9.996 -	unsigned long cachesize;       /* kB   */
   9.997 -	unsigned long bandwidth = 350; /* MB/s */
   9.998 -	/*
   9.999 -	 * Rough estimation for SMP scheduling, this is the number of
  9.1000 -	 * cycles it takes for a fully memory-limited process to flush
  9.1001 -	 * the SMP-local cache.
  9.1002 -	 *
  9.1003 -	 * (For a P5 this pretty much means we will choose another idle
  9.1004 -	 *  CPU almost always at wakeup time (this is due to the small
  9.1005 -	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
  9.1006 -	 *  the cache size)
  9.1007 -	 */
  9.1008 -
  9.1009 -	if (!cpu_khz) {
  9.1010 -		/*
  9.1011 -		 * this basically disables processor-affinity
  9.1012 -		 * scheduling on SMP without a TSC.
  9.1013 -		 */
  9.1014 -		return;
  9.1015 -	} else {
  9.1016 -		cachesize = boot_cpu_data.x86_cache_size;
  9.1017 -		if (cachesize == -1) {
  9.1018 -			cachesize = 16; /* Pentiums, 2x8kB cache */
  9.1019 -			bandwidth = 100;
  9.1020 -		}
  9.1021 -	}
  9.1022 -}
  9.1023 -
  9.1024 -/*
  9.1025 - * Cycle through the processors sending APIC IPIs to boot each.
  9.1026 - */
  9.1027 -
  9.1028 -#if 0
  9.1029 -static int boot_cpu_logical_apicid;
  9.1030 -#endif
  9.1031 -/* Where the IO area was mapped on multiquad, always 0 otherwise */
  9.1032 -void *xquad_portio;
  9.1033 -
  9.1034 -cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
  9.1035 -cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
  9.1036 -EXPORT_SYMBOL(cpu_core_map);
  9.1037 -
  9.1038 -static void __init smp_boot_cpus(unsigned int max_cpus)
  9.1039 -{
  9.1040 -	int cpu, kicked;
  9.1041 -	unsigned long bogosum = 0;
  9.1042 -#if 0
  9.1043 -	int apicid, bit;
  9.1044 -#endif
  9.1045 -
  9.1046 -	/*
  9.1047 -	 * Setup boot CPU information
  9.1048 -	 */
  9.1049 -	smp_store_cpu_info(0); /* Final full version of the data */
  9.1050 -	printk("CPU%d: ", 0);
  9.1051 -	print_cpu_info(&cpu_data[0]);
  9.1052 -
  9.1053 -#if 0
  9.1054 -	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
  9.1055 -	boot_cpu_logical_apicid = logical_smp_processor_id();
  9.1056 -	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
  9.1057 -#else
  9.1058 -	// boot_cpu_physical_apicid = 0;
  9.1059 -	// boot_cpu_logical_apicid = 0;
  9.1060 -	x86_cpu_to_apicid[0] = 0;
  9.1061 -#endif
  9.1062 -
  9.1063 -	current_thread_info()->cpu = 0;
  9.1064 -	smp_tune_scheduling();
  9.1065 -	cpus_clear(cpu_sibling_map[0]);
  9.1066 -	cpu_set(0, cpu_sibling_map[0]);
  9.1067 -
  9.1068 -	cpus_clear(cpu_core_map[0]);
  9.1069 -	cpu_set(0, cpu_core_map[0]);
  9.1070 -
  9.1071 -#ifdef CONFIG_X86_IO_APIC
  9.1072 -	/*
  9.1073 -	 * If we couldn't find an SMP configuration at boot time,
  9.1074 -	 * get out of here now!
  9.1075 -	 */
  9.1076 -	if (!smp_found_config && !acpi_lapic) {
  9.1077 -		printk(KERN_NOTICE "SMP motherboard not detected.\n");
  9.1078 -		smpboot_clear_io_apic_irqs();
  9.1079 -#if 0
  9.1080 -		phys_cpu_present_map = physid_mask_of_physid(0);
  9.1081 -#endif
  9.1082 -#ifdef CONFIG_X86_LOCAL_APIC
  9.1083 -		if (APIC_init_uniprocessor())
  9.1084 -			printk(KERN_NOTICE "Local APIC not detected."
  9.1085 -					   " Using dummy APIC emulation.\n");
  9.1086 -#endif
  9.1087 -		map_cpu_to_logical_apicid();
  9.1088 -		cpu_set(0, cpu_sibling_map[0]);
  9.1089 -		cpu_set(0, cpu_core_map[0]);
  9.1090 -		return;
  9.1091 -	}
  9.1092 -#endif
  9.1093 -
  9.1094 -#if 0
  9.1095 -	/*
  9.1096 -	 * Should not be necessary because the MP table should list the boot
  9.1097 -	 * CPU too, but we do it for the sake of robustness anyway.
  9.1098 -	 * Makes no sense to do this check in clustered apic mode, so skip it
  9.1099 -	 */
  9.1100 -	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
  9.1101 -		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
  9.1102 -				boot_cpu_physical_apicid);
  9.1103 -		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
  9.1104 -	}
  9.1105 -
  9.1106 -	/*
  9.1107 -	 * If we couldn't find a local APIC, then get out of here now!
  9.1108 -	 */
  9.1109 -	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
  9.1110 -		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
  9.1111 -			boot_cpu_physical_apicid);
  9.1112 -		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
  9.1113 -		smpboot_clear_io_apic_irqs();
  9.1114 -		phys_cpu_present_map = physid_mask_of_physid(0);
  9.1115 -		cpu_set(0, cpu_sibling_map[0]);
  9.1116 -		cpu_set(0, cpu_core_map[0]);
  9.1117 -		cpu_set(0, cpu_sibling_map[0]);
  9.1118 -		cpu_set(0, cpu_core_map[0]);
  9.1119 -		return;
  9.1120 -	}
  9.1121 -
  9.1122 -	verify_local_APIC();
  9.1123 -#endif
  9.1124 -
  9.1125 -	/*
  9.1126 -	 * If SMP should be disabled, then really disable it!
  9.1127 -	 */
  9.1128 -	if (!max_cpus) {
  9.1129 -		HYPERVISOR_shared_info->n_vcpu = 1;
  9.1130 -		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
  9.1131 -		smpboot_clear_io_apic_irqs();
  9.1132 -#if 0
  9.1133 -		phys_cpu_present_map = physid_mask_of_physid(0);
  9.1134 -#endif
  9.1135 -		return;
  9.1136 -	}
  9.1137 -
  9.1138 -	smp_intr_init();
  9.1139 -
  9.1140 -#if 0
  9.1141 -	connect_bsp_APIC();
  9.1142 -	setup_local_APIC();
  9.1143 -#endif
  9.1144 -	map_cpu_to_logical_apicid();
  9.1145 -#if 0
  9.1146 -
  9.1147 -
  9.1148 -	setup_portio_remap();
  9.1149 -
  9.1150 -	/*
  9.1151 -	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
  9.1152 -	 *
  9.1153 -	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
  9.1154 -	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
  9.1155 -	 * clustered apic ID.
  9.1156 -	 */
  9.1157 -	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
  9.1158 -#endif
  9.1159 -	Dprintk("CPU present map: %lx\n",
  9.1160 -		(1UL << HYPERVISOR_shared_info->n_vcpu) - 1);
  9.1161 -
  9.1162 -	kicked = 1;
  9.1163 -	for (cpu = 1; kicked < NR_CPUS &&
  9.1164 -		     cpu < HYPERVISOR_shared_info->n_vcpu; cpu++) {
  9.1165 -		if (max_cpus <= cpucount+1)
  9.1166 -			continue;
  9.1167 -
  9.1168 -#ifdef CONFIG_SMP_ALTERNATIVES
  9.1169 -		if (kicked == 1)
  9.1170 -			prepare_for_smp();
  9.1171 -#endif
  9.1172 -		if (do_boot_cpu(cpu))
  9.1173 -			printk("CPU #%d not responding - cannot use it.\n",
  9.1174 -								cpu);
  9.1175 -		else
  9.1176 -			++kicked;
  9.1177 -	}
  9.1178 -
  9.1179 -#if 0
  9.1180 -	/*
  9.1181 -	 * Cleanup possible dangling ends...
  9.1182 -	 */
  9.1183 -	smpboot_restore_warm_reset_vector();
  9.1184 -#endif
  9.1185 -
  9.1186 -	/*
  9.1187 -	 * Allow the user to impress friends.
  9.1188 -	 */
  9.1189 -	Dprintk("Before bogomips.\n");
  9.1190 -	for (cpu = 0; cpu < NR_CPUS; cpu++)
  9.1191 -		if (cpu_isset(cpu, cpu_callout_map))
  9.1192 -			bogosum += cpu_data[cpu].loops_per_jiffy;
  9.1193 -	printk(KERN_INFO
  9.1194 -		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
  9.1195 -		cpucount+1,
  9.1196 -		bogosum/(500000/HZ),
  9.1197 -		(bogosum/(5000/HZ))%100);
  9.1198 -	
  9.1199 -	Dprintk("Before bogocount - setting activated=1.\n");
  9.1200 -
  9.1201 -	if (smp_b_stepping)
  9.1202 -		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
  9.1203 -
  9.1204 -	/*
  9.1205 -	 * Don't taint if we are running SMP kernel on a single non-MP
  9.1206 -	 * approved Athlon
  9.1207 -	 */
  9.1208 -	if (tainted & TAINT_UNSAFE_SMP) {
  9.1209 -		if (cpucount)
  9.1210 -			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
  9.1211 -		else
  9.1212 -			tainted &= ~TAINT_UNSAFE_SMP;
  9.1213 -	}
  9.1214 -
  9.1215 -	Dprintk("Boot done.\n");
  9.1216 -
  9.1217 -	/*
  9.1218 -	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
  9.1219 -	 * efficiently.
  9.1220 -	 */
  9.1221 -	for (cpu = 0; cpu < NR_CPUS; cpu++) {
  9.1222 -		cpus_clear(cpu_sibling_map[cpu]);
  9.1223 -		cpus_clear(cpu_core_map[cpu]);
  9.1224 -	}
  9.1225 -
  9.1226 -	for (cpu = 0; cpu < NR_CPUS; cpu++) {
  9.1227 -		struct cpuinfo_x86 *c = cpu_data + cpu;
  9.1228 -		int siblings = 0;
  9.1229 -		int i;
  9.1230 -		if (!cpu_isset(cpu, cpu_callout_map))
  9.1231 -			continue;
  9.1232 -
  9.1233 -		if (smp_num_siblings > 1) {
  9.1234 -			for (i = 0; i < NR_CPUS; i++) {
  9.1235 -				if (!cpu_isset(i, cpu_callout_map))
  9.1236 -					continue;
  9.1237 -				if (cpu_core_id[cpu] == cpu_core_id[i]) {
  9.1238 -					siblings++;
  9.1239 -					cpu_set(i, cpu_sibling_map[cpu]);
  9.1240 -				}
  9.1241 -			}
  9.1242 -		} else {
  9.1243 -			siblings++;
  9.1244 -			cpu_set(cpu, cpu_sibling_map[cpu]);
  9.1245 -		}
  9.1246 -
  9.1247 -		if (siblings != smp_num_siblings) {
  9.1248 -			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
  9.1249 -			smp_num_siblings = siblings;
  9.1250 -		}
  9.1251 -
  9.1252 -		if (c->x86_num_cores > 1) {
  9.1253 -			for (i = 0; i < NR_CPUS; i++) {
  9.1254 -				if (!cpu_isset(i, cpu_callout_map))
  9.1255 -					continue;
  9.1256 -				if (phys_proc_id[cpu] == phys_proc_id[i]) {
  9.1257 -					cpu_set(i, cpu_core_map[cpu]);
  9.1258 -				}
  9.1259 -			}
  9.1260 -		} else {
  9.1261 -			cpu_core_map[cpu] = cpu_sibling_map[cpu];
  9.1262 -		}
  9.1263 -	}
  9.1264 -
  9.1265 -	smpboot_setup_io_apic();
  9.1266 -
  9.1267 -#if 0
  9.1268 -	setup_boot_APIC_clock();
  9.1269 -
  9.1270 -	/*
  9.1271 -	 * Synchronize the TSC with the AP
  9.1272 -	 */
  9.1273 -	if (cpu_has_tsc && cpucount && cpu_khz)
  9.1274 -		synchronize_tsc_bp();
  9.1275 -#endif
  9.1276 -}
  9.1277 -
  9.1278 -/* These are wrappers to interface to the new boot process.  Someone
  9.1279 -   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
  9.1280 -void __init smp_prepare_cpus(unsigned int max_cpus)
  9.1281 -{
  9.1282 -	smp_commenced_mask = cpumask_of_cpu(0);
  9.1283 -	cpu_callin_map = cpumask_of_cpu(0);
  9.1284 -	mb();
  9.1285 -	smp_boot_cpus(max_cpus);
  9.1286 -}
  9.1287 -
  9.1288 -void __devinit smp_prepare_boot_cpu(void)
  9.1289 -{
  9.1290 -	cpu_set(smp_processor_id(), cpu_online_map);
  9.1291 -	cpu_set(smp_processor_id(), cpu_callout_map);
  9.1292 -}
  9.1293 -
  9.1294 -#ifdef CONFIG_HOTPLUG_CPU
  9.1295 -#include <asm-xen/xenbus.h>
  9.1296 -/* hotplug down/up funtion pointer and target vcpu */
  9.1297 -struct vcpu_hotplug_handler_t {
  9.1298 -	void (*fn) (int vcpu);
  9.1299 -	u32 vcpu;
  9.1300 -};
  9.1301 -static struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
  9.1302 -
  9.1303 -static int vcpu_hotplug_cpu_process(void *unused)
  9.1304 -{
  9.1305 -	struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
  9.1306 -
  9.1307 -	if (handler->fn) {
  9.1308 -		(*(handler->fn)) (handler->vcpu);
  9.1309 -		handler->fn = NULL;
  9.1310 -	}
  9.1311 -	return 0;
  9.1312 -}
  9.1313 -
  9.1314 -static void __vcpu_hotplug_handler(void *unused)
  9.1315 -{
  9.1316 -	int err;
  9.1317 -
  9.1318 -	err = kernel_thread(vcpu_hotplug_cpu_process,
  9.1319 -			    NULL, CLONE_FS | CLONE_FILES);
  9.1320 -	if (err < 0)
  9.1321 -		printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
  9.1322 -}
  9.1323 -
  9.1324 -static void handle_vcpu_hotplug_event(struct xenbus_watch *, const char *);
  9.1325 -static struct notifier_block xsn_cpu;
  9.1326 -
  9.1327 -/* xenbus watch struct */
  9.1328 -static struct xenbus_watch cpu_watch = {
  9.1329 -	.node = "cpu",
  9.1330 -	.callback = handle_vcpu_hotplug_event
  9.1331 -};
  9.1332 -
  9.1333 -static int setup_cpu_watcher(struct notifier_block *notifier,
  9.1334 -			      unsigned long event, void *data)
  9.1335 -{
  9.1336 -	int err;
  9.1337 -
  9.1338 -	err = register_xenbus_watch(&cpu_watch);
  9.1339 -	if (err)
  9.1340 -		printk("Failed to register watch on /cpu\n");
  9.1341 -
  9.1342 -	return NOTIFY_DONE;
  9.1343 -}
  9.1344 -
  9.1345 -static void handle_vcpu_hotplug_event(struct xenbus_watch *watch, const char *node)
  9.1346 -{
  9.1347 -	static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
  9.1348 -	struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
  9.1349 -	ssize_t ret;
  9.1350 -	int err, cpu;
  9.1351 -	char state[8];
  9.1352 -	char dir[32];
  9.1353 -	char *cpustr;
  9.1354 -
  9.1355 -	/* get a pointer to start of cpu string */
  9.1356 -	if ((cpustr = strstr(node, "cpu/")) != NULL) {
  9.1357 -
  9.1358 -		/* find which cpu state changed, note vcpu for handler */
  9.1359 -		sscanf(cpustr, "cpu/%d", &cpu);
  9.1360 -		handler->vcpu = cpu;
  9.1361 -
  9.1362 -		/* calc the dir for xenbus read */
  9.1363 -		sprintf(dir, "cpu/%d", cpu);
  9.1364 -
  9.1365 -		/* make sure watch that was triggered is changes to the correct key */
  9.1366 -		if ((strcmp(node + strlen(dir), "/availability")) != 0)
  9.1367 -			return;
  9.1368 -
  9.1369 -		/* get the state value */
  9.1370 -		err = xenbus_scanf(NULL, dir, "availability", "%s", state);
  9.1371 -
  9.1372 -		if (err != 1) {
  9.1373 -			printk(KERN_ERR
  9.1374 -			       "XENBUS: Unable to read cpu state\n");
  9.1375 -			return;
  9.1376 -		}
  9.1377 -
  9.1378 -		/* if we detect a state change, take action */
  9.1379 -		if (strcmp(state, "online") == 0) {
  9.1380 -			/* offline -> online */
  9.1381 -			if (!cpu_isset(cpu, cpu_online_map)) {
  9.1382 -				handler->fn = (void *)&cpu_up;
  9.1383 -				ret = schedule_work(&vcpu_hotplug_work);
  9.1384 -			} 
  9.1385 -		} else if (strcmp(state, "offline") == 0) {
  9.1386 -			/* online -> offline */
  9.1387 -			if (cpu_isset(cpu, cpu_online_map)) {
  9.1388 -				handler->fn = (void *)&cpu_down;
  9.1389 -				ret = schedule_work(&vcpu_hotplug_work);
  9.1390 -			} 
  9.1391 -		} else {
  9.1392 -			printk(KERN_ERR
  9.1393 -			       "XENBUS: unknown state(%s) on node(%s)\n", state,
  9.1394 -			       node);
  9.1395 -		}
  9.1396 -	}
  9.1397 -	return;
  9.1398 -}
  9.1399 -
  9.1400 -static int __init setup_vcpu_hotplug_event(void)
  9.1401 -{
  9.1402 -	xsn_cpu.notifier_call = setup_cpu_watcher;
  9.1403 -
  9.1404 -	register_xenstore_notifier(&xsn_cpu);
  9.1405 -
  9.1406 -	return 0;
  9.1407 -}
  9.1408 -
  9.1409 -subsys_initcall(setup_vcpu_hotplug_event);
  9.1410 -
  9.1411 -/* must be called with the cpucontrol mutex held */
  9.1412 -static int __devinit cpu_enable(unsigned int cpu)
  9.1413 -{
  9.1414 -#ifdef CONFIG_SMP_ALTERNATIVES
  9.1415 -	if (num_online_cpus() == 1)
  9.1416 -		prepare_for_smp();
  9.1417 -#endif
  9.1418 -
  9.1419 -	/* get the target out of its holding state */
  9.1420 -	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
  9.1421 -	wmb();
  9.1422 -
  9.1423 -	/* wait for the processor to ack it. timeout? */
  9.1424 -	while (!cpu_online(cpu))
  9.1425 -		cpu_relax();
  9.1426 -
  9.1427 -	fixup_irqs(cpu_online_map);
  9.1428 -
  9.1429 -	/* counter the disable in fixup_irqs() */
  9.1430 -	local_irq_enable();
  9.1431 -	return 0;
  9.1432 -}
  9.1433 -
  9.1434 -int __cpu_disable(void)
  9.1435 -{
  9.1436 -	cpumask_t map = cpu_online_map;
  9.1437 -	int cpu = smp_processor_id();
  9.1438 -
  9.1439 -	/*
  9.1440 -	 * Perhaps use cpufreq to drop frequency, but that could go
  9.1441 -	 * into generic code.
  9.1442 - 	 *
  9.1443 -	 * We won't take down the boot processor on i386 due to some
  9.1444 -	 * interrupts only being able to be serviced by the BSP.
  9.1445 -	 * Especially so if we're not using an IOAPIC	-zwane
  9.1446 -	 */
  9.1447 -	if (cpu == 0)
  9.1448 -		return -EBUSY;
  9.1449 -
  9.1450 -	cpu_clear(cpu, map);
  9.1451 -	fixup_irqs(map);
  9.1452 -
  9.1453 -	/* It's now safe to remove this processor from the online map */
  9.1454 -	cpu_clear(cpu, cpu_online_map);
  9.1455 -
  9.1456 -#ifdef CONFIG_SMP_ALTERNATIVES
  9.1457 -	if (num_online_cpus() == 1)
  9.1458 -		unprepare_for_smp();
  9.1459 -#endif
  9.1460 -
  9.1461 -	return 0;
  9.1462 -}
  9.1463 -
  9.1464 -void __cpu_die(unsigned int cpu)
  9.1465 -{
  9.1466 -	/* We don't do anything here: idle task is faking death itself. */
  9.1467 -	unsigned int i;
  9.1468 -
  9.1469 -	for (i = 0; i < 10; i++) {
  9.1470 -		/* They ack this in play_dead by setting CPU_DEAD */
  9.1471 -		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
  9.1472 -			return;
  9.1473 -		current->state = TASK_UNINTERRUPTIBLE;
  9.1474 -		schedule_timeout(HZ/10);
  9.1475 -	}
  9.1476 - 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
  9.1477 -}
  9.1478 -
  9.1479 -#else /* ... !CONFIG_HOTPLUG_CPU */
  9.1480 -int __cpu_disable(void)
  9.1481 -{
  9.1482 -	return -ENOSYS;
  9.1483 -}
  9.1484 -
  9.1485 -void __cpu_die(unsigned int cpu)
  9.1486 -{
  9.1487 -	/* We said "no" in __cpu_disable */
  9.1488 -	BUG();
  9.1489 -}
  9.1490 -#endif /* CONFIG_HOTPLUG_CPU */
  9.1491 -
  9.1492 -int __devinit __cpu_up(unsigned int cpu)
  9.1493 -{
  9.1494 -	/* In case one didn't come up */
  9.1495 -	if (!cpu_isset(cpu, cpu_callin_map)) {
  9.1496 -		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
  9.1497 -		local_irq_enable();
  9.1498 -		return -EIO;
  9.1499 -	}
  9.1500 -
  9.1501 -#ifdef CONFIG_HOTPLUG_CPU
  9.1502 -#ifdef CONFIG_XEN
  9.1503 -	/* Tell hypervisor to bring vcpu up. */
  9.1504 -	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
  9.1505 -#endif
  9.1506 -	/* Already up, and in cpu_quiescent now? */
  9.1507 -	if (cpu_isset(cpu, smp_commenced_mask)) {
  9.1508 -		cpu_enable(cpu);
  9.1509 -		return 0;
  9.1510 -	}
  9.1511 -#endif
  9.1512 -
  9.1513 -	local_irq_enable();
  9.1514 -	/* Unleash the CPU! */
  9.1515 -	cpu_set(cpu, smp_commenced_mask);
  9.1516 -	while (!cpu_isset(cpu, cpu_online_map))
  9.1517 -		mb();
  9.1518 -	return 0;
  9.1519 -}
  9.1520 -
  9.1521 -void __init smp_cpus_done(unsigned int max_cpus)
  9.1522 -{
  9.1523 -#if 1
  9.1524 -#else
  9.1525 -#ifdef CONFIG_X86_IO_APIC
  9.1526 -	setup_ioapic_dest();
  9.1527 -#endif
  9.1528 -	zap_low_mappings();
  9.1529 -	/*
  9.1530 -	 * Disable executability of the SMP trampoline:
  9.1531 -	 */
  9.1532 -	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
  9.1533 -#endif
  9.1534 -}
  9.1535 -
  9.1536 -extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
  9.1537 -extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
  9.1538 -
  9.1539 -void smp_intr_init(void)
  9.1540 -{
  9.1541 -	int cpu = smp_processor_id();
  9.1542 -
  9.1543 -	per_cpu(resched_irq, cpu) =
  9.1544 -		bind_ipi_to_irq(RESCHEDULE_VECTOR);
  9.1545 -	sprintf(resched_name[cpu], "resched%d", cpu);
  9.1546 -	BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
  9.1547 -	                   SA_INTERRUPT, resched_name[cpu], NULL));
  9.1548 -
  9.1549 -	per_cpu(callfunc_irq, cpu) =
  9.1550 -		bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
  9.1551 -	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
  9.1552 -	BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
  9.1553 -	                   smp_call_function_interrupt,
  9.1554 -	                   SA_INTERRUPT, callfunc_name[cpu], NULL));
  9.1555 -}
  9.1556 -
  9.1557 -static void smp_intr_exit(void)
  9.1558 -{
  9.1559 -	int cpu = smp_processor_id();
  9.1560 -
  9.1561 -	free_irq(per_cpu(resched_irq, cpu), NULL);
  9.1562 -	unbind_ipi_from_irq(RESCHEDULE_VECTOR);
  9.1563 -
  9.1564 -	free_irq(per_cpu(callfunc_irq, cpu), NULL);
  9.1565 -	unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
  9.1566 -}
  9.1567 -
  9.1568 -extern void local_setup_timer_irq(void);
  9.1569 -extern void local_teardown_timer_irq(void);
  9.1570 -
  9.1571 -void smp_suspend(void)
  9.1572 -{
  9.1573 -	local_teardown_timer_irq();
  9.1574 -	smp_intr_exit();
  9.1575 -}
  9.1576 -
  9.1577 -void smp_resume(void)
  9.1578 -{
  9.1579 -	smp_intr_init();
  9.1580 -	local_setup_timer();
  9.1581 -}
  9.1582 -
  9.1583 -void vcpu_prepare(int vcpu)
  9.1584 -{
  9.1585 -	extern void hypervisor_callback(void);
  9.1586 -	extern void failsafe_callback(void);
  9.1587 -	extern void smp_trap_init(trap_info_t *);
  9.1588 -	extern void cpu_restore(void);
  9.1589 -	vcpu_guest_context_t ctxt;
  9.1590 -	struct task_struct *idle = idle_task(vcpu);
  9.1591 -
  9.1592 -	if (vcpu == 0)
  9.1593 -		return;
  9.1594 -
  9.1595 -	memset(&ctxt, 0, sizeof(ctxt));
  9.1596 -
  9.1597 -	ctxt.user_regs.ds = __USER_DS;
  9.1598 -	ctxt.user_regs.es = __USER_DS;
  9.1599 -	ctxt.user_regs.fs = 0;
  9.1600 -	ctxt.user_regs.gs = 0;
  9.1601 -	ctxt.user_regs.ss = __KERNEL_DS;
  9.1602 -	ctxt.user_regs.cs = __KERNEL_CS;
  9.1603 -	ctxt.user_regs.eip = (unsigned long)cpu_restore;
  9.1604 -	ctxt.user_regs.esp = idle->thread.esp;
  9.1605 -	ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
  9.1606 -
  9.1607 -	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
  9.1608 -
  9.1609 -	smp_trap_init(ctxt.trap_ctxt);
  9.1610 -
  9.1611 -	ctxt.ldt_ents = 0;
  9.1612 -
  9.1613 -	ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
  9.1614 -	ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
  9.1615 -
  9.1616 -	ctxt.kernel_ss = __KERNEL_DS;
  9.1617 -	ctxt.kernel_sp = idle->thread.esp0;
  9.1618 -
  9.1619 -	ctxt.event_callback_cs     = __KERNEL_CS;
  9.1620 -	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
  9.1621 -	ctxt.failsafe_callback_cs  = __KERNEL_CS;
  9.1622 -	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
  9.1623 -
  9.1624 -	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
  9.1625 -
  9.1626 -	(void)HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt);
  9.1627 -	(void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
  9.1628 -}
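
The sibling-map pass removed above pairs each CPU with every other callout CPU sharing its core ID, and falls back to a self-only map when there is no SMT. The pairing logic is simple enough to run stand-alone; a minimal user-space sketch, with cpumask_t replaced by a plain bitmask and invented core IDs:

    #include <stdio.h>

    #define NCPUS 4

    int main(void)
    {
        /* Invented topology: CPUs 0/1 share a core, as do 2/3. */
        int core_id[NCPUS] = { 0, 0, 1, 1 };
        unsigned long sibling_map[NCPUS] = { 0 };

        for (int cpu = 0; cpu < NCPUS; cpu++)
            for (int i = 0; i < NCPUS; i++)
                if (core_id[cpu] == core_id[i])
                    sibling_map[cpu] |= 1UL << i;   /* cpu_set(i, ...) */

        for (int cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d siblings: %#lx\n", cpu, sibling_map[cpu]);
        return 0;
    }
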
    10.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Fri Oct 21 13:58:39 2005 -0600
    10.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Mon Oct 24 09:08:13 2005 -0600
    10.3 @@ -122,7 +122,8 @@ static u32 shadow_tv_version;
    10.4  static u64 processed_system_time;   /* System time (ns) at last processing. */
    10.5  static DEFINE_PER_CPU(u64, processed_system_time);
    10.6  
    10.7 -#define NS_PER_TICK (1000000000ULL/HZ)
    10.8 +/* Must be signed, as it's compared with s64 quantities which can be -ve. */
    10.9 +#define NS_PER_TICK (1000000000LL/HZ)
   10.10  
   10.11  static inline void __normalize_time(time_t *sec, s64 *nsec)
   10.12  {
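
The new comment is worth a concrete demonstration: with the old ULL definition, comparing a negative s64 against NS_PER_TICK promoted the signed operand to unsigned, so the comparison silently misfired. A stand-alone illustration (the HZ value here is an assumption):

    #include <stdio.h>

    #define HZ 100
    #define NS_PER_TICK_OLD (1000000000ULL/HZ) /* unsigned */
    #define NS_PER_TICK_NEW (1000000000LL/HZ)  /* signed */

    int main(void)
    {
        long long delta = -5; /* e.g. time briefly went backwards */

        /* delta converts to unsigned long long and becomes huge,
         * so the mathematically true comparison evaluates false. */
        printf("old: %d\n", delta < NS_PER_TICK_OLD); /* prints 0 */
        printf("new: %d\n", delta < NS_PER_TICK_NEW); /* prints 1 */
        return 0;
    }
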
   10.13 @@ -235,9 +236,9 @@ static void __update_wallclock(time_t se
   10.14  
   10.15  	/* Adjust wall-clock time base based on wall_jiffies ticks. */
   10.16  	wc_nsec = processed_system_time;
   10.17 -	wc_nsec += (u64)sec * 1000000000ULL;
   10.18 -	wc_nsec += (u64)nsec;
   10.19 -	wc_nsec -= (jiffies - wall_jiffies) * (u64)(NSEC_PER_SEC / HZ);
   10.20 +	wc_nsec += sec * (u64)NSEC_PER_SEC;
   10.21 +	wc_nsec += nsec;
   10.22 +	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
   10.23  
   10.24  	/* Split wallclock base into seconds and nanoseconds. */
   10.25  	tmp = wc_nsec;
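
The split announced by the comment above reduces a 64-bit nanosecond count to whole seconds plus a sub-second remainder; in the kernel that is done in place with do_div(), but the arithmetic is ordinary division and modulus (the sample value below is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t wc_nsec = 1130000000123456789ULL; /* arbitrary total ns */
        uint64_t sec  = wc_nsec / 1000000000ULL;
        uint64_t nsec = wc_nsec % 1000000000ULL;

        printf("%llu s + %llu ns\n",
               (unsigned long long)sec, (unsigned long long)nsec);
        return 0;
    }
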
   10.26 @@ -437,7 +438,7 @@ int do_settimeofday(struct timespec *tv)
   10.27  	 * be stale, so we can retry with fresh ones.
   10.28  	 */
   10.29  	for ( ; ; ) {
   10.30 -		nsec = (s64)tv->tv_nsec - (s64)get_nsec_offset(shadow);
   10.31 +		nsec = tv->tv_nsec - get_nsec_offset(shadow);
   10.32  		if (time_values_up_to_date(cpu))
   10.33  			break;
   10.34  		get_time_values_from_xen();
   10.35 @@ -558,7 +559,7 @@ irqreturn_t timer_interrupt(int irq, voi
   10.36  	}
   10.37  	while (!time_values_up_to_date(cpu));
   10.38  
   10.39 -	if (unlikely(delta < (s64)-1000000) || unlikely(delta_cpu < 0)) {
   10.40 +	if (unlikely(delta < -1000000LL) || unlikely(delta_cpu < 0)) {
   10.41  		printk("Timer ISR/%d: Time went backwards: "
   10.42  		       "delta=%lld cpu_delta=%lld shadow=%lld "
   10.43  		       "off=%lld processed=%lld cpu_processed=%lld\n",
   10.44 @@ -784,7 +785,7 @@ void __init time_init(void)
   10.45  	rdtscll(vxtime.last_tsc);
   10.46  #endif
   10.47  
   10.48 -	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
   10.49 +	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER, 0);
   10.50  	(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
   10.51  }
   10.52  
   10.53 @@ -802,7 +803,7 @@ static inline u64 jiffies_to_st(unsigned
   10.54  		 * but that's ok: we'll just end up with a shorter timeout. */
   10.55  		if (delta < 1) 
   10.56  			delta = 1;
   10.57 -		st = processed_system_time + ((u64)delta * NS_PER_TICK);
   10.58 +		st = processed_system_time + (delta * (u64)NS_PER_TICK);
   10.59  	} while (read_seqretry(&xtime_lock, seq));
   10.60  
   10.61  	return st;
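
This hunk keeps jiffies_to_st's basic conversion: clamp the jiffies delta to at least one tick (an already-expired timeout just becomes a short one), then scale by NS_PER_TICK. A sketch of that arithmetic with the seqlock stripped out and assumed sample values:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 100
    #define NS_PER_TICK (1000000000LL/HZ)

    static uint64_t jiffies_to_st(unsigned long j, unsigned long now,
                                  uint64_t processed_system_time)
    {
        long delta = (long)(j - now);
        if (delta < 1)
            delta = 1; /* expired: round up to the next tick */
        return processed_system_time + (delta * (uint64_t)NS_PER_TICK);
    }

    int main(void)
    {
        /* 5 ticks from "now" on top of an assumed base of 0 ns. */
        printf("%llu\n", (unsigned long long)jiffies_to_st(105, 100, 0));
        return 0;
    }
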
   10.62 @@ -851,21 +852,12 @@ void time_resume(void)
   10.63  
   10.64  #ifdef CONFIG_SMP
   10.65  static char timer_name[NR_CPUS][15];
   10.66 -void local_setup_timer_irq(void)
   10.67 -{
   10.68 -	int cpu = smp_processor_id();
   10.69  
   10.70 -	if (cpu == 0)
   10.71 -		return;
   10.72 -	per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER);
   10.73 -	sprintf(timer_name[cpu], "timer%d", cpu);
   10.74 -	BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
   10.75 -	                   SA_INTERRUPT, timer_name[cpu], NULL));
   10.76 -}
   10.77 +void local_setup_timer(unsigned int cpu)
   10.78 +{
   10.79 +	int seq;
   10.80  
   10.81 -void local_setup_timer(void)
   10.82 -{
   10.83 -	int seq, cpu = smp_processor_id();
   10.84 +	BUG_ON(cpu == 0);
   10.85  
   10.86  	do {
   10.87  		seq = read_seqbegin(&xtime_lock);
   10.88 @@ -873,17 +865,17 @@ void local_setup_timer(void)
   10.89  			per_cpu(shadow_time, cpu).system_timestamp;
   10.90  	} while (read_seqretry(&xtime_lock, seq));
   10.91  
   10.92 -	local_setup_timer_irq();
   10.93 +	per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER, cpu);
   10.94 +	sprintf(timer_name[cpu], "timer%d", cpu);
   10.95 +	BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
   10.96 +	                   SA_INTERRUPT, timer_name[cpu], NULL));
   10.97  }
   10.98  
   10.99 -void local_teardown_timer_irq(void)
  10.100 +void local_teardown_timer(unsigned int cpu)
  10.101  {
  10.102 -	int cpu = smp_processor_id();
  10.103 -
  10.104 -	if (cpu == 0)
  10.105 -		return;
  10.106 +	BUG_ON(cpu == 0);
  10.107  	free_irq(per_cpu(timer_irq, cpu), NULL);
  10.108 -	unbind_virq_from_irq(VIRQ_TIMER);
  10.109 +	unbind_virq_from_irq(VIRQ_TIMER, cpu);
  10.110  }
  10.111  #endif
  10.112  
    11.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Oct 21 13:58:39 2005 -0600
    11.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Mon Oct 24 09:08:13 2005 -0600
    11.3 @@ -123,9 +123,18 @@ int direct_remap_pfn_range(struct vm_are
    11.4  	return __direct_remap_pfn_range(
    11.5  		vma->vm_mm, address, mfn, size, prot, domid);
    11.6  }
    11.7 -
    11.8  EXPORT_SYMBOL(direct_remap_pfn_range);
    11.9  
   11.10 +int direct_kernel_remap_pfn_range(unsigned long address, 
   11.11 +				  unsigned long mfn,
   11.12 +				  unsigned long size, 
   11.13 +				  pgprot_t prot,
   11.14 +				  domid_t  domid)
   11.15 +{
   11.16 +	return __direct_remap_pfn_range(
   11.17 +		&init_mm, address, mfn, size, prot, domid);
   11.18 +}
   11.19 +EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
   11.20  
   11.21  /* FIXME: This is horribly broken on PAE */ 
   11.22  static int lookup_pte_fn(
    12.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/Makefile	Fri Oct 21 13:58:39 2005 -0600
    12.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/Makefile	Mon Oct 24 09:08:13 2005 -0600
    12.3 @@ -15,4 +15,4 @@ obj-y   := evtchn.o fixup.o reboot.o gnt
    12.4  
    12.5  obj-$(CONFIG_PROC_FS) += xen_proc.o
    12.6  obj-$(CONFIG_NET)     += skbuff.o
    12.7 -obj-$(CONFIG_SMP)     += smp.o
    12.8 +obj-$(CONFIG_SMP)     += smp.o smpboot.o
    13.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Fri Oct 21 13:58:39 2005 -0600
    13.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Mon Oct 24 09:08:13 2005 -0600
    13.3 @@ -42,6 +42,7 @@
    13.4  #include <asm-xen/xen-public/physdev.h>
    13.5  #include <asm/hypervisor.h>
    13.6  #include <asm-xen/evtchn.h>
    13.7 +#include <linux/mc146818rtc.h> /* RTC_IRQ */
    13.8  
    13.9  /*
   13.10   * This lock protects updates to the following mapping and reference-count
   13.11 @@ -70,8 +71,8 @@ static unsigned long pirq_needs_unmask_n
   13.12  
   13.13  #ifdef CONFIG_SMP
   13.14  
   13.15 -static u8  cpu_evtchn[NR_EVENT_CHANNELS];
   13.16 -static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
   13.17 +static u8 cpu_evtchn[NR_EVENT_CHANNELS];
   13.18 +static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
   13.19  
   13.20  #define active_evtchns(cpu,sh,idx)		\
   13.21  	((sh)->evtchn_pending[idx] &		\
   13.22 @@ -136,7 +137,7 @@ EXPORT_SYMBOL(force_evtchn_callback);
   13.23  /* NB. Interrupts are disabled on entry. */
   13.24  asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
   13.25  {
   13.26 -	u32     l1, l2;
   13.27 +	unsigned long  l1, l2;
   13.28  	unsigned int   l1i, l2i, port;
   13.29  	int            irq, cpu = smp_processor_id();
   13.30  	shared_info_t *s = HYPERVISOR_shared_info;
   13.31 @@ -148,13 +149,13 @@ asmlinkage void evtchn_do_upcall(struct 
   13.32  	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
   13.33  	while (l1 != 0) {
   13.34  		l1i = __ffs(l1);
   13.35 -		l1 &= ~(1 << l1i);
   13.36 +		l1 &= ~(1UL << l1i);
   13.37          
   13.38  		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
   13.39  			l2i = __ffs(l2);
   13.40 -			l2 &= ~(1 << l2i);
   13.41 +			l2 &= ~(1UL << l2i);
   13.42              
   13.43 -			port = (l1i << 5) + l2i;
   13.44 +			port = (l1i * BITS_PER_LONG) + l2i;
   13.45  			if ((irq = evtchn_to_irq[port]) != -1)
   13.46  				do_IRQ(irq, regs);
   13.47  			else
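
The switch from u32 to unsigned long selectors is what drives the port arithmetic change: each first-level bit now covers BITS_PER_LONG second-level bits, so a port is l1i * BITS_PER_LONG + l2i rather than the 32-bit-only l1i << 5. A user-space rendering of the two-level scan over invented pending words (__builtin_ctzl standing in for __ffs):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
        /* Invented state: groups 0 and 1 have pending events. */
        unsigned long l1 = (1UL << 0) | (1UL << 1);
        unsigned long pending[2] = { 1UL << 3, (1UL << 0) | (1UL << 7) };

        while (l1 != 0) {
            int l1i = __builtin_ctzl(l1);
            l1 &= ~(1UL << l1i);

            unsigned long l2 = pending[l1i];
            while (l2 != 0) {
                int l2i = __builtin_ctzl(l2);
                l2 &= ~(1UL << l2i);
                printf("port %d\n", l1i * BITS_PER_LONG + l2i);
            }
        }
        return 0;
    }
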
   13.48 @@ -178,11 +179,10 @@ static int find_unbound_irq(void)
   13.49  	return irq;
   13.50  }
   13.51  
   13.52 -int bind_virq_to_irq(int virq)
   13.53 +int bind_virq_to_irq(int virq, int cpu)
   13.54  {
   13.55  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
   13.56  	int evtchn, irq;
   13.57 -	int cpu = smp_processor_id();
   13.58  
   13.59  	spin_lock(&irq_mapping_update_lock);
   13.60  
   13.61 @@ -209,10 +209,9 @@ int bind_virq_to_irq(int virq)
   13.62  }
   13.63  EXPORT_SYMBOL(bind_virq_to_irq);
   13.64  
   13.65 -void unbind_virq_from_irq(int virq)
   13.66 +void unbind_virq_from_irq(int virq, int cpu)
   13.67  {
   13.68  	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   13.69 -	int cpu    = smp_processor_id();
   13.70  	int irq    = per_cpu(virq_to_irq, cpu)[virq];
   13.71  	int evtchn = irq_to_evtchn[irq];
   13.72  
   13.73 @@ -240,11 +239,10 @@ void unbind_virq_from_irq(int virq)
   13.74  }
   13.75  EXPORT_SYMBOL(unbind_virq_from_irq);
   13.76  
   13.77 -int bind_ipi_to_irq(int ipi)
   13.78 +int bind_ipi_to_irq(int ipi, int cpu)
   13.79  {
   13.80  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
   13.81  	int evtchn, irq;
   13.82 -	int cpu = smp_processor_id();
   13.83  
   13.84  	spin_lock(&irq_mapping_update_lock);
   13.85  
   13.86 @@ -272,10 +270,9 @@ int bind_ipi_to_irq(int ipi)
   13.87  }
   13.88  EXPORT_SYMBOL(bind_ipi_to_irq);
   13.89  
   13.90 -void unbind_ipi_from_irq(int ipi)
   13.91 +void unbind_ipi_from_irq(int ipi, int cpu)
   13.92  {
   13.93  	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   13.94 -	int cpu    = smp_processor_id();
   13.95  	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   13.96  	int irq    = evtchn_to_irq[evtchn];
   13.97  
   13.98 @@ -748,6 +745,13 @@ void __init init_IRQ(void)
   13.99  	{
  13.100  		irq_bindcount[pirq_to_irq(i)] = 1;
  13.101  
  13.102 +#ifdef RTC_IRQ
  13.103 +		/* If not domain 0, force our RTC driver to fail its probe. */
  13.104 +		if ((i == RTC_IRQ) &&
  13.105 +		    !(xen_start_info->flags & SIF_INITDOMAIN))
  13.106 +			continue;
  13.107 +#endif
  13.108 +
  13.109  		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  13.110  		irq_desc[pirq_to_irq(i)].action  = 0;
  13.111  		irq_desc[pirq_to_irq(i)].depth   = 1;
    14.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Fri Oct 21 13:58:39 2005 -0600
    14.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Mon Oct 24 09:08:13 2005 -0600
    14.3 @@ -26,7 +26,6 @@
    14.4  // the distinction when we return the reason code to them.
    14.5  #define SHUTDOWN_HALT      4
    14.6  
    14.7 -
    14.8  void machine_restart(char * __unused)
    14.9  {
   14.10  	/* We really want to get pending console data out before we die. */
   14.11 @@ -60,6 +59,8 @@ EXPORT_SYMBOL(machine_power_off);
   14.12  
   14.13  /* Ignore multiple shutdown requests. */
   14.14  static int shutting_down = SHUTDOWN_INVALID;
   14.15 +static void __shutdown_handler(void *unused);
   14.16 +static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
   14.17  
   14.18  #ifndef CONFIG_HOTPLUG_CPU
   14.19  #define cpu_down(x) (-EOPNOTSUPP)
   14.20 @@ -243,41 +244,47 @@ static int shutdown_process(void *__unus
   14.21  	return 0;
   14.22  }
   14.23  
   14.24 -static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
   14.25 -						 void *arg,
   14.26 -						 const char *name,
   14.27 -						 int cpu)
   14.28 +static int kthread_create_on_cpu(int (*f)(void *arg),
   14.29 +				 void *arg,
   14.30 +				 const char *name,
   14.31 +				 int cpu)
   14.32  {
   14.33  	struct task_struct *p;
   14.34  	p = kthread_create(f, arg, name);
   14.35 +	if (IS_ERR(p))
   14.36 +		return PTR_ERR(p);
   14.37  	kthread_bind(p, cpu);
   14.38  	wake_up_process(p);
   14.39 -	return p;
   14.40 +	return 0;
   14.41  }
   14.42  
   14.43  static void __shutdown_handler(void *unused)
   14.44  {
   14.45  	int err;
   14.46  
   14.47 -	if (shutting_down != SHUTDOWN_SUSPEND) {
   14.48 +	if (shutting_down != SHUTDOWN_SUSPEND)
   14.49  		err = kernel_thread(shutdown_process, NULL,
   14.50  				    CLONE_FS | CLONE_FILES);
   14.51 -		if ( err < 0 )
   14.52 -			printk(KERN_ALERT "Error creating shutdown "
   14.53 -			       "process!\n");
   14.54 -	} else {
   14.55 -		kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
   14.56 +	else
   14.57 +		err = kthread_create_on_cpu(__do_suspend, NULL, "suspend", 0);
   14.58 +
   14.59 +	if ( err < 0 ) {
   14.60 +		printk(KERN_WARNING "Error creating shutdown process (%d): "
   14.61 +		       "retrying...\n", -err);
   14.62 +		schedule_delayed_work(&shutdown_work, HZ/2);
   14.63  	}
   14.64  }
   14.65  
   14.66  static void shutdown_handler(struct xenbus_watch *watch,
   14.67  			     const char **vec, unsigned int len)
   14.68  {
   14.69 -	static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
   14.70  	char *str;
   14.71  	struct xenbus_transaction *xbt;
   14.72  	int err;
   14.73  
   14.74 +	if (shutting_down != SHUTDOWN_INVALID)
   14.75 +		goto out;
   14.76 +
   14.77   again:
   14.78  	xbt = xenbus_transaction_start();
   14.79  	if (IS_ERR(xbt))
   14.80 @@ -312,6 +319,7 @@ static void shutdown_handler(struct xenb
   14.81  
   14.82  	kfree(str);
   14.83  
   14.84 + out:
   14.85  	if (shutting_down != SHUTDOWN_INVALID)
   14.86  		schedule_work(&shutdown_work);
   14.87  }
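
The reworked kthread_create_on_cpu() depends on the kernel's pointer-encoded errors: kthread_create() returns either a valid task pointer or a small negative errno cast to a pointer, unpacked with IS_ERR()/PTR_ERR(). A user-space re-creation of that convention (4095 matches the kernel's MAX_ERRNO):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static int is_err(const void *p)
    {
        /* Error values occupy the top MAX_ERRNO addresses. */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static long ptr_err(const void *p)
    {
        return (long)p;
    }

    static void *fake_create(int fail)
    {
        static int object;
        return fail ? (void *)(long)-ENOMEM : (void *)&object;
    }

    int main(void)
    {
        void *p = fake_create(1);
        if (is_err(p))
            printf("creation failed: %ld\n", ptr_err(p)); /* -12 */
        return 0;
    }
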
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Mon Oct 24 09:08:13 2005 -0600
    15.3 @@ -0,0 +1,417 @@
    15.4 +/*
    15.5 + *	Xen SMP booting functions
    15.6 + *
    15.7 + *	See arch/i386/kernel/smpboot.c for copyright and credits for derived
    15.8 + *	portions of this file.
    15.9 + */
   15.10 +
   15.11 +#include <linux/module.h>
   15.12 +#include <linux/config.h>
   15.13 +#include <linux/init.h>
   15.14 +#include <linux/kernel.h>
   15.15 +#include <linux/mm.h>
   15.16 +#include <linux/sched.h>
   15.17 +#include <linux/kernel_stat.h>
   15.18 +#include <linux/smp_lock.h>
   15.19 +#include <linux/irq.h>
   15.20 +#include <linux/bootmem.h>
   15.21 +#include <linux/notifier.h>
   15.22 +#include <linux/cpu.h>
   15.23 +#include <linux/percpu.h>
   15.24 +#include <asm/desc.h>
   15.25 +#include <asm/arch_hooks.h>
   15.26 +#include <asm/pgalloc.h>
   15.27 +#include <asm-xen/evtchn.h>
   15.28 +#include <asm-xen/xen-public/vcpu.h>
   15.29 +#include <asm-xen/xenbus.h>
   15.30 +
   15.31 +#ifdef CONFIG_SMP_ALTERNATIVES
   15.32 +#include <asm/smp_alt.h>
   15.33 +#endif
   15.34 +
   15.35 +extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
   15.36 +extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
   15.37 +
   15.38 +extern void local_setup_timer(unsigned int cpu);
   15.39 +extern void local_teardown_timer(unsigned int cpu);
   15.40 +
   15.41 +extern void hypervisor_callback(void);
   15.42 +extern void failsafe_callback(void);
   15.43 +extern void system_call(void);
   15.44 +extern void smp_trap_init(trap_info_t *);
   15.45 +
   15.46 +extern cpumask_t cpu_initialized;
   15.47 +
   15.48 +/* Number of siblings per CPU package */
   15.49 +int smp_num_siblings = 1;
   15.50 +int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
   15.51 +EXPORT_SYMBOL(phys_proc_id);
   15.52 +int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
   15.53 +EXPORT_SYMBOL(cpu_core_id);
   15.54 +
   15.55 +cpumask_t cpu_online_map;
   15.56 +EXPORT_SYMBOL(cpu_online_map);
   15.57 +cpumask_t cpu_possible_map;
   15.58 +EXPORT_SYMBOL(cpu_possible_map);
   15.59 +
   15.60 +struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
   15.61 +EXPORT_SYMBOL(cpu_data);
   15.62 +
   15.63 +#ifdef CONFIG_HOTPLUG_CPU
   15.64 +DEFINE_PER_CPU(int, cpu_state) = { 0 };
   15.65 +#endif
   15.66 +
   15.67 +static DEFINE_PER_CPU(int, resched_irq);
   15.68 +static DEFINE_PER_CPU(int, callfunc_irq);
   15.69 +static char resched_name[NR_CPUS][15];
   15.70 +static char callfunc_name[NR_CPUS][15];
   15.71 +
   15.72 +u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
   15.73 +
   15.74 +void *xquad_portio;
   15.75 +
   15.76 +cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
   15.77 +cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
   15.78 +EXPORT_SYMBOL(cpu_core_map);
   15.79 +
   15.80 +#if defined(__i386__)
   15.81 +u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
   15.82 +EXPORT_SYMBOL(x86_cpu_to_apicid);
   15.83 +#elif !defined(CONFIG_X86_IO_APIC)
   15.84 +unsigned int maxcpus = NR_CPUS;
   15.85 +#endif
   15.86 +
   15.87 +void __init smp_alloc_memory(void)
   15.88 +{
   15.89 +}
   15.90 +
   15.91 +static void xen_smp_intr_init(unsigned int cpu)
   15.92 +{
   15.93 +	per_cpu(resched_irq, cpu) =
   15.94 +		bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu);
   15.95 +	sprintf(resched_name[cpu], "resched%d", cpu);
   15.96 +	BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
   15.97 +	                   SA_INTERRUPT, resched_name[cpu], NULL));
   15.98 +
   15.99 +	per_cpu(callfunc_irq, cpu) =
  15.100 +		bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu);
  15.101 +	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
  15.102 +	BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
  15.103 +	                   smp_call_function_interrupt,
  15.104 +	                   SA_INTERRUPT, callfunc_name[cpu], NULL));
  15.105 +
  15.106 +	if (cpu != 0)
  15.107 +		local_setup_timer(cpu);
  15.108 +}
  15.109 +
  15.110 +#ifdef CONFIG_HOTPLUG_CPU
  15.111 +static void xen_smp_intr_exit(unsigned int cpu)
  15.112 +{
  15.113 +	if (cpu != 0)
  15.114 +		local_teardown_timer(cpu);
  15.115 +
  15.116 +	free_irq(per_cpu(resched_irq, cpu), NULL);
  15.117 +	unbind_ipi_from_irq(RESCHEDULE_VECTOR, cpu);
  15.118 +
  15.119 +	free_irq(per_cpu(callfunc_irq, cpu), NULL);
  15.120 +	unbind_ipi_from_irq(CALL_FUNCTION_VECTOR, cpu);
  15.121 +}
  15.122 +#endif
  15.123 +
  15.124 +static void cpu_bringup(void)
  15.125 +{
  15.126 +	if (!cpu_isset(smp_processor_id(), cpu_initialized))
  15.127 +		cpu_init();
  15.128 +	local_irq_enable();
  15.129 +	cpu_idle();
  15.130 +}
  15.131 +
  15.132 +void vcpu_prepare(int vcpu)
  15.133 +{
  15.134 +	vcpu_guest_context_t ctxt;
  15.135 +	struct task_struct *idle = idle_task(vcpu);
  15.136 +
  15.137 +	if (vcpu == 0)
  15.138 +		return;
  15.139 +
  15.140 +	memset(&ctxt, 0, sizeof(ctxt));
  15.141 +
  15.142 +	ctxt.flags = VGCF_IN_KERNEL;
  15.143 +	ctxt.user_regs.ds = __USER_DS;
  15.144 +	ctxt.user_regs.es = __USER_DS;
  15.145 +	ctxt.user_regs.fs = 0;
  15.146 +	ctxt.user_regs.gs = 0;
  15.147 +	ctxt.user_regs.ss = __KERNEL_DS;
  15.148 +	ctxt.user_regs.eip = (unsigned long)cpu_bringup;
  15.149 +	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
  15.150 +
  15.151 +	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
  15.152 +
  15.153 +	smp_trap_init(ctxt.trap_ctxt);
  15.154 +
  15.155 +	ctxt.ldt_ents = 0;
  15.156 +
  15.157 +	ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
  15.158 +	ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
  15.159 +
  15.160 +#ifdef __i386__
  15.161 +	ctxt.user_regs.cs = __KERNEL_CS;
  15.162 +	ctxt.user_regs.esp = idle->thread.esp;
  15.163 +
  15.164 +	ctxt.kernel_ss = __KERNEL_DS;
  15.165 +	ctxt.kernel_sp = idle->thread.esp0;
  15.166 +
  15.167 +	ctxt.event_callback_cs     = __KERNEL_CS;
  15.168 +	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
  15.169 +	ctxt.failsafe_callback_cs  = __KERNEL_CS;
  15.170 +	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
  15.171 +
  15.172 +	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
  15.173 +#else
  15.174 +	ctxt.user_regs.cs = __KERNEL_CS | 3;
  15.175 +	ctxt.user_regs.esp = idle->thread.rsp;
  15.176 +
  15.177 +	ctxt.kernel_ss = __KERNEL_DS;
  15.178 +	ctxt.kernel_sp = idle->thread.rsp0;
  15.179 +
  15.180 +	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
  15.181 +	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
  15.182 +	ctxt.syscall_callback_eip  = (unsigned long)system_call;
  15.183 +
  15.184 +	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
  15.185 +
  15.186 +	ctxt.gs_base_kernel = (unsigned long)(cpu_pda + vcpu);
  15.187 +#endif
  15.188 +
  15.189 +	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
  15.190 +}
  15.191 +
  15.192 +void __init smp_prepare_cpus(unsigned int max_cpus)
  15.193 +{
  15.194 +	int cpu, rc;
  15.195 +	struct task_struct *idle;
  15.196 +
  15.197 +	if (max_cpus == 0)
  15.198 +		return;
  15.199 +
  15.200 +	xen_smp_intr_init(0);
  15.201 +
  15.202 +	for (cpu = 1; cpu < max_cpus; cpu++) {
  15.203 +		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
  15.204 +		if (rc == -ENOENT)
  15.205 +			break;
  15.206 +		BUG_ON(rc != 0);
  15.207 +
  15.208 +		cpu_data[cpu] = boot_cpu_data;
  15.209 +		cpu_2_logical_apicid[cpu] = cpu;
  15.210 +		x86_cpu_to_apicid[cpu] = cpu;
  15.211 +
  15.212 +		idle = fork_idle(cpu);
  15.213 +		if (IS_ERR(idle))
  15.214 +			panic("failed fork for CPU %d", cpu);
  15.215 +
  15.216 +#ifdef __x86_64__
  15.217 +		cpu_pda[cpu].pcurrent = idle;
  15.218 +		cpu_pda[cpu].cpunumber = cpu;
  15.219 +		per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
  15.220 +		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
  15.221 +#endif
  15.222 +
  15.223 +		irq_ctx_init(cpu);
  15.224 +
  15.225 +		cpu_gdt_descr[cpu].address =
  15.226 +			__get_free_page(GFP_KERNEL|__GFP_ZERO);
  15.227 +		BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
  15.228 +		cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
  15.229 +		memcpy((void *)cpu_gdt_descr[cpu].address,
  15.230 +		       (void *)cpu_gdt_descr[0].address,
  15.231 +		       cpu_gdt_descr[0].size);
  15.232 +		make_page_readonly((void *)cpu_gdt_descr[cpu].address);
  15.233 +
  15.234 +		cpu_set(cpu, cpu_possible_map);
  15.235 +		if (xen_start_info->flags & SIF_INITDOMAIN)
  15.236 +			cpu_set(cpu, cpu_present_map);
  15.237 +
  15.238 +		vcpu_prepare(cpu);
  15.239 +	}
  15.240 +
  15.241 +	/* Currently, Xen gives no dynamic NUMA/HT info. */
  15.242 +	for (cpu = 0; cpu < NR_CPUS; cpu++) {
  15.243 +		cpus_clear(cpu_sibling_map[cpu]);
  15.244 +		cpus_clear(cpu_core_map[cpu]);
  15.245 +	}
  15.246 +
  15.247 +#ifdef CONFIG_X86_IO_APIC
  15.248 +	/*
  15.249 +	 * Here we can be sure that there is an IO-APIC in the system. Let's
  15.250 +	 * go and set it up:
  15.251 +	 */
  15.252 +	if (!skip_ioapic_setup && nr_ioapics)
  15.253 +		setup_IO_APIC();
  15.254 +#endif
  15.255 +}
  15.256 +
  15.257 +void __devinit smp_prepare_boot_cpu(void)
  15.258 +{
  15.259 +	cpu_possible_map = cpumask_of_cpu(0);
  15.260 +	cpu_present_map  = cpumask_of_cpu(0);
  15.261 +	cpu_online_map   = cpumask_of_cpu(0);
  15.262 +
  15.263 +	cpu_data[0] = boot_cpu_data;
  15.264 +	cpu_2_logical_apicid[0] = 0;
  15.265 +	x86_cpu_to_apicid[0] = 0;
  15.266 +
  15.267 +	current_thread_info()->cpu = 0;
  15.268 +	cpus_clear(cpu_sibling_map[0]);
  15.269 +	cpu_set(0, cpu_sibling_map[0]);
  15.270 +
  15.271 +	cpus_clear(cpu_core_map[0]);
  15.272 +	cpu_set(0, cpu_core_map[0]);
  15.273 +}
  15.274 +
  15.275 +static void vcpu_hotplug(unsigned int cpu)
  15.276 +{
  15.277 +	int err;
  15.278 +	char dir[32], state[32];
  15.279 +
  15.280 +	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
  15.281 +		return;
  15.282 +
  15.283 +	sprintf(dir, "cpu/%d", cpu);
  15.284 +	err = xenbus_scanf(NULL, dir, "availability", "%s", state);
  15.285 +	if (err != 1) {
  15.286 +		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
  15.287 +		return;
  15.288 +	}
  15.289 +
  15.290 +	if (strcmp(state, "online") == 0) {
  15.291 +		cpu_set(cpu, cpu_present_map);
  15.292 +		(void)cpu_up(cpu);
  15.293 +	} else if (strcmp(state, "offline") == 0) {
  15.294 +#ifdef CONFIG_HOTPLUG_CPU
  15.295 +		(void)cpu_down(cpu);
  15.296 +#else
  15.297 +		printk(KERN_INFO "Ignoring CPU%d hotplug request\n", cpu);
  15.298 +#endif
  15.299 +	} else {
  15.300 +		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
  15.301 +		       state, cpu);
  15.302 +	}
  15.303 +}
  15.304 +
  15.305 +static void handle_vcpu_hotplug_event(
  15.306 +	struct xenbus_watch *watch, const char **vec, unsigned int len)
  15.307 +{
  15.308 +	int cpu;
  15.309 +	char *cpustr;
  15.310 +	const char *node = vec[XS_WATCH_PATH];
  15.311 +
  15.312 +	if ((cpustr = strstr(node, "cpu/")) != NULL) {
  15.313 +		sscanf(cpustr, "cpu/%d", &cpu);
  15.314 +		vcpu_hotplug(cpu);
  15.315 +	}
  15.316 +}
  15.317 +
  15.318 +static int setup_cpu_watcher(struct notifier_block *notifier,
  15.319 +			      unsigned long event, void *data)
  15.320 +{
  15.321 +	int i;
  15.322 +
  15.323 +	static struct xenbus_watch cpu_watch = {
  15.324 +		.node = "cpu",
  15.325 +		.callback = handle_vcpu_hotplug_event };
  15.326 +	(void)register_xenbus_watch(&cpu_watch);
  15.327 +
  15.328 +	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
  15.329 +		for_each_cpu(i)
  15.330 +			vcpu_hotplug(i);
  15.331 +		printk(KERN_INFO "Brought up %ld CPUs\n",
  15.332 +		       (long)num_online_cpus());
  15.333 +	}
  15.334 +
  15.335 +	return NOTIFY_DONE;
  15.336 +}
  15.337 +
  15.338 +static int __init setup_vcpu_hotplug_event(void)
  15.339 +{
  15.340 +	static struct notifier_block xsn_cpu = {
  15.341 +		.notifier_call = setup_cpu_watcher };
  15.342 +	register_xenstore_notifier(&xsn_cpu);
  15.343 +	return 0;
  15.344 +}
  15.345 +
  15.346 +subsys_initcall(setup_vcpu_hotplug_event);
  15.347 +
  15.348 +#ifdef CONFIG_HOTPLUG_CPU
  15.349 +
  15.350 +int __cpu_disable(void)
  15.351 +{
  15.352 +	cpumask_t map = cpu_online_map;
  15.353 +	int cpu = smp_processor_id();
  15.354 +
  15.355 +	if (cpu == 0)
  15.356 +		return -EBUSY;
  15.357 +
  15.358 +	cpu_clear(cpu, map);
  15.359 +	fixup_irqs(map);
  15.360 +	cpu_clear(cpu, cpu_online_map);
  15.361 +
  15.362 +	return 0;
  15.363 +}
  15.364 +
  15.365 +void __cpu_die(unsigned int cpu)
  15.366 +{
  15.367 +	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
  15.368 +		current->state = TASK_UNINTERRUPTIBLE;
  15.369 +		schedule_timeout(HZ/10);
  15.370 +	}
  15.371 +
  15.372 +	xen_smp_intr_exit(cpu);
  15.373 +
  15.374 +#ifdef CONFIG_SMP_ALTERNATIVES
  15.375 +	if (num_online_cpus() == 1)
  15.376 +		unprepare_for_smp();
  15.377 +#endif
  15.378 +}
  15.379 +
  15.380 +#else /* !CONFIG_HOTPLUG_CPU */
  15.381 +
  15.382 +int __cpu_disable(void)
  15.383 +{
  15.384 +	return -ENOSYS;
  15.385 +}
  15.386 +
  15.387 +void __cpu_die(unsigned int cpu)
  15.388 +{
  15.389 +	BUG();
  15.390 +}
  15.391 +
  15.392 +#endif /* CONFIG_HOTPLUG_CPU */
  15.393 +
  15.394 +int __devinit __cpu_up(unsigned int cpu)
  15.395 +{
  15.396 +#ifdef CONFIG_SMP_ALTERNATIVES
  15.397 +	if (num_online_cpus() == 1)
  15.398 +		prepare_for_smp();
  15.399 +#endif
  15.400 +
  15.401 +	xen_smp_intr_init(cpu);
  15.402 +	cpu_set(cpu, cpu_online_map);
  15.403 +	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
  15.404 +
  15.405 +	return 0;
  15.406 +}
  15.407 +
  15.408 +void __init smp_cpus_done(unsigned int max_cpus)
  15.409 +{
  15.410 +}
  15.411 +
  15.412 +/*
  15.413 + * Local variables:
  15.414 + *  c-file-style: "linux"
  15.415 + *  indent-tabs-mode: t
  15.416 + *  c-indent-level: 8
  15.417 + *  c-basic-offset: 8
  15.418 + *  tab-width: 8
  15.419 + * End:
  15.420 + */
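
handle_vcpu_hotplug_event() in the new file recovers the CPU number from the watch path with strstr() and sscanf() before dispatching to vcpu_hotplug(). The same parsing, runnable on an invented path:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *node = "cpu/3/availability"; /* invented watch path */
        const char *cpustr;
        int cpu;

        if ((cpustr = strstr(node, "cpu/")) != NULL &&
            sscanf(cpustr, "cpu/%d", &cpu) == 1)
            printf("hotplug event for cpu %d\n", cpu);
        return 0;
    }
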
    16.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile	Fri Oct 21 13:58:39 2005 -0600
    16.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile	Mon Oct 24 09:08:13 2005 -0600
    16.3 @@ -25,7 +25,7 @@ obj-$(CONFIG_ACPI_BOOT)		+= acpi/
    16.4  c-obj-$(CONFIG_X86_MSR)		+= msr.o
    16.5  obj-$(CONFIG_MICROCODE)		+= microcode.o
    16.6  obj-$(CONFIG_X86_CPUID)		+= cpuid.o
    16.7 -obj-$(CONFIG_SMP)		+= smp.o smpboot.o
    16.8 +obj-$(CONFIG_SMP)		+= smp.o
    16.9  obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
   16.10  c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
   16.11  obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o
    17.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c	Fri Oct 21 13:58:39 2005 -0600
    17.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/e820.c	Mon Oct 24 09:08:13 2005 -0600
    17.3 @@ -519,7 +519,7 @@ void __init setup_memory_region(void)
    17.4  	e820_print_map(who);
    17.5  }
    17.6  
    17.7 -#else  /* CONFIX_XEN */
    17.8 +#else  /* CONFIG_XEN */
    17.9  
   17.10  extern unsigned long xen_override_max_pfn;
   17.11  extern union xen_start_info_union xen_start_info_union;
   17.12 @@ -528,10 +528,13 @@ unsigned long __init e820_end_of_ram(voi
   17.13  {
   17.14          unsigned long max_end_pfn = xen_start_info->nr_pages;
   17.15  
   17.16 -	if ( xen_override_max_pfn <  max_end_pfn)
   17.17 +	if ( xen_override_max_pfn < max_end_pfn)
   17.18  		xen_override_max_pfn = max_end_pfn;
   17.19 -	
   17.20 -        return xen_override_max_pfn;
   17.21 +
   17.22 +	/* 8MB slack, to make up for address space allocations in backends. */
   17.23 +	xen_override_max_pfn += 8 << (20 - PAGE_SHIFT);
   17.24 +
   17.25 +	return xen_override_max_pfn;
   17.26  }
   17.27  
   17.28  void __init e820_reserve_resources(void) 
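
The slack added above is expressed in page frames: 8 << (20 - PAGE_SHIFT) converts 8MB into pages by shifting. A quick check of the arithmetic, assuming 4KB pages (PAGE_SHIFT = 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long slack = 8UL << (20 - PAGE_SHIFT);
        printf("%lu pages = %lu MB\n",
               slack, (slack << PAGE_SHIFT) >> 20); /* 2048 pages = 8 MB */
        return 0;
    }
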
    18.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Fri Oct 21 13:58:39 2005 -0600
    18.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Mon Oct 24 09:08:13 2005 -0600
    18.3 @@ -429,7 +429,7 @@ static __init void parse_cmdline_early (
    18.4  static void __init contig_initmem_init(void)
    18.5  {
    18.6          unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
    18.7 -        free_bootmem(0, end_pfn << PAGE_SHIFT);   
    18.8 +        free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
    18.9          reserve_bootmem(HIGH_MEMORY,
   18.10                          (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
   18.11                          - HIGH_MEMORY);
   18.12 @@ -733,20 +733,22 @@ void __init setup_arch(char **cmdline_p)
   18.13  #ifdef CONFIG_XEN
   18.14  	{
   18.15  		int i, j, k, fpp;
   18.16 +
   18.17  		/* Make sure we have a large enough P->M table. */
   18.18 -		if (end_pfn > xen_start_info->nr_pages) {
   18.19 -			phys_to_machine_mapping = alloc_bootmem(
   18.20 -				end_pfn * sizeof(unsigned long));
   18.21 -			memset(phys_to_machine_mapping, ~0,
   18.22 -			       end_pfn * sizeof(unsigned long));
   18.23 -			memcpy(phys_to_machine_mapping,
   18.24 -			       (unsigned long *)xen_start_info->mfn_list,
   18.25 -			       xen_start_info->nr_pages * sizeof(unsigned long));
   18.26 -			free_bootmem(
   18.27 -				__pa(xen_start_info->mfn_list), 
   18.28 -				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
   18.29 -						sizeof(unsigned long))));
   18.30 -		}
   18.31 +		phys_to_machine_mapping = alloc_bootmem(
   18.32 +			end_pfn * sizeof(unsigned long));
   18.33 +		memset(phys_to_machine_mapping, ~0,
   18.34 +		       end_pfn * sizeof(unsigned long));
   18.35 +		memcpy(phys_to_machine_mapping,
   18.36 +		       (unsigned long *)xen_start_info->mfn_list,
   18.37 +		       xen_start_info->nr_pages * sizeof(unsigned long));
   18.38 +		free_bootmem(
   18.39 +			__pa(xen_start_info->mfn_list), 
   18.40 +			PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
   18.41 +					sizeof(unsigned long))));
   18.42 +		make_pages_readonly((void *)xen_start_info->mfn_list,
   18.43 +				    PFN_UP(xen_start_info->nr_pages *
   18.44 +					   sizeof(unsigned long)));
   18.45  
   18.46  		/* 
   18.47  		 * Initialise the list of the frames that specify the list of 
    19.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Fri Oct 21 13:58:39 2005 -0600
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,1270 +0,0 @@
    19.4 -/*
    19.5 - *	x86 SMP booting functions
    19.6 - *
    19.7 - *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
    19.8 - *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
    19.9 - *	Copyright 2001 Andi Kleen, SuSE Labs.
   19.10 - *
   19.11 - *	Much of the core SMP work is based on previous work by Thomas Radke, to
   19.12 - *	whom a great many thanks are extended.
   19.13 - *
   19.14 - *	Thanks to Intel for making available several different Pentium,
   19.15 - *	Pentium Pro and Pentium-II/Xeon MP machines.
   19.16 - *	Original development of Linux SMP code supported by Caldera.
   19.17 - *
   19.18 - *	This code is released under the GNU General Public License version 2
   19.19 - *
   19.20 - *	Fixes
   19.21 - *		Felix Koop	:	NR_CPUS used properly
   19.22 - *		Jose Renau	:	Handle single CPU case.
   19.23 - *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
   19.24 - *		Greg Wright	:	Fix for kernel stacks panic.
   19.25 - *		Erich Boleyn	:	MP v1.4 and additional changes.
   19.26 - *	Matthias Sattler	:	Changes for 2.1 kernel map.
   19.27 - *	Michel Lespinasse	:	Changes for 2.1 kernel map.
   19.28 - *	Michael Chastain	:	Change trampoline.S to gnu as.
   19.29 - *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
   19.30 - *		Ingo Molnar	:	Added APIC timers, based on code
   19.31 - *					from Jose Renau
   19.32 - *		Ingo Molnar	:	various cleanups and rewrites
   19.33 - *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
   19.34 - *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
   19.35 - *	Andi Kleen		:	Changed for SMP boot into long mode.
   19.36 - *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
   19.37 - *      Andi Kleen              :       Converted to new state machine.
   19.38 - *					Various cleanups.
   19.39 - *					Probably mostly hotplug CPU ready now.
   19.40 - */
   19.41 -
   19.42 -
   19.43 -#include <linux/config.h>
   19.44 -#include <linux/init.h>
   19.45 -
   19.46 -#include <linux/mm.h>
   19.47 -#include <linux/kernel_stat.h>
   19.48 -#include <linux/smp_lock.h>
   19.49 -#include <linux/irq.h>
   19.50 -#include <linux/bootmem.h>
   19.51 -#include <linux/thread_info.h>
   19.52 -#include <linux/module.h>
   19.53 -#ifdef CONFIG_XEN
   19.54 -#include <linux/interrupt.h>
   19.55 -#endif
   19.56 -
   19.57 -#include <linux/delay.h>
   19.58 -#include <linux/mc146818rtc.h>
   19.59 -#include <asm/mtrr.h>
   19.60 -#include <asm/pgalloc.h>
   19.61 -#include <asm/desc.h>
   19.62 -#include <asm/kdebug.h>
   19.63 -#include <asm/tlbflush.h>
   19.64 -#include <asm/proto.h>
   19.65 -#include <asm/nmi.h>
   19.66 -#ifdef CONFIG_XEN
   19.67 -#include <asm/arch_hooks.h>
   19.68 -#include <asm-xen/evtchn.h>
   19.69 -#include <asm-xen/xen-public/vcpu.h>
   19.70 -#endif
   19.71 -
   19.72 -/* Change for real CPU hotplug. Note other files need to be fixed
   19.73 -   first too. */
   19.74 -#define __cpuinit __init
   19.75 -#define __cpuinitdata __initdata
   19.76 -
   19.77 -#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
   19.78 -	unsigned int maxcpus = NR_CPUS;
   19.79 -#endif
   19.80 -
   19.81 -/* Number of siblings per CPU package */
   19.82 -int smp_num_siblings = 1;
   19.83 -/* Package ID of each logical CPU */
   19.84 -u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
   19.85 -u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
   19.86 -EXPORT_SYMBOL(phys_proc_id);
   19.87 -EXPORT_SYMBOL(cpu_core_id);
   19.88 -
   19.89 -/* Bitmask of currently online CPUs */
   19.90 -cpumask_t cpu_online_map;
   19.91 -
   19.92 -EXPORT_SYMBOL(cpu_online_map);
   19.93 -
   19.94 -/*
   19.95 - * Private maps to synchronize booting between AP and BP.
   19.96 - * Probably not needed anymore, but it makes for easier debugging. -AK
   19.97 - */
   19.98 -cpumask_t cpu_callin_map;
   19.99 -cpumask_t cpu_callout_map;
  19.100 -
  19.101 -cpumask_t cpu_possible_map;
  19.102 -EXPORT_SYMBOL(cpu_possible_map);
  19.103 -
  19.104 -/* Per CPU bogomips and other parameters */
  19.105 -struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
  19.106 -
  19.107 -/* Set when the idlers are all forked */
  19.108 -int smp_threads_ready;
  19.109 -
  19.110 -cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
  19.111 -cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
  19.112 -EXPORT_SYMBOL(cpu_core_map);
  19.113 -
  19.114 -#ifndef CONFIG_XEN
  19.115 -/*
  19.116 - * Trampoline 80x86 program as an array.
  19.117 - */
  19.118 -
  19.119 -extern unsigned char trampoline_data[];
  19.120 -extern unsigned char trampoline_end[];
  19.121 -
  19.122 -/*
  19.123 - * Currently trivial. Write the real->protected mode
  19.124 - * bootstrap into the page concerned. The caller
  19.125 - * has made sure it's suitably aligned.
  19.126 - */
  19.127 -
  19.128 -static unsigned long __cpuinit setup_trampoline(void)
  19.129 -{
  19.130 -	void *tramp = __va(SMP_TRAMPOLINE_BASE); 
  19.131 -	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
  19.132 -	return virt_to_phys(tramp);
  19.133 -}
  19.134 -#endif
  19.135 -
  19.136 -/*
  19.137 - * The bootstrap kernel entry code has set these up. Save them for
  19.138 - * a given CPU
  19.139 - */
  19.140 -
  19.141 -static void __cpuinit smp_store_cpu_info(int id)
  19.142 -{
  19.143 -	struct cpuinfo_x86 *c = cpu_data + id;
  19.144 -
  19.145 -	*c = boot_cpu_data;
  19.146 -	identify_cpu(c);
  19.147 -	print_cpu_info(c);
  19.148 -}
  19.149 -
  19.150 -#ifndef CONFIG_XEN
  19.151 -/*
  19.152 - * New Funky TSC sync algorithm borrowed from IA64.
  19.153 - * Main advantage is that it doesn't reset the TSCs fully and
  19.154 - * in general looks more robust and it works better than my earlier
  19.155 - * attempts. I believe it was written by David Mosberger. Some minor
  19.156 - * adjustments for x86-64 by me -AK
  19.157 - *
  19.158 - * Original comment reproduced below.
  19.159 - *
  19.160 - * Synchronize TSC of the current (slave) CPU with the TSC of the
  19.161 - * MASTER CPU (normally the time-keeper CPU).  We use a closed loop to
  19.162 - * eliminate the possibility of unaccounted-for errors (such as
  19.163 - * getting a machine check in the middle of a calibration step).  The
  19.164 - * basic idea is for the slave to ask the master what itc value it has
  19.165 - * and to read its own itc before and after the master responds.  Each
  19.166 - * iteration gives us three timestamps:
  19.167 - *
  19.168 - *	slave		master
  19.169 - *
  19.170 - *	t0 ---\
  19.171 - *             ---\
  19.172 - *		   --->
  19.173 - *			tm
  19.174 - *		   /---
  19.175 - *	       /---
  19.176 - *	t1 <---
  19.177 - *
  19.178 - *
  19.179 - * The goal is to adjust the slave's TSC such that tm falls exactly
  19.180 - * half-way between t0 and t1.  If we achieve this, the clocks are
  19.181 - * synchronized provided the interconnect between the slave and the
  19.182 - * master is symmetric.  Even if the interconnect were asymmetric, we
  19.183 - * would still know that the synchronization error is smaller than the
  19.184 - * roundtrip latency (t0 - t1).
  19.185 - *
  19.186 - * When the interconnect is quiet and symmetric, this lets us
  19.187 - * synchronize the TSC to within one or two cycles.  However, we can
  19.188 - * only *guarantee* that the synchronization is accurate to within a
  19.189 - * round-trip time, which is typically in the range of several hundred
  19.190 - * cycles (e.g., ~500 cycles).  In practice, this means that the TSCs
  19.191 - * are usually almost perfectly synchronized, but we shouldn't assume
  19.192 - * that the accuracy is much better than half a micro second or so.
  19.193 - *
  19.194 - * [there are other errors like the latency of RDTSC and of the
  19.195 - * WRMSR. These can also account to hundreds of cycles. So it's
  19.196 - * probably worse. It claims 153 cycles error on a dual Opteron,
  19.197 - * but I suspect the numbers are actually somewhat worse -AK]
  19.198 - */
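
The heart of the algorithm this comment describes fits in a few lines: the slave wants the master's sample tm to land halfway between its own t0 and t1, so the correction is the midpoint minus tm. Run on invented timestamps:

    #include <stdio.h>

    int main(void)
    {
        /* Invented cycle counts: slave sends at t0, master replies
         * at tm (on its own clock), slave receives at t1. */
        unsigned long t0 = 1000, tm = 1600, t1 = 1200;

        /* Midpoint of t0 and t1 without overflow, as in get_delta(). */
        unsigned long tcenter = t0/2 + t1/2;
        if ((t0 % 2) + (t1 % 2) == 2)
            tcenter++;

        long delta = (long)(tcenter - tm);
        printf("delta = %ld cycles\n", delta); /* -500: slave behind */
        return 0;
    }
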
  19.199 -
  19.200 -#define MASTER	0
  19.201 -#define SLAVE	(SMP_CACHE_BYTES/8)
  19.202 -
  19.203 -/* Intentionally don't use cpu_relax() during TSC synchronization
  19.204 -   because we don't want to go into funky power-save modes or cause
  19.205 -   hypervisors to schedule us away.  Going to sleep would likely affect
  19.206 -   latency and low latency is the primary objective here. -AK */
  19.207 -#define no_cpu_relax() barrier()
  19.208 -
  19.209 -static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
  19.210 -static volatile __cpuinitdata unsigned long go[SLAVE + 1];
  19.211 -static int notscsync __cpuinitdata;
  19.212 -
  19.213 -#undef DEBUG_TSC_SYNC
  19.214 -
  19.215 -#define NUM_ROUNDS	64	/* magic value */
  19.216 -#define NUM_ITERS	5	/* likewise */
  19.217 -
  19.218 -/* Callback on boot CPU */
  19.219 -static __cpuinit void sync_master(void *arg)
  19.220 -{
  19.221 -	unsigned long flags, i;
  19.222 -
  19.223 -	if (smp_processor_id() != boot_cpu_id)
  19.224 -		return;
  19.225 -
  19.226 -	go[MASTER] = 0;
  19.227 -
  19.228 -	local_irq_save(flags);
  19.229 -	{
  19.230 -		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
  19.231 -			while (!go[MASTER])
  19.232 -				no_cpu_relax();
  19.233 -			go[MASTER] = 0;
  19.234 -			rdtscll(go[SLAVE]);
  19.235 -		}
  19.236 -	}
  19.237 -	local_irq_restore(flags);
  19.238 -}
  19.239 -
  19.240 -/*
  19.241 - * Return the number of cycles by which our tsc differs from the tsc
  19.242 - * on the master (time-keeper) CPU.  A positive number indicates our
  19.243 - * tsc is ahead of the master, negative that it is behind.
  19.244 - */
  19.245 -static inline long
  19.246 -get_delta(long *rt, long *master)
  19.247 -{
  19.248 -	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
  19.249 -	unsigned long tcenter, t0, t1, tm;
  19.250 -	int i;
  19.251 -
  19.252 -	for (i = 0; i < NUM_ITERS; ++i) {
  19.253 -		rdtscll(t0);
  19.254 -		go[MASTER] = 1;
  19.255 -		while (!(tm = go[SLAVE]))
  19.256 -			no_cpu_relax();
  19.257 -		go[SLAVE] = 0;
  19.258 -		rdtscll(t1);
  19.259 -
  19.260 -		if (t1 - t0 < best_t1 - best_t0)
  19.261 -			best_t0 = t0, best_t1 = t1, best_tm = tm;
  19.262 -	}
  19.263 -
  19.264 -	*rt = best_t1 - best_t0;
  19.265 -	*master = best_tm - best_t0;
  19.266 -
  19.267 -	/* average best_t0 and best_t1 without overflow: */
  19.268 -	tcenter = (best_t0/2 + best_t1/2);
  19.269 -	if (best_t0 % 2 + best_t1 % 2 == 2)
  19.270 -		++tcenter;
  19.271 -	return tcenter - best_tm;
  19.272 -}
  19.273 -
  19.274 -static __cpuinit void sync_tsc(void)
  19.275 -{
  19.276 -	int i, done = 0;
  19.277 -	long delta, adj, adjust_latency = 0;
  19.278 -	unsigned long flags, rt, master_time_stamp, bound;
  19.279 -#if DEBUG_TSC_SYNC
  19.280 -	static struct syncdebug {
  19.281 -		long rt;	/* roundtrip time */
  19.282 -		long master;	/* master's timestamp */
  19.283 -		long diff;	/* difference between midpoint and master's timestamp */
  19.284 -		long lat;	/* estimate of tsc adjustment latency */
  19.285 -	} t[NUM_ROUNDS] __cpuinitdata;
  19.286 -#endif
  19.287 -
  19.288 -	go[MASTER] = 1;
  19.289 -
  19.290 -	smp_call_function(sync_master, NULL, 1, 0);
  19.291 -
  19.292 -	while (go[MASTER])	/* wait for master to be ready */
  19.293 -		no_cpu_relax();
  19.294 -
  19.295 -	spin_lock_irqsave(&tsc_sync_lock, flags);
  19.296 -	{
  19.297 -		for (i = 0; i < NUM_ROUNDS; ++i) {
  19.298 -			delta = get_delta(&rt, &master_time_stamp);
  19.299 -			if (delta == 0) {
  19.300 -				done = 1;	/* let's lock on to this... */
  19.301 -				bound = rt;
  19.302 -			}
  19.303 -
  19.304 -			if (!done) {
  19.305 -				unsigned long t;
  19.306 -				if (i > 0) {
  19.307 -					adjust_latency += -delta;
  19.308 -					adj = -delta + adjust_latency/4;
  19.309 -				} else
  19.310 -					adj = -delta;
  19.311 -
  19.312 -				rdtscll(t);
  19.313 -				wrmsrl(MSR_IA32_TSC, t + adj);
  19.314 -			}
  19.315 -#if DEBUG_TSC_SYNC
  19.316 -			t[i].rt = rt;
  19.317 -			t[i].master = master_time_stamp;
  19.318 -			t[i].diff = delta;
  19.319 -			t[i].lat = adjust_latency/4;
  19.320 -#endif
  19.321 -		}
  19.322 -	}
  19.323 -	spin_unlock_irqrestore(&tsc_sync_lock, flags);
  19.324 -
  19.325 -#if DEBUG_TSC_SYNC
  19.326 -	for (i = 0; i < NUM_ROUNDS; ++i)
  19.327 -		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
  19.328 -		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
  19.329 -#endif
  19.330 -
  19.331 -	printk(KERN_INFO
  19.332 -	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
  19.333 -	       "maxerr %lu cycles)\n",
  19.334 -	       smp_processor_id(), boot_cpu_id, delta, rt);
  19.335 -}
  19.336 -
  19.337 -static void __cpuinit tsc_sync_wait(void)
  19.338 -{
  19.339 -	if (notscsync || !cpu_has_tsc)
  19.340 -		return;
  19.341 -	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
  19.342 -			boot_cpu_id);
  19.343 -	sync_tsc();
  19.344 -}
  19.345 -
  19.346 -static __init int notscsync_setup(char *s)
  19.347 -{
  19.348 -	notscsync = 1;
  19.349 -	return 0;
  19.350 -}
  19.351 -__setup("notscsync", notscsync_setup);
  19.352 -#endif
  19.353 -
  19.354 -static atomic_t init_deasserted __cpuinitdata;
  19.355 -
  19.356 -/*
  19.357 - * Report back to the Boot Processor.
  19.358 - * Running on AP.
  19.359 - */
  19.360 -void __cpuinit smp_callin(void)
  19.361 -{
  19.362 -	int cpuid, phys_id;
  19.363 -	unsigned long timeout;
  19.364 -
  19.365 -#ifndef CONFIG_XEN
  19.366 -	/*
  19.367 -	 * If woken up by an INIT in an 82489DX configuration
  19.368 -	 * we may get here before an INIT-deassert IPI reaches
  19.369 -	 * our local APIC.  We have to wait for the IPI or we'll
  19.370 -	 * lock up on an APIC access.
  19.371 -	 */
  19.372 -	while (!atomic_read(&init_deasserted))
  19.373 -		cpu_relax();
  19.374 -
  19.375 -#endif
  19.376 -	/*
  19.377 -	 * (This works even if the APIC is not enabled.)
  19.378 -	 */
  19.379 -#ifndef CONFIG_XEN
  19.380 -	phys_id = GET_APIC_ID(apic_read(APIC_ID));
  19.381 -#else
  19.382 -	phys_id = smp_processor_id();
  19.383 -#endif
  19.384 -	cpuid = smp_processor_id();
  19.385 -	if (cpu_isset(cpuid, cpu_callin_map)) {
  19.386 -		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
  19.387 -					phys_id, cpuid);
  19.388 -	}
  19.389 -	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
  19.390 -
  19.391 -	/*
  19.392 -	 * STARTUP IPIs are fragile beasts as they might sometimes
  19.393 -	 * trigger some glue motherboard logic. Complete APIC bus
  19.394 -	 * silence for 1 second, this overestimates the time the
  19.395 -	 * boot CPU is spending to send the up to 2 STARTUP IPIs
  19.396 -	 * by a factor of two. This should be enough.
  19.397 -	 */
  19.398 -
  19.399 -	/*
  19.400 -	 * Waiting 2s total for startup (udelay is not yet working)
  19.401 -	 */
  19.402 -	timeout = jiffies + 2*HZ;
  19.403 -	while (time_before(jiffies, timeout)) {
  19.404 -		/*
  19.405 -		 * Has the boot CPU finished its STARTUP sequence?
  19.406 -		 */
  19.407 -		if (cpu_isset(cpuid, cpu_callout_map))
  19.408 -			break;
  19.409 -		cpu_relax();
  19.410 -	}
  19.411 -
  19.412 -	if (!time_before(jiffies, timeout)) {
  19.413 -		panic("smp_callin: CPU%d started up but did not get a callout!\n",
  19.414 -			cpuid);
  19.415 -	}
  19.416 -
  19.417 -#ifndef CONFIG_XEN
  19.418 -	/*
  19.419 -	 * the boot CPU has finished the init stage and is spinning
  19.420 -	 * on callin_map until we finish. We are free to set up this
  19.421 -	 * CPU, first the APIC. (this is probably redundant on most
  19.422 -	 * boards)
  19.423 -	 */
  19.424 -
  19.425 -	Dprintk("CALLIN, before setup_local_APIC().\n");
  19.426 -	setup_local_APIC();
  19.427 -#endif
  19.428 -
  19.429 -	/*
  19.430 -	 * Get our bogomips.
  19.431 -	 */
  19.432 -	calibrate_delay();
  19.433 -	Dprintk("Stack at about %p\n",&cpuid);
  19.434 -
  19.435 -#ifndef CONFIG_XEN
  19.436 -	disable_APIC_timer();
  19.437 -#endif
  19.438 -
  19.439 -	/*
  19.440 -	 * Save our processor parameters
  19.441 -	 */
  19.442 - 	smp_store_cpu_info(cpuid);
  19.443 -
  19.444 -	/*
  19.445 -	 * Allow the master to continue.
  19.446 -	 */
  19.447 -	cpu_set(cpuid, cpu_callin_map);
  19.448 -}
  19.449 -
  19.450 -#ifdef CONFIG_XEN
  19.451 -extern void local_setup_timer(void);
  19.452 -#endif
  19.453 -
  19.454 -/*
  19.455 - * Setup code on secondary processor (after coming out of the trampoline)
  19.456 - */
  19.457 -void __cpuinit start_secondary(void)
  19.458 -{
  19.459 -	/*
  19.460 -	 * Don't put anything before smp_callin(); SMP
  19.461 -	 * booting is so fragile that we want to limit the
  19.462 -	 * things done here to the bare minimum.
  19.463 -	 */
  19.464 -	cpu_init();
  19.465 -	smp_callin();
  19.466 -
  19.467 -	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
  19.468 -	barrier();
  19.469 -
  19.470 -#ifndef CONFIG_XEN
  19.471 -	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id()); 	
  19.472 -	setup_secondary_APIC_clock();
  19.473 -
  19.474 -	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
  19.475 -
  19.476 -	if (nmi_watchdog == NMI_IO_APIC) {
  19.477 -		disable_8259A_irq(0);
  19.478 -		enable_NMI_through_LVT0(NULL);
  19.479 -		enable_8259A_irq(0);
  19.480 -	}
  19.481 -
  19.482 -	enable_APIC_timer();
  19.483 -#else
  19.484 -	local_setup_timer();
  19.485 -	smp_intr_init();
  19.486 -	local_irq_enable();
  19.487 -#endif
  19.488 -
  19.489 -	/*
  19.490 -	 * Allow the master to continue.
  19.491 -	 */
  19.492 -	cpu_set(smp_processor_id(), cpu_online_map);
  19.493 -	mb();
  19.494 -
  19.495 -#ifndef CONFIG_XEN
  19.496 -	/* Wait for TSC sync so that nothing is scheduled before the
  19.497 -	   clocks agree. We still process interrupts, which could see an
  19.498 -	   inconsistent time in that window, unfortunately. */
  19.499 -	tsc_sync_wait();
  19.500 -#endif
  19.501 -
  19.502 -	cpu_idle();
  19.503 -}
  19.504 -
  19.505 -extern volatile unsigned long init_rsp;
  19.506 -extern void (*initial_code)(void);
  19.507 -
  19.508 -#ifndef CONFIG_XEN
  19.509 -#if APIC_DEBUG
  19.510 -static void inquire_remote_apic(int apicid)
  19.511 -{
  19.512 -	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
  19.513 -	char *names[] = { "ID", "VERSION", "SPIV" };
  19.514 -	int timeout, status;
  19.515 -
  19.516 -	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
  19.517 -
  19.518 -	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
  19.519 -		printk("... APIC #%d %s: ", apicid, names[i]);
  19.520 -
  19.521 -		/*
  19.522 -		 * Wait for idle.
  19.523 -		 */
  19.524 -		apic_wait_icr_idle();
  19.525 -
  19.526 -		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
  19.527 -		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
  19.528 -
  19.529 -		timeout = 0;
  19.530 -		do {
  19.531 -			udelay(100);
  19.532 -			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
  19.533 -		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
  19.534 -
  19.535 -		switch (status) {
  19.536 -		case APIC_ICR_RR_VALID:
  19.537 -			status = apic_read(APIC_RRR);
  19.538 -			printk("%08x\n", status);
  19.539 -			break;
  19.540 -		default:
  19.541 -			printk("failed\n");
  19.542 -		}
  19.543 -	}
  19.544 -}
  19.545 -#endif
  19.546 -
  19.547 -/*
  19.548 - * Kick the secondary to wake up.
  19.549 - */
  19.550 -static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
  19.551 -{
  19.552 -	unsigned long send_status = 0, accept_status = 0;
  19.553 -	int maxlvt, timeout, num_starts, j;
  19.554 -
  19.555 -	Dprintk("Asserting INIT.\n");
  19.556 -
  19.557 -	/*
  19.558 -	 * Turn INIT on target chip
  19.559 -	 */
  19.560 -	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
  19.561 -
  19.562 -	/*
  19.563 -	 * Send IPI
  19.564 -	 */
  19.565 -	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
  19.566 -				| APIC_DM_INIT);
  19.567 -
  19.568 -	Dprintk("Waiting for send to finish...\n");
  19.569 -	timeout = 0;
  19.570 -	do {
  19.571 -		Dprintk("+");
  19.572 -		udelay(100);
  19.573 -		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
  19.574 -	} while (send_status && (timeout++ < 1000));
  19.575 -
  19.576 -	mdelay(10);
  19.577 -
  19.578 -	Dprintk("Deasserting INIT.\n");
  19.579 -
  19.580 -	/* Target chip */
  19.581 -	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
  19.582 -
  19.583 -	/* Send IPI */
  19.584 -	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
  19.585 -
  19.586 -	Dprintk("Waiting for send to finish...\n");
  19.587 -	timeout = 0;
  19.588 -	do {
  19.589 -		Dprintk("+");
  19.590 -		udelay(100);
  19.591 -		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
  19.592 -	} while (send_status && (timeout++ < 1000));
  19.593 -
  19.594 -	atomic_set(&init_deasserted, 1);
  19.595 -
  19.596 -	/*
  19.597 -	 * Should we send STARTUP IPIs ?
  19.598 -	 *
  19.599 -	 * Determine this based on the APIC version.
  19.600 -	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
  19.601 -	 */
  19.602 -	if (APIC_INTEGRATED(apic_version[phys_apicid]))
  19.603 -		num_starts = 2;
  19.604 -	else
  19.605 -		num_starts = 0;
  19.606 -
  19.607 -	/*
  19.608 -	 * Run STARTUP IPI loop.
  19.609 -	 */
  19.610 -	Dprintk("#startup loops: %d.\n", num_starts);
  19.611 -
  19.612 -	maxlvt = get_maxlvt();
  19.613 -
  19.614 -	for (j = 1; j <= num_starts; j++) {
  19.615 -		Dprintk("Sending STARTUP #%d.\n",j);
  19.616 -		apic_read_around(APIC_SPIV);
  19.617 -		apic_write(APIC_ESR, 0);
  19.618 -		apic_read(APIC_ESR);
  19.619 -		Dprintk("After apic_write.\n");
  19.620 -
  19.621 -		/*
  19.622 -		 * STARTUP IPI
  19.623 -		 */
  19.624 -
  19.625 -		/* Target chip */
  19.626 -		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
  19.627 -
  19.628 -		/* Boot on the stack */
  19.629 -		/* Kick the second */
  19.630 -		apic_write_around(APIC_ICR, APIC_DM_STARTUP
  19.631 -					| (start_rip >> 12));
  19.632 -
  19.633 -		/*
  19.634 -		 * Give the other CPU some time to accept the IPI.
  19.635 -		 */
  19.636 -		udelay(300);
  19.637 -
  19.638 -		Dprintk("Startup point 1.\n");
  19.639 -
  19.640 -		Dprintk("Waiting for send to finish...\n");
  19.641 -		timeout = 0;
  19.642 -		do {
  19.643 -			Dprintk("+");
  19.644 -			udelay(100);
  19.645 -			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
  19.646 -		} while (send_status && (timeout++ < 1000));
  19.647 -
  19.648 -		/*
  19.649 -		 * Give the other CPU some time to accept the IPI.
  19.650 -		 */
  19.651 -		udelay(200);
  19.652 -		/*
  19.653 -		 * Due to the Pentium erratum 3AP.
  19.654 -		 */
  19.655 -		if (maxlvt > 3) {
  19.656 -			apic_read_around(APIC_SPIV);
  19.657 -			apic_write(APIC_ESR, 0);
  19.658 -		}
  19.659 -		accept_status = (apic_read(APIC_ESR) & 0xEF);
  19.660 -		if (send_status || accept_status)
  19.661 -			break;
  19.662 -	}
  19.663 -	Dprintk("After Startup.\n");
  19.664 -
  19.665 -	if (send_status)
  19.666 -		printk(KERN_ERR "APIC never delivered???\n");
  19.667 -	if (accept_status)
  19.668 -		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
  19.669 -
  19.670 -	return (send_status | accept_status);
  19.671 -}
  19.672 -#endif
  19.673 -
  19.674 -/*
  19.675 - * Boot one CPU.
  19.676 - */
  19.677 -static int __cpuinit do_boot_cpu(int cpu, int apicid)
  19.678 -{
  19.679 -	struct task_struct *idle;
  19.680 -	unsigned long boot_error;
  19.681 -	int timeout;
  19.682 -	unsigned long start_rip;
  19.683 -#ifdef CONFIG_XEN
  19.684 -	vcpu_guest_context_t ctxt;
  19.685 -	extern void startup_64_smp(void);
  19.686 -	extern void hypervisor_callback(void);
  19.687 -	extern void failsafe_callback(void);
  19.688 -	extern void smp_trap_init(trap_info_t *);
  19.689 -	int i;
  19.690 -#endif
  19.691 -	/*
  19.692 -	 * We can't use kernel_thread since we must avoid
  19.693 -	 * rescheduling the child.
  19.694 -	 */
  19.695 -	idle = fork_idle(cpu);
  19.696 -	if (IS_ERR(idle)) {
  19.697 -		printk("failed fork for CPU %d\n", cpu);
  19.698 -		return PTR_ERR(idle);
  19.699 -	}
  19.700 -
  19.701 -	cpu_pda[cpu].pcurrent = idle;
  19.702 -
  19.703 -#ifndef CONFIG_XEN
  19.704 -	start_rip = setup_trampoline();
  19.705 -#else
  19.706 -	start_rip = (unsigned long)startup_64_smp;
  19.707 -#endif
  19.708 -
  19.709 -	init_rsp = idle->thread.rsp;
  19.710 -	per_cpu(init_tss,cpu).rsp0 = init_rsp;
  19.711 -	initial_code = start_secondary;
  19.712 -	clear_ti_thread_flag(idle->thread_info, TIF_FORK);
  19.713 -
  19.714 -	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
  19.715 -	       start_rip, init_rsp);
  19.716 -
  19.717 -	/*
  19.718 -	 * This grunge runs the startup process for
  19.719 -	 * the targeted processor.
  19.720 -	 */
  19.721 -
  19.722 -	atomic_set(&init_deasserted, 0);
  19.723 -
  19.724 -#ifdef CONFIG_XEN
  19.725 -	cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
  19.726 -	BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
  19.727 -	cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
  19.728 -	memcpy((void *)cpu_gdt_descr[cpu].address,
  19.729 -		(void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
  19.730 -
  19.731 -	memset(&ctxt, 0, sizeof(ctxt));
  19.732 -
  19.733 -	ctxt.flags = VGCF_IN_KERNEL;
  19.734 -	ctxt.user_regs.ds = __USER_DS;
  19.735 -	ctxt.user_regs.es = __USER_DS;
  19.736 -	ctxt.user_regs.fs = 0;
  19.737 -	ctxt.user_regs.gs = 0;
  19.738 -	ctxt.user_regs.ss = __KERNEL_DS|0x3;
  19.739 -	ctxt.user_regs.cs = __KERNEL_CS|0x3;
  19.740 -	ctxt.user_regs.rip = start_rip;
  19.741 -	ctxt.user_regs.rsp = idle->thread.rsp;
  19.742 -#define X86_EFLAGS_IOPL_RING3 0x3000
  19.743 -	ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING3;
  19.744 -
  19.745 -	/* FPU is set up to default initial state. */
  19.746 -	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
  19.747 -
  19.748 -	smp_trap_init(ctxt.trap_ctxt);
  19.749 -
  19.750 -	/* No LDT. */
  19.751 -	ctxt.ldt_ents = 0;
  19.752 -
  19.753 -	{
  19.754 -		unsigned long va;
  19.755 -		int f;
  19.756 -
  19.757 -		for (va = cpu_gdt_descr[cpu].address, f = 0;
  19.758 -		     va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
  19.759 -		     va += PAGE_SIZE, f++) {
  19.760 -			ctxt.gdt_frames[f] = virt_to_mfn(va);
  19.761 -			make_page_readonly((void *)va);
  19.762 -		}
  19.763 -		ctxt.gdt_ents = GDT_ENTRIES;
  19.764 -	}
  19.765 -
  19.766 -	/* Ring 1 stack is the initial stack. */
  19.767 -	ctxt.kernel_ss = __KERNEL_DS;
  19.768 -	ctxt.kernel_sp = idle->thread.rsp;
  19.769 -
  19.770 -	/* Callback handlers. */
  19.771 -	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
  19.772 -	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
  19.773 -	ctxt.syscall_callback_eip  = (unsigned long)system_call;
  19.774 -
  19.775 -	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
  19.776 -
  19.777 -	boot_error  = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
  19.778 -	if (boot_error)
  19.779 -		printk("boot error: %ld\n", boot_error);
  19.780 -
  19.781 -	if (!boot_error) {
  19.782 -		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
  19.783 -
  19.784 -		/*
  19.785 -		 * allow APs to start initializing.
  19.786 -		 */
  19.787 -		Dprintk("Before Callout %d.\n", cpu);
  19.788 -		cpu_set(cpu, cpu_callout_map);
  19.789 -		Dprintk("After Callout %d.\n", cpu);
  19.790 -
  19.791 -		/*
  19.792 -		 * Wait 5s total for a response
  19.793 -		 */
  19.794 -		for (timeout = 0; timeout < 50000; timeout++) {
  19.795 -			if (cpu_isset(cpu, cpu_callin_map))
  19.796 -				break;	/* It has booted */
  19.797 -			udelay(100);
  19.798 -		}
  19.799 -
  19.800 -		if (cpu_isset(cpu, cpu_callin_map)) {
  19.801 -			/* number CPUs logically, starting from 1 (BSP is 0) */
  19.802 -			Dprintk("CPU has booted.\n");
  19.803 -		} else {
  19.804 -			boot_error= 1;
  19.805 -		}
  19.806 -	}
  19.807 -	x86_cpu_to_apicid[cpu] = apicid;
  19.808 -#else
  19.809 -	Dprintk("Setting warm reset code and vector.\n");
  19.810 -
  19.811 -	CMOS_WRITE(0xa, 0xf);
  19.812 -	local_flush_tlb();
  19.813 -	Dprintk("1.\n");
  19.814 -	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
  19.815 -	Dprintk("2.\n");
  19.816 -	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
  19.817 -	Dprintk("3.\n");
  19.818 -
  19.819 -	/*
  19.820 -	 * Be paranoid about clearing APIC errors.
  19.821 -	 */
  19.822 -	if (APIC_INTEGRATED(apic_version[apicid])) {
  19.823 -		apic_read_around(APIC_SPIV);
  19.824 -		apic_write(APIC_ESR, 0);
  19.825 -		apic_read(APIC_ESR);
  19.826 -	}
  19.827 -
  19.828 -	/*
  19.829 -	 * Status is now clean
  19.830 -	 */
  19.831 -	boot_error = 0;
  19.832 -
  19.833 -	/*
  19.834 -	 * Starting actual IPI sequence...
  19.835 -	 */
  19.836 -	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
  19.837 -
  19.838 -	if (!boot_error) {
  19.839 -		/*
  19.840 -		 * allow APs to start initializing.
  19.841 -		 */
  19.842 -		Dprintk("Before Callout %d.\n", cpu);
  19.843 -		cpu_set(cpu, cpu_callout_map);
  19.844 -		Dprintk("After Callout %d.\n", cpu);
  19.845 -
  19.846 -		/*
  19.847 -		 * Wait 5s total for a response
  19.848 -		 */
  19.849 -		for (timeout = 0; timeout < 50000; timeout++) {
  19.850 -			if (cpu_isset(cpu, cpu_callin_map))
  19.851 -				break;	/* It has booted */
  19.852 -			udelay(100);
  19.853 -		}
  19.854 -
  19.855 -		if (cpu_isset(cpu, cpu_callin_map)) {
  19.856 -			/* number CPUs logically, starting from 1 (BSP is 0) */
  19.857 -			Dprintk("CPU has booted.\n");
  19.858 -		} else {
  19.859 -			boot_error = 1;
  19.860 -			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
  19.861 -					== 0xA5)
  19.862 -				/* trampoline started but...? */
  19.863 -				printk("Stuck ??\n");
  19.864 -			else
  19.865 -				/* trampoline code not run */
  19.866 -				printk("Not responding.\n");
  19.867 -#if APIC_DEBUG
  19.868 -			inquire_remote_apic(apicid);
  19.869 -#endif
  19.870 -		}
  19.871 -	}
  19.872 -#endif
  19.873 -	if (boot_error) {
  19.874 -		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
  19.875 -		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
  19.876 -		cpu_clear(cpu, cpu_present_map);
  19.877 -		cpu_clear(cpu, cpu_possible_map);
  19.878 -		x86_cpu_to_apicid[cpu] = BAD_APICID;
  19.879 -		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
  19.880 -		return -EIO;
  19.881 -	}
  19.882 -
  19.883 -	return 0;
  19.884 -}
  19.885 -
  19.886 -cycles_t cacheflush_time;
  19.887 -unsigned long cache_decay_ticks;
  19.888 -
  19.889 -/*
  19.890 - * Construct cpu_sibling_map[], so that we can tell the sibling CPU
  19.891 - * on SMT systems efficiently.
  19.892 - */
  19.893 -static __cpuinit void detect_siblings(void)
  19.894 -{
  19.895 -	int cpu;
  19.896 -
  19.897 -	for (cpu = 0; cpu < NR_CPUS; cpu++) {
  19.898 -		cpus_clear(cpu_sibling_map[cpu]);
  19.899 -		cpus_clear(cpu_core_map[cpu]);
  19.900 -	}
  19.901 -
  19.902 -	for_each_online_cpu (cpu) {
  19.903 -		struct cpuinfo_x86 *c = cpu_data + cpu;
  19.904 -		int siblings = 0;
  19.905 -		int i;
  19.906 -		if (smp_num_siblings > 1) {
  19.907 -			for_each_online_cpu (i) {
  19.908 -				if (cpu_core_id[cpu] == cpu_core_id[i]) {
  19.909 -					siblings++;
  19.910 -					cpu_set(i, cpu_sibling_map[cpu]);
  19.911 -				}
  19.912 -			}
  19.913 -		} else {
  19.914 -			siblings++;
  19.915 -			cpu_set(cpu, cpu_sibling_map[cpu]);
  19.916 -		}
  19.917 -
  19.918 -		if (siblings != smp_num_siblings) {
  19.919 -			printk(KERN_WARNING
  19.920 -	       "WARNING: %d siblings found for CPU%d, should be %d\n",
  19.921 -			       siblings, cpu, smp_num_siblings);
  19.922 -			smp_num_siblings = siblings;
  19.923 -		}
  19.924 -		if (c->x86_num_cores > 1) {
  19.925 -			for_each_online_cpu(i) {
  19.926 -				if (phys_proc_id[cpu] == phys_proc_id[i])
  19.927 -					cpu_set(i, cpu_core_map[cpu]);
  19.928 -			}
  19.929 -		} else
  19.930 -			cpu_core_map[cpu] = cpu_sibling_map[cpu];
  19.931 -	}
  19.932 -}
  19.933 -
  19.934 -#ifndef CONFIG_XEN
  19.935 -/*
  19.936 - * Cleanup possible dangling ends...
  19.937 - */
  19.938 -static __cpuinit void smp_cleanup_boot(void)
  19.939 -{
  19.940 -	/*
  19.941 -	 * Paranoid:  Set warm reset code and vector here back
  19.942 -	 * to default values.
  19.943 -	 */
  19.944 -	CMOS_WRITE(0, 0xf);
  19.945 -
  19.946 -	/*
  19.947 -	 * Reset trampoline flag
  19.948 -	 */
  19.949 -	*((volatile int *) phys_to_virt(0x467)) = 0;
  19.950 -
  19.951 -#ifndef CONFIG_HOTPLUG_CPU
  19.952 -	/*
  19.953 -	 * Free pages reserved for SMP bootup.
  19.954 -	 * When you add hotplug CPU support later remove this
  19.955 -	 * Note there is more work to be done for later CPU bootup.
  19.956 -	 */
  19.957 -
  19.958 -	free_page((unsigned long) __va(PAGE_SIZE));
  19.959 -	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
  19.960 -#endif
  19.961 -}
  19.962 -#endif
  19.963 -
  19.964 -/*
  19.965 - * Fall back to non SMP mode after errors.
  19.966 - *
  19.967 - * RED-PEN audit/test this more. I bet there is more state messed up here.
  19.968 - */
  19.969 -static __cpuinit void disable_smp(void)
  19.970 -{
  19.971 -	cpu_present_map = cpumask_of_cpu(0);
  19.972 -	cpu_possible_map = cpumask_of_cpu(0);
  19.973 -#ifndef CONFIG_XEN
  19.974 -	if (smp_found_config)
  19.975 -		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
  19.976 -	else
  19.977 -		phys_cpu_present_map = physid_mask_of_physid(0);
  19.978 -#endif
  19.979 -	cpu_set(0, cpu_sibling_map[0]);
  19.980 -	cpu_set(0, cpu_core_map[0]);
  19.981 -}
  19.982 -
  19.983 -/*
  19.984 - * Handle user cpus=... parameter.
  19.985 - */
  19.986 -static __cpuinit void enforce_max_cpus(unsigned max_cpus)
  19.987 -{
  19.988 -	int i, k;
  19.989 -	k = 0;
  19.990 -	for (i = 0; i < NR_CPUS; i++) {
  19.991 -		if (!cpu_possible(i))
  19.992 -			continue;
  19.993 -		if (++k > max_cpus) {
  19.994 -			cpu_clear(i, cpu_possible_map);
  19.995 -			cpu_clear(i, cpu_present_map);
  19.996 -		}
  19.997 -	}
  19.998 -}
  19.999 -
 19.1000 -/*
 19.1001 - * Various sanity checks.
 19.1002 - */
 19.1003 -static int __cpuinit smp_sanity_check(unsigned max_cpus)
 19.1004 -{
 19.1005 -#ifndef CONFIG_XEN
 19.1006 -	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
 19.1007 -		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
 19.1008 -		       hard_smp_processor_id());
 19.1009 -		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 19.1010 -	}
 19.1011 -
 19.1012 -	/*
 19.1013 -	 * If we couldn't find an SMP configuration at boot time,
 19.1014 -	 * get out of here now!
 19.1015 -	 */
 19.1016 -	if (!smp_found_config) {
 19.1017 -		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 19.1018 -		disable_smp();
 19.1019 -		if (APIC_init_uniprocessor())
 19.1020 -			printk(KERN_NOTICE "Local APIC not detected."
 19.1021 -					   " Using dummy APIC emulation.\n");
 19.1022 -		return -1;
 19.1023 -	}
 19.1024 -
 19.1025 -	/*
 19.1026 -	 * Should not be necessary because the MP table should list the boot
 19.1027 -	 * CPU too, but we do it for the sake of robustness anyway.
 19.1028 -	 */
 19.1029 -	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
 19.1030 -		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
 19.1031 -								 boot_cpu_id);
 19.1032 -		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 19.1033 -	}
 19.1034 -
 19.1035 -	/*
 19.1036 -	 * If we couldn't find a local APIC, then get out of here now!
 19.1037 -	 */
 19.1038 -	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
 19.1039 -		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
 19.1040 -			boot_cpu_id);
 19.1041 -		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 19.1042 -		nr_ioapics = 0;
 19.1043 -		return -1;
 19.1044 -	}
 19.1045 -#endif
 19.1046 -
 19.1047 -	/*
 19.1048 -	 * If SMP should be disabled, then really disable it!
 19.1049 -	 */
 19.1050 -	if (!max_cpus) {
 19.1051 -#ifdef CONFIG_XEN
 19.1052 -		HYPERVISOR_shared_info->n_vcpu = 1;
 19.1053 -#endif
 19.1054 -		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 19.1055 -#ifndef CONFIG_XEN
 19.1056 -		nr_ioapics = 0;
 19.1057 -#endif
 19.1058 -		return -1;
 19.1059 -	}
 19.1060 -
 19.1061 -	return 0;
 19.1062 -}
 19.1063 -
 19.1064 -/*
 19.1065 - * Prepare for SMP bootup.  The MP table or ACPI has been read
 19.1066 - * earlier.  Just do some sanity checking here and enable APIC mode.
 19.1067 - */
 19.1068 -void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
 19.1069 -{
 19.1070 -	int i;
 19.1071 -
 19.1072 -#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
 19.1073 -#else
 19.1074 -	nmi_watchdog_default();
 19.1075 -#endif
 19.1076 -	current_cpu_data = boot_cpu_data;
 19.1077 -	current_thread_info()->cpu = 0;  /* needed? */
 19.1078 -
 19.1079 -	enforce_max_cpus(max_cpus);
 19.1080 -
 19.1081 -	/*
 19.1082 -	 * Fill in cpu_present_mask
 19.1083 -	 */
 19.1084 -	for (i = 0; i < NR_CPUS; i++) {
 19.1085 -#ifndef CONFIG_XEN
 19.1086 -		int apicid = cpu_present_to_apicid(i);
 19.1087 -		if (physid_isset(apicid, phys_cpu_present_map)) {
 19.1088 -#else
 19.1089 -		if (i < HYPERVISOR_shared_info->n_vcpu) {
 19.1090 -#endif
 19.1091 -			cpu_set(i, cpu_present_map);
 19.1092 -			/* possible map would be different if we supported real
 19.1093 -			   CPU hotplug. */
 19.1094 -			cpu_set(i, cpu_possible_map);
 19.1095 -		}
 19.1096 -	}
 19.1097 -
 19.1098 -	if (smp_sanity_check(max_cpus) < 0) {
 19.1099 -		printk(KERN_INFO "SMP disabled\n");
 19.1100 -		disable_smp();
 19.1101 -		return;
 19.1102 -	}
 19.1103 -
 19.1104 -#ifdef CONFIG_XEN
 19.1105 -	smp_intr_init();
 19.1106 -#else
 19.1107 -
 19.1108 -	/*
 19.1109 -	 * Switch from PIC to APIC mode.
 19.1110 -	 */
 19.1111 -	connect_bsp_APIC();
 19.1112 -	setup_local_APIC();
 19.1113 -
 19.1114 -	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
 19.1115 -		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
 19.1116 -		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
 19.1117 -		/* Or can we switch back to PIC here? */
 19.1118 -	}
 19.1119 -#endif
 19.1120 -
 19.1121 -	/*
 19.1122 -	 * Now start the IO-APICs
 19.1123 -	 */
 19.1124 -#if defined(CONFIG_XEN) && !defined(CONFIG_XEN_PRIVILEGED_GUEST)
 19.1125 -#else
 19.1126 -	if (!skip_ioapic_setup && nr_ioapics)
 19.1127 -		setup_IO_APIC();
 19.1128 -	else
 19.1129 -		nr_ioapics = 0;
 19.1130 -#endif
 19.1131 -
 19.1132 -	/*
 19.1133 -	 * Set up local APIC timer on boot CPU.
 19.1134 -	 */
 19.1135 -
 19.1136 -#ifndef CONFIG_XEN
 19.1137 -	setup_boot_APIC_clock();
 19.1138 -#endif
 19.1139 -}
 19.1140 -
 19.1141 -/*
 19.1142 - * Early setup to make printk work.
 19.1143 - */
 19.1144 -void __init smp_prepare_boot_cpu(void)
 19.1145 -{
 19.1146 -	int me = smp_processor_id();
 19.1147 -	cpu_set(me, cpu_online_map);
 19.1148 -	cpu_set(me, cpu_callout_map);
 19.1149 -}
 19.1150 -
 19.1151 -/*
 19.1152 - * Entry point to boot a CPU.
 19.1153 - *
 19.1154 - * This is all __cpuinit, not __devinit for now because we don't support
 19.1155 - * CPU hotplug (yet).
 19.1156 - */
 19.1157 -int __cpuinit __cpu_up(unsigned int cpu)
 19.1158 -{
 19.1159 -	int err;
 19.1160 -#ifndef CONFIG_XEN
 19.1161 -	int apicid = cpu_present_to_apicid(cpu);
 19.1162 -#else
 19.1163 -	int apicid = cpu;
 19.1164 -#endif
 19.1165 -
 19.1166 -	WARN_ON(irqs_disabled());
 19.1167 -
 19.1168 -	Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 19.1169 -
 19.1170 -#ifndef CONFIG_XEN
 19.1171 -	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
 19.1172 -	    !physid_isset(apicid, phys_cpu_present_map)) {
 19.1173 -		printk("__cpu_up: bad cpu %d\n", cpu);
 19.1174 -		return -EINVAL;
 19.1175 -	}
 19.1176 -#endif
 19.1177 -
 19.1178 -	/* Boot it! */
 19.1179 -	err = do_boot_cpu(cpu, apicid);
 19.1180 -	if (err < 0) {
 19.1181 -		Dprintk("do_boot_cpu failed %d\n", err);
 19.1182 -		return err;
 19.1183 -	}
 19.1184 -
 19.1185 -	/* Unleash the CPU! */
 19.1186 -	Dprintk("waiting for cpu %d\n", cpu);
 19.1187 -
 19.1188 -	while (!cpu_isset(cpu, cpu_online_map))
 19.1189 -		cpu_relax();
 19.1190 -	return 0;
 19.1191 -}
 19.1192 -
 19.1193 -/*
 19.1194 - * Finish the SMP boot.
 19.1195 - */
 19.1196 -void __cpuinit smp_cpus_done(unsigned int max_cpus)
 19.1197 -{
 19.1198 -#ifndef CONFIG_XEN
 19.1199 -	zap_low_mappings();
 19.1200 -	smp_cleanup_boot();
 19.1201 -
 19.1202 -#ifdef CONFIG_X86_IO_APIC
 19.1203 -	setup_ioapic_dest();
 19.1204 -#endif
 19.1205 -#endif
 19.1206 -
 19.1207 -	detect_siblings();
 19.1208 -#ifndef CONFIG_XEN
 19.1209 -	time_init_gtod();
 19.1210 -
 19.1211 -	check_nmi_watchdog();
 19.1212 -#endif
 19.1213 -}
 19.1214 -
 19.1215 -#ifdef CONFIG_XEN
 19.1216 -extern int bind_ipi_to_irq(int ipi);
 19.1217 -extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
 19.1218 -extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
 19.1219 -
 19.1220 -static DEFINE_PER_CPU(int, resched_irq);
 19.1221 -static DEFINE_PER_CPU(int, callfunc_irq);
 19.1222 -static char resched_name[NR_CPUS][15];
 19.1223 -static char callfunc_name[NR_CPUS][15];
 19.1224 -
 19.1225 -void smp_intr_init(void)
 19.1226 -{
 19.1227 -	int cpu = smp_processor_id();
 19.1228 -
 19.1229 -	per_cpu(resched_irq, cpu) =
 19.1230 -		bind_ipi_to_irq(RESCHEDULE_VECTOR);
 19.1231 -	sprintf(resched_name[cpu], "resched%d", cpu);
 19.1232 -	BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
 19.1233 -	                   SA_INTERRUPT, resched_name[cpu], NULL));
 19.1234 -
 19.1235 -	per_cpu(callfunc_irq, cpu) =
 19.1236 -		bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
 19.1237 -	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
 19.1238 -	BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
 19.1239 -	                   smp_call_function_interrupt,
 19.1240 -	                   SA_INTERRUPT, callfunc_name[cpu], NULL));
 19.1241 -}
 19.1242 -
 19.1243 -static void smp_intr_exit(void)
 19.1244 -{
 19.1245 -	int cpu = smp_processor_id();
 19.1246 -
 19.1247 -	free_irq(per_cpu(resched_irq, cpu), NULL);
 19.1248 -	unbind_ipi_from_irq(RESCHEDULE_VECTOR);
 19.1249 -
 19.1250 -	free_irq(per_cpu(callfunc_irq, cpu), NULL);
 19.1251 -	unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
 19.1252 -}
 19.1253 -
 19.1254 -extern void local_setup_timer_irq(void);
 19.1255 -extern void local_teardown_timer_irq(void);
 19.1256 -
 19.1257 -void smp_suspend(void)
 19.1258 -{
 19.1259 -	local_teardown_timer_irq();
 19.1260 -	smp_intr_exit();
 19.1261 -}
 19.1262 -
 19.1263 -void smp_resume(void)
 19.1264 -{
 19.1265 -	smp_intr_init();
 19.1266 -	local_setup_timer_irq();
 19.1267 -}
 19.1268 -
 19.1269 -void vcpu_prepare(int vcpu)
 19.1270 -{
 19.1271 -}
 19.1272 -
 19.1273 -#endif
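
The long run of removed lines above (section 19) carries the x86-64 SMP boot
path with its Xen adaptations. Its most Xen-specific piece is how do_boot_cpu()
starts a secondary CPU when CONFIG_XEN is set: instead of the INIT/STARTUP IPI
dance in wakeup_secondary_via_INIT(), the boot CPU fills in a
vcpu_guest_context_t, registers it with the hypervisor, and simply makes the
VCPU runnable. Condensed from do_boot_cpu() above (a sketch; GDT setup,
callback registration and error handling elided):

    vcpu_guest_context_t ctxt;

    memset(&ctxt, 0, sizeof(ctxt));
    ctxt.flags = VGCF_IN_KERNEL;
    ctxt.user_regs.rip = start_rip;         /* entry point: startup_64_smp */
    ctxt.user_regs.rsp = idle->thread.rsp;  /* initial stack: the idle task's */
    smp_trap_init(ctxt.trap_ctxt);          /* per-CPU trap table */

    /* Register the context, then make the VCPU runnable. */
    if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt) == 0)
        HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);

The rest of the handshake (cpu_callout_map / cpu_callin_map polling) is shared
with the native path.
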
    20.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c	Fri Oct 21 13:58:39 2005 -0600
    20.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/x8664_ksyms.c	Mon Oct 24 09:08:13 2005 -0600
    20.3 @@ -113,14 +113,11 @@ EXPORT_SYMBOL(clear_page);
    20.4  
    20.5  EXPORT_SYMBOL(cpu_pda);
    20.6  #ifdef CONFIG_SMP
    20.7 -EXPORT_SYMBOL(cpu_data);
    20.8 -EXPORT_SYMBOL(cpu_online_map);
    20.9  EXPORT_SYMBOL(__write_lock_failed);
   20.10  EXPORT_SYMBOL(__read_lock_failed);
   20.11  
   20.12  EXPORT_SYMBOL(synchronize_irq);
   20.13  EXPORT_SYMBOL(smp_call_function);
   20.14 -EXPORT_SYMBOL(cpu_callout_map);
   20.15  #endif
   20.16  
   20.17  #ifdef CONFIG_VT
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Fri Oct 21 13:58:39 2005 -0600
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Mon Oct 24 09:08:13 2005 -0600
    21.3 @@ -502,7 +502,7 @@ static int dealloc_pte_fn(
    21.4  		.extent_order = 0,
    21.5  		.domid        = DOMID_SELF
    21.6  	};
    21.7 -	set_pte(pte, __pte_ma(0));
    21.8 +	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
    21.9  	phys_to_machine_mapping[__pa(addr) >> PAGE_SHIFT] =
   21.10  		INVALID_P2M_ENTRY;
   21.11  	BUG_ON(HYPERVISOR_memory_op(
   21.12 @@ -521,10 +521,9 @@ struct page *balloon_alloc_empty_page_ra
   21.13  
   21.14  	scrub_pages(vstart, 1 << order);
   21.15  
   21.16 +	balloon_lock(flags);
   21.17  	BUG_ON(generic_page_range(
   21.18  		&init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL));
   21.19 -
   21.20 -	balloon_lock(flags);
   21.21  	current_pages -= 1UL << order;
   21.22  	balloon_unlock(flags);
   21.23  
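
Two separate fixes in the balloon hunk above. First, dealloc_pte_fn() switches
from set_pte() to set_pte_at(), which also passes the mm and the virtual
address so the paravirtualized PTE write has full context. Second,
balloon_lock() is now taken before generic_page_range() rather than after it,
so the PTE teardown and the current_pages accounting form one critical
section; previously a concurrent balloon operation could run between the two
and observe accounting that disagreed with the page tables. The corrected
ordering, condensed from the new code:

    balloon_lock(flags);                /* teardown + accounting together */
    BUG_ON(generic_page_range(
        &init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL));
    current_pages -= 1UL << order;
    balloon_unlock(flags);
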
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Fri Oct 21 13:58:39 2005 -0600
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Mon Oct 24 09:08:13 2005 -0600
    22.3 @@ -494,10 +494,6 @@ static int __init blkif_init(void)
    22.4  	int i;
    22.5  	struct page *page;
    22.6  
    22.7 -	if (!(xen_start_info->flags & SIF_INITDOMAIN) &&
    22.8 -	    !(xen_start_info->flags & SIF_BLK_BE_DOMAIN))
    22.9 -		return 0;
   22.10 -
   22.11  	blkif_interface_init();
   22.12  
   22.13  	page = balloon_alloc_empty_page_range(MMAP_PAGES);
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Fri Oct 21 13:58:39 2005 -0600
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Oct 24 09:08:13 2005 -0600
    23.3 @@ -725,21 +725,9 @@ static struct xenbus_driver blkfront = {
    23.4  	.suspend = blkfront_suspend,
    23.5  };
    23.6  
    23.7 -static void __init init_blk_xenbus(void)
    23.8 -{
    23.9 -	xenbus_register_driver(&blkfront);
   23.10 -}
   23.11 -
   23.12  static int __init xlblk_init(void)
   23.13  {
   23.14 -	if ((xen_start_info->flags & SIF_INITDOMAIN) ||
   23.15 -	    (xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   23.16 -		return 0;
   23.17 -
   23.18 -	IPRINTK("Initialising virtual block device driver\n");
   23.19 -
   23.20 -	init_blk_xenbus();
   23.21 -
   23.22 +	xenbus_register_driver(&blkfront);
   23.23  	return 0;
   23.24  }
   23.25  
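
blkfront above, and blkback, blktap, netback and tpmback in the surrounding
hunks, all drop their SIF_INITDOMAIN / SIF_*_BE_DOMAIN start-of-day checks.
Drivers now register with xenbus unconditionally, and the bus probes a driver
only when a matching node actually appears in the store, so one kernel image
can serve as frontend, backend, or both without flag plumbing. A module init
under this scheme reduces to the registration call (sketch; the module_init()
hookup is assumed rather than shown in the hunk):

    static int __init xlblk_init(void)
    {
        /* No SIF_* gating: xenbus invokes blkfront's probe hook only if
         * a virtual block device is advertised for this domain. */
        xenbus_register_driver(&blkfront);
        return 0;
    }
    module_init(xlblk_init);
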
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Fri Oct 21 13:58:39 2005 -0600
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Mon Oct 24 09:08:13 2005 -0600
    24.3 @@ -861,11 +861,7 @@ static int __init blkif_init(void)
    24.4  {
    24.5  	int i, j, err;
    24.6  	struct page *page;
    24.7 -/*
    24.8 -  if ( !(xen_start_info->flags & SIF_INITDOMAIN) &&
    24.9 -  !(xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
   24.10 -  return 0;
   24.11 -*/
   24.12 +
   24.13  	blkif_interface_init();
   24.14  
   24.15  	page = balloon_alloc_empty_page_range(MMAP_PAGES);
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c	Fri Oct 21 13:58:39 2005 -0600
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c	Mon Oct 24 09:08:13 2005 -0600
    25.3 @@ -768,7 +768,7 @@ static int __init xencons_init(void)
    25.4  #endif
    25.5  
    25.6  	if (xen_start_info->flags & SIF_INITDOMAIN) {
    25.7 -		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
    25.8 +		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
    25.9  		(void)request_irq(xencons_priv_irq,
   25.10  				  xencons_priv_interrupt, 0, "console", NULL);
   25.11  	} else {
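
bind_virq_to_irq() has grown a second argument, presumably naming the VCPU on
which the virtual IRQ is bound; every caller touched by this changeset (here,
and netback below for VIRQ_DEBUG) passes 0 for the boot VCPU. The updated
calling convention, assuming the two-argument signature these hunks use:

    /* Bind VIRQ_CONSOLE on VCPU0, then attach the handler as before. */
    xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
    (void)request_irq(xencons_priv_irq,
                      xencons_priv_interrupt, 0, "console", NULL);
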
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Fri Oct 21 13:58:39 2005 -0600
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Mon Oct 24 09:08:13 2005 -0600
    26.3 @@ -34,14 +34,18 @@ int xencons_ring_send(const char *data, 
    26.4  {
    26.5  	int sent = 0;
    26.6  	struct xencons_interface *intf = xencons_interface();
    26.7 +	XENCONS_RING_IDX cons, prod;
    26.8  
    26.9 -	while ((sent < len) &&
   26.10 -	       (intf->out_prod - intf->out_cons) < sizeof(intf->out)) {
   26.11 -		intf->out[MASK_XENCONS_IDX(intf->out_prod, intf->out)] =
   26.12 -			data[sent];
   26.13 -		intf->out_prod++;
   26.14 -		sent++;
   26.15 -	}
   26.16 +	cons = intf->out_cons;
   26.17 +	prod = intf->out_prod;
   26.18 +	mb();
   26.19 +	BUG_ON((prod - cons) > sizeof(intf->out));
   26.20 +
   26.21 +	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
   26.22 +		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
   26.23 +
   26.24 +	wmb();
   26.25 +	intf->out_prod = prod;
   26.26  
   26.27  	/* Use evtchn: this is called early, before irq is set up. */
   26.28  	notify_remote_via_evtchn(xen_start_info->console_evtchn);
   26.29 @@ -52,16 +56,23 @@ int xencons_ring_send(const char *data, 
   26.30  static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
   26.31  {
   26.32  	struct xencons_interface *intf = xencons_interface();
   26.33 +	XENCONS_RING_IDX cons, prod;
   26.34  
   26.35 -	while (intf->in_cons != intf->in_prod) {
   26.36 +	cons = intf->in_cons;
   26.37 +	prod = intf->in_prod;
   26.38 +	mb();
   26.39 +	BUG_ON((prod - cons) > sizeof(intf->in));
   26.40 +
   26.41 +	while (cons != prod) {
   26.42  		if (xencons_receiver != NULL)
   26.43  			xencons_receiver(
   26.44 -				intf->in + MASK_XENCONS_IDX(intf->in_cons,
   26.45 -							    intf->in),
   26.46 +				intf->in + MASK_XENCONS_IDX(cons++, intf->in),
   26.47  				1, regs);
   26.48 -		intf->in_cons++;
   26.49  	}
   26.50  
   26.51 +	wmb();
   26.52 +	intf->in_cons = cons;
   26.53 +
   26.54  	return IRQ_HANDLED;
   26.55  }
   26.56  
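
Both console ring paths above are rewritten to the standard lockless
single-producer/single-consumer discipline: snapshot the free-running indexes
into locals, mb() before validating them (the peer shares the page and could
scribble on it, hence the BUG_ON bounds check), copy the payload, then wmb()
before publishing the updated index so the other end never sees an index that
runs ahead of the data. The same discipline as a self-contained user-space
model (hypothetical names; C11 acquire/release stands in for the kernel
barriers):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    #define RING_SIZE 1024u                 /* power of two, like intf->out */
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    struct ring {
        char buf[RING_SIZE];
        _Atomic uint32_t cons, prod;        /* free-running indexes */
    };

    /* Producer side; the consumer mirrors it with the roles swapped. */
    static size_t ring_send(struct ring *r, const char *data, size_t len)
    {
        /* Snapshot indexes first; acquire pairs with the consumer's
         * release store, playing the role of mb() in the patch. */
        uint32_t cons = atomic_load_explicit(&r->cons, memory_order_acquire);
        uint32_t prod = atomic_load_explicit(&r->prod, memory_order_relaxed);
        size_t sent = 0;

        while (sent < len && (uint32_t)(prod - cons) < RING_SIZE)
            r->buf[MASK(prod++)] = data[sent++];

        /* Publish the data before the index, as wmb() does in the patch. */
        atomic_store_explicit(&r->prod, prod, memory_order_release);
        return sent;
    }

    int main(void)
    {
        static struct ring r;               /* zero-initialized indexes */
        return ring_send(&r, "hello", 5) == 5 ? 0 : 1;
    }

Note that prod - cons is the queued byte count even when the 32-bit indexes
wrap, which is why neither side ever reduces an index modulo the ring size
except when forming a buffer offset.
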
    27.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Oct 21 13:58:39 2005 -0600
    27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Mon Oct 24 09:08:13 2005 -0600
    27.3 @@ -58,7 +58,7 @@ netif_t *alloc_netif(domid_t domid, unsi
    27.4  	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
    27.5  	if (dev == NULL) {
    27.6  		DPRINTK("Could not create netif: out of memory\n");
    27.7 -		return NULL;
    27.8 +		return ERR_PTR(-ENOMEM);
    27.9  	}
   27.10  
   27.11  	netif = netdev_priv(dev);
   27.12 @@ -104,7 +104,7 @@ netif_t *alloc_netif(domid_t domid, unsi
   27.13  		DPRINTK("Could not register new net device %s: err=%d\n",
   27.14  			dev->name, err);
   27.15  		free_netdev(dev);
   27.16 -		return NULL;
   27.17 +		return ERR_PTR(err);
   27.18  	}
   27.19  
   27.20  	DPRINTK("Successfully created netif\n");
    28.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Oct 21 13:58:39 2005 -0600
    28.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Oct 24 09:08:13 2005 -0600
    28.3 @@ -781,12 +781,6 @@ static int __init netback_init(void)
    28.4  	int i;
    28.5  	struct page *page;
    28.6  
    28.7 -	if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
    28.8 -	    !(xen_start_info->flags & SIF_INITDOMAIN))
    28.9 -		return 0;
   28.10 -
   28.11 -	IPRINTK("Initialising Xen netif backend.\n");
   28.12 -
   28.13  	/* We can increase reservation by this much in net_rx_action(). */
   28.14  	balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
   28.15  
   28.16 @@ -817,7 +811,7 @@ static int __init netback_init(void)
   28.17  
   28.18  	netif_xenbus_init();
   28.19  
   28.20 -	(void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
   28.21 +	(void)request_irq(bind_virq_to_irq(VIRQ_DEBUG, 0),
   28.22  			  netif_be_dbg, SA_SHIRQ, 
   28.23  			  "net-be-dbg", &netif_be_dbg);
   28.24  
    29.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Oct 21 13:58:39 2005 -0600
    29.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Mon Oct 24 09:08:13 2005 -0600
    29.3 @@ -28,10 +28,6 @@ struct backend_info
    29.4  	netif_t *netif;
    29.5  
    29.6  	long int frontend_id;
    29.7 -#if 0
    29.8 -	long int pdev;
    29.9 -	long int readonly;
   29.10 -#endif
   29.11  
   29.12  	/* watch back end for changes */
   29.13  	struct xenbus_watch backend_watch;
   29.14 @@ -155,14 +151,6 @@ static void backend_changed(struct xenbu
   29.15  			return;
   29.16  		}
   29.17  
   29.18 -#if 0
   29.19 -		err = vbd_create(be->netif, handle, be->pdev, be->readonly);
   29.20 -		if (err) {
   29.21 -			xenbus_dev_error(dev, err, "creating vbd structure");
   29.22 -			return;
   29.23 -		}
   29.24 -#endif
   29.25 -
   29.26  		kobject_hotplug(&dev->dev.kobj, KOBJ_ONLINE);
   29.27  
   29.28  		/* Pass in NULL node to skip exist test. */
   29.29 @@ -173,34 +161,27 @@ static void backend_changed(struct xenbu
   29.30  static int netback_hotplug(struct xenbus_device *xdev, char **envp,
   29.31  			   int num_envp, char *buffer, int buffer_size)
   29.32  {
   29.33 -	struct backend_info *be;
   29.34 -	netif_t *netif;
   29.35 -	char **key, *val;
   29.36 +	struct backend_info *be = xdev->data;
   29.37 +	netif_t *netif = be->netif;
   29.38  	int i = 0, length = 0;
   29.39 -	static char *env_vars[] = { "script", "domain", "mac", "bridge", "ip",
   29.40 -				    NULL };
   29.41  
   29.42 -	be = xdev->data;
   29.43 -	netif = be->netif;
   29.44 +	char *val = xenbus_read(NULL, xdev->nodename, "script", NULL);
   29.45 +	if (IS_ERR(val)) {
   29.46 +		int err = PTR_ERR(val);
   29.47 +		xenbus_dev_error(xdev, err, "reading script");
   29.48 +		return err;
   29.49 +	}
   29.50 +	else {
   29.51 +		add_hotplug_env_var(envp, num_envp, &i,
   29.52 +				    buffer, buffer_size, &length,
   29.53 +				    "script=%s", val);
   29.54 +		kfree(val);
   29.55 +	}
   29.56  
   29.57  	add_hotplug_env_var(envp, num_envp, &i,
   29.58  			    buffer, buffer_size, &length,
   29.59  			    "vif=%s", netif->dev->name);
   29.60  
   29.61 -	key = env_vars;
   29.62 -	while (*key != NULL) {
   29.63 -		val = xenbus_read(NULL, xdev->nodename, *key, NULL);
   29.64 -		if (!IS_ERR(val)) {
   29.65 -			char buf[strlen(*key) + 4];
   29.66 -			sprintf(buf, "%s=%%s", *key);
   29.67 -			add_hotplug_env_var(envp, num_envp, &i,
   29.68 -					    buffer, buffer_size, &length,
   29.69 -					    buf, val);
   29.70 -			kfree(val);
   29.71 -		}
   29.72 -		key++;
   29.73 -	}
   29.74 -
   29.75  	envp[i] = NULL;
   29.76  
   29.77  	return 0;
    30.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Oct 21 13:58:39 2005 -0600
    30.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Mon Oct 24 09:08:13 2005 -0600
    30.3 @@ -25,8 +25,6 @@
    30.4  #include <asm/pgtable.h>
    30.5  #include <asm/uaccess.h>
    30.6  #include <asm/tlb.h>
    30.7 -#include <asm-xen/xen-public/xen.h>
    30.8 -#include <asm/hypervisor.h>
    30.9  #include <asm-xen/linux-public/privcmd.h>
   30.10  #include <asm/hypervisor.h>
   30.11  #include <asm-xen/xen-public/xen.h>
   30.12 @@ -219,41 +217,6 @@ static int privcmd_ioctl(struct inode *i
   30.13  	}
   30.14  	break;
   30.15  
   30.16 -	case IOCTL_PRIVCMD_INITDOMAIN_STORE: {
   30.17 -		extern int do_xenbus_probe(void*);
   30.18 -		unsigned long page;
   30.19 -
   30.20 -		if (xen_start_info->store_evtchn != 0) {
   30.21 -			ret = xen_start_info->store_mfn;
   30.22 -			break;
   30.23 -		}
   30.24 -
   30.25 -		/* Allocate page. */
   30.26 -		page = get_zeroed_page(GFP_KERNEL);
   30.27 -		if (!page) {
   30.28 -			ret = -ENOMEM;
   30.29 -			break;
   30.30 -		}
   30.31 -
   30.32 -		/* We don't refcnt properly, so set reserved on page.
   30.33 -		 * (this allocation is permanent) */
   30.34 -		SetPageReserved(virt_to_page(page));
   30.35 -
   30.36 -		/* Initial connect. Setup channel and page. */
   30.37 -		xen_start_info->store_evtchn = data;
   30.38 -		xen_start_info->store_mfn =
   30.39 -			pfn_to_mfn(virt_to_phys((void *)page) >>
   30.40 -				   PAGE_SHIFT);
   30.41 -		ret = xen_start_info->store_mfn;
   30.42 -
   30.43 -		/* 
   30.44 -		** Complete initialization of xenbus (viz. set up the 
   30.45 -		** connection to xenstored now that it has started). 
   30.46 -		*/
   30.47 -		kthread_run(do_xenbus_probe, NULL, "xenbus_probe");
   30.48 -	}
   30.49 -	break;
   30.50 -
   30.51  	default:
   30.52  		ret = -EINVAL;
   30.53  		break;
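
IOCTL_PRIVCMD_INITDOMAIN_STORE disappears because dom0 no longer waits for the
user-space tools to hand it a store page: xenbus_probe_init() (final hunk of
this changeset) allocates the page itself at boot, and the first store
interrupt flips xenstored_ready and schedules xenbus_probe() off a workqueue
(see the xenbus_comms.c hunk below), replacing the old do_xenbus_probe()
kthread. Condensed from the removed ioctl body, the page bootstrap amounts to
(sketch; error paths elided):

    unsigned long page = get_zeroed_page(GFP_KERNEL);

    /* Permanent allocation, never properly refcounted: mark it reserved. */
    SetPageReserved(virt_to_page(page));

    xen_start_info->store_mfn =
        pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT);
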
    31.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Fri Oct 21 13:58:39 2005 -0600
    31.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Mon Oct 24 09:08:13 2005 -0600
    31.3 @@ -17,6 +17,7 @@
    31.4  #include <linux/types.h>
    31.5  #include <linux/list.h>
    31.6  #include <linux/miscdevice.h>
    31.7 +#include <linux/poll.h>
    31.8  #include <asm/uaccess.h>
    31.9  #include <asm-xen/xenbus.h>
   31.10  #include <asm-xen/xen-public/grant_table.h>
   31.11 @@ -680,9 +681,14 @@ vtpm_op_release(struct inode *inode, str
   31.12  }
   31.13  
   31.14  static unsigned int
   31.15 -vtpm_op_poll(struct file *file, struct poll_table_struct *pst)
   31.16 +vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
   31.17  {
   31.18 -	return 0;
   31.19 +	unsigned int flags = POLLOUT | POLLWRNORM;
   31.20 +	poll_wait(file, &dataex.wait_queue, pts);
   31.21 +	if (!list_empty(&dataex.pending_pak)) {
   31.22 +		flags |= POLLIN | POLLRDNORM;
   31.23 +	}
   31.24 +	return flags;
   31.25  }
   31.26  
   31.27  static struct file_operations vtpm_ops = {
   31.28 @@ -1070,11 +1076,6 @@ static int __init
   31.29  tpmback_init(void)
   31.30  {
   31.31  	int rc;
   31.32 -	if (!(xen_start_info->flags & SIF_TPM_BE_DOMAIN) &&
   31.33 -	    !(xen_start_info->flags & SIF_INITDOMAIN)) {
   31.34 -	    	printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n");
   31.35 -		return 0;
   31.36 -	}
   31.37  
   31.38  	if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
   31.39  		printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
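
vtpm_op_poll() used to return 0 unconditionally, which made select()/poll() on
the vtpm device useless: callers were never reported readable and could block
forever. It now registers the caller on dataex.wait_queue and reports
POLLIN | POLLRDNORM whenever pending_pak is non-empty, while always
advertising writability. For the wakeup to fire, the enqueue path (not visible
in this hunk) must signal the wait queue when it adds a packet, along these
lines (sketch; the list-member name 'next' is assumed):

    /* Hypothetical producer side matching the new poll() semantics. */
    list_add_tail(&pak->next, &dataex.pending_pak);
    wake_up_interruptible(&dataex.wait_queue);   /* re-runs poll() waiters */
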
    32.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Fri Oct 21 13:58:39 2005 -0600
    32.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Mon Oct 24 09:08:13 2005 -0600
    32.3 @@ -39,6 +39,7 @@
    32.4  #include <linux/interrupt.h>
    32.5  #include <linux/init.h>
    32.6  #include <linux/tpmfe.h>
    32.7 +#include <linux/err.h>
    32.8  
    32.9  #include <asm/semaphore.h>
   32.10  #include <asm/io.h>
   32.11 @@ -372,7 +373,7 @@ again:
   32.12  	info->watch.callback = watch_for_status;
   32.13  	err = register_xenbus_watch(&info->watch);
   32.14  	if (err) {
   32.15 -		message = "registering watch on backend";
   32.16 +		xenbus_dev_error(dev, err, "registering watch on backend");
   32.17  		goto destroy_tpmring;
   32.18  	}
   32.19  
   32.20 @@ -398,6 +399,8 @@ static int tpmfront_probe(struct xenbus_
   32.21  	int err;
   32.22  	struct tpmfront_info *info;
   32.23  	int handle;
   32.24 +	int len = max(XS_WATCH_PATH, XS_WATCH_TOKEN) + 1;
   32.25 +	const char *vec[len];
   32.26  
   32.27  	err = xenbus_scanf(NULL, dev->nodename,
   32.28  	                   "handle", "%i", &handle);
   32.29 @@ -427,6 +430,10 @@ static int tpmfront_probe(struct xenbus_
   32.30  		return err;
   32.31  	}
   32.32  
   32.33 +	vec[XS_WATCH_PATH]  = info->watch.node;
   32.34 +	vec[XS_WATCH_TOKEN] = NULL;
   32.35 +	watch_for_status(&info->watch, vec, len);
   32.36 +
   32.37  	return 0;
   32.38  }
   32.39  
    33.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Fri Oct 21 13:58:39 2005 -0600
    33.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Mon Oct 24 09:08:13 2005 -0600
    33.3 @@ -33,164 +33,139 @@
    33.4  #include <linux/interrupt.h>
    33.5  #include <linux/sched.h>
    33.6  #include <linux/err.h>
    33.7 +#include <asm-xen/xenbus.h>
    33.8  #include "xenbus_comms.h"
    33.9  
   33.10 -#define RINGBUF_DATASIZE ((PAGE_SIZE / 2) - sizeof(struct ringbuf_head))
   33.11 -struct ringbuf_head
   33.12 -{
   33.13 -	u32 write; /* Next place to write to */
   33.14 -	u32 read; /* Next place to read from */
   33.15 -	u8 flags;
   33.16 -	char buf[0];
   33.17 -} __attribute__((packed));
   33.18 +static int xenbus_irq      = 0;
   33.19  
   33.20 -static int xenbus_irq;
   33.21 +extern void xenbus_probe(void *); 
   33.22 +extern int xenstored_ready; 
   33.23 +static DECLARE_WORK(probe_work, xenbus_probe, NULL);
   33.24  
   33.25  DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
   33.26  
   33.27 -static inline struct ringbuf_head *outbuf(void)
   33.28 +static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
   33.29  {
   33.30  	return mfn_to_virt(xen_start_info->store_mfn);
   33.31  }
   33.32  
   33.33 -static inline struct ringbuf_head *inbuf(void)
   33.34 -{
   33.35 -	return mfn_to_virt(xen_start_info->store_mfn) + PAGE_SIZE/2;
   33.36 -}
   33.37 -
   33.38  static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
   33.39  {
   33.40 +	if(unlikely(xenstored_ready == 0)) {
   33.41 +		xenstored_ready = 1; 
   33.42 +		schedule_work(&probe_work); 
   33.43 +	} 
   33.44 +
   33.45  	wake_up(&xb_waitq);
   33.46  	return IRQ_HANDLED;
   33.47  }
   33.48  
   33.49 -static int check_buffer(const struct ringbuf_head *h)
   33.50 -{
   33.51 -	return (h->write < RINGBUF_DATASIZE && h->read < RINGBUF_DATASIZE);
   33.52 -}
   33.53 -
   33.54 -/* We can't fill last byte: would look like empty buffer. */
   33.55 -static void *get_output_chunk(const struct ringbuf_head *h,
   33.56 -			      void *buf, u32 *len)
   33.57 +static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
   33.58  {
   33.59 -	u32 read_mark;
   33.60 -
   33.61 -	if (h->read == 0)
   33.62 -		read_mark = RINGBUF_DATASIZE - 1;
   33.63 -	else
   33.64 -		read_mark = h->read - 1;
   33.65 -
   33.66 -	/* Here to the end of buffer, unless they haven't read some out. */
   33.67 -	*len = RINGBUF_DATASIZE - h->write;
   33.68 -	if (read_mark >= h->write)
   33.69 -		*len = read_mark - h->write;
   33.70 -	return buf + h->write;
   33.71 +	return ((prod - cons) <= XENSTORE_RING_SIZE);
   33.72  }
   33.73  
   33.74 -static const void *get_input_chunk(const struct ringbuf_head *h,
   33.75 -				   const void *buf, u32 *len)
   33.76 +static void *get_output_chunk(XENSTORE_RING_IDX cons,
   33.77 +			      XENSTORE_RING_IDX prod,
   33.78 +			      char *buf, uint32_t *len)
   33.79  {
   33.80 -	/* Here to the end of buffer, unless they haven't written some. */
   33.81 -	*len = RINGBUF_DATASIZE - h->read;
   33.82 -	if (h->write >= h->read)
   33.83 -		*len = h->write - h->read;
   33.84 -	return buf + h->read;
   33.85 +	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
   33.86 +	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
   33.87 +		*len = XENSTORE_RING_SIZE - (prod - cons);
   33.88 +	return buf + MASK_XENSTORE_IDX(prod);
   33.89  }
   33.90  
   33.91 -static void update_output_chunk(struct ringbuf_head *h, u32 len)
   33.92 -{
   33.93 -	h->write += len;
   33.94 -	if (h->write == RINGBUF_DATASIZE)
   33.95 -		h->write = 0;
   33.96 -}
   33.97 -
   33.98 -static void update_input_chunk(struct ringbuf_head *h, u32 len)
   33.99 +static const void *get_input_chunk(XENSTORE_RING_IDX cons,
  33.100 +				   XENSTORE_RING_IDX prod,
  33.101 +				   const char *buf, uint32_t *len)
  33.102  {
  33.103 -	h->read += len;
  33.104 -	if (h->read == RINGBUF_DATASIZE)
  33.105 -		h->read = 0;
  33.106 -}
  33.107 -
  33.108 -static int output_avail(struct ringbuf_head *out)
  33.109 -{
  33.110 -	unsigned int avail;
  33.111 -
  33.112 -	get_output_chunk(out, out->buf, &avail);
  33.113 -	return avail != 0;
  33.114 +	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
  33.115 +	if ((prod - cons) < *len)
  33.116 +		*len = prod - cons;
  33.117 +	return buf + MASK_XENSTORE_IDX(cons);
  33.118  }
  33.119  
  33.120  int xb_write(const void *data, unsigned len)
  33.121  {
  33.122 -	struct ringbuf_head h;
  33.123 -	struct ringbuf_head *out = outbuf();
  33.124 +	struct xenstore_domain_interface *intf = xenstore_domain_interface();
  33.125 +	XENSTORE_RING_IDX cons, prod;
  33.126  
  33.127 -	do {
  33.128 +	while (len != 0) {
  33.129  		void *dst;
  33.130  		unsigned int avail;
  33.131  
  33.132 -		wait_event_interruptible(xb_waitq, output_avail(out));
  33.133 +		wait_event_interruptible(xb_waitq,
  33.134 +					 (intf->req_prod - intf->req_cons) !=
  33.135 +					 XENSTORE_RING_SIZE);
  33.136  
  33.137 +		/* Read indexes, then verify. */
  33.138 +		cons = intf->req_cons;
  33.139 +		prod = intf->req_prod;
  33.140  		mb();
  33.141 -		h = *out;
  33.142 -		if (!check_buffer(&h))
  33.143 +		if (!check_indexes(cons, prod))
  33.144  			return -EIO;
  33.145  
  33.146 -		dst = get_output_chunk(&h, out->buf, &avail);
  33.147 +		dst = get_output_chunk(cons, prod, intf->req, &avail);
  33.148  		if (avail == 0)
  33.149  			continue;
  33.150  		if (avail > len)
  33.151  			avail = len;
  33.152 +
  33.153  		memcpy(dst, data, avail);
  33.154  		data += avail;
  33.155  		len -= avail;
  33.156 -		update_output_chunk(out, avail);
  33.157 +
  33.158 +		/* Other side must not see new header until data is there. */
  33.159 +		wmb();
  33.160 +		intf->req_prod += avail;
  33.161 +
  33.162 +		/* This implies mb() before other side sees interrupt. */
  33.163  		notify_remote_via_evtchn(xen_start_info->store_evtchn);
  33.164 -	} while (len != 0);
  33.165 +	}
  33.166  
  33.167  	return 0;
  33.168  }
  33.169  
  33.170 -int xs_input_avail(void)
  33.171 -{
  33.172 -	unsigned int avail;
  33.173 -	struct ringbuf_head *in = inbuf();
  33.174 -
  33.175 -	get_input_chunk(in, in->buf, &avail);
  33.176 -	return avail != 0;
  33.177 -}
  33.178 -
  33.179  int xb_read(void *data, unsigned len)
  33.180  {
  33.181 -	struct ringbuf_head h;
  33.182 -	struct ringbuf_head *in = inbuf();
  33.183 -	int was_full;
  33.184 +	struct xenstore_domain_interface *intf = xenstore_domain_interface();
  33.185 +	XENSTORE_RING_IDX cons, prod;
  33.186  
  33.187  	while (len != 0) {
  33.188  		unsigned int avail;
  33.189  		const char *src;
  33.190  
  33.191 -		wait_event_interruptible(xb_waitq, xs_input_avail());
  33.192 +		wait_event_interruptible(xb_waitq,
  33.193 +					 intf->rsp_cons != intf->rsp_prod);
  33.194  
  33.195 +		/* Read indexes, then verify. */
  33.196 +		cons = intf->rsp_cons;
  33.197 +		prod = intf->rsp_prod;
  33.198  		mb();
  33.199 -		h = *in;
  33.200 -		if (!check_buffer(&h))
  33.201 +		if (!check_indexes(cons, prod))
  33.202  			return -EIO;
  33.203  
  33.204 -		src = get_input_chunk(&h, in->buf, &avail);
  33.205 +		src = get_input_chunk(cons, prod, intf->rsp, &avail);
  33.206  		if (avail == 0)
  33.207  			continue;
  33.208  		if (avail > len)
  33.209  			avail = len;
  33.210 -		was_full = !output_avail(&h);
  33.211 +
  33.212 +		/* We must read header before we read data. */
  33.213 +		rmb();
  33.214  
  33.215  		memcpy(data, src, avail);
  33.216  		data += avail;
  33.217  		len -= avail;
  33.218 -		update_input_chunk(in, avail);
  33.219 +
  33.220 +		/* Other side must not see free space until we've copied out */
  33.221 +		mb();
  33.222 +		intf->rsp_cons += avail;
  33.223 +
  33.224  		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
  33.225 -		/* If it was full, tell them we've taken some. */
  33.226 -		if (was_full)
  33.227 -			notify_remote_via_evtchn(xen_start_info->store_evtchn);
  33.228 +
  33.229 +		/* Implies mb(): they will see new header. */
  33.230 +		notify_remote_via_evtchn(xen_start_info->store_evtchn);
  33.231  	}
  33.232  
  33.233  	return 0;
  33.234 @@ -203,10 +178,6 @@ int xb_init_comms(void)
  33.235  
  33.236  	if (xenbus_irq)
  33.237  		unbind_evtchn_from_irqhandler(xenbus_irq, &xb_waitq);
  33.238 -	xenbus_irq = 0;
  33.239 -
  33.240 -	if (!xen_start_info->store_evtchn)
  33.241 -		return 0;
  33.242  
  33.243  	err = bind_evtchn_to_irqhandler(
  33.244  		xen_start_info->store_evtchn, wake_waiting,
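
The xenbus_comms.c rewrite above does two things. wake_waiting() now
bootstraps the bus: the first store interrupt sets xenstored_ready and
schedules xenbus_probe() on a workqueue. And the old split-page ring (a
ringbuf_head per direction, explicit read/write offsets wrapping at
RINGBUF_DATASIZE, one byte permanently sacrificed to tell full from empty) is
replaced by the same free-running-index scheme as the console ring:
req_cons/req_prod and rsp_cons/rsp_prod increase monotonically, prod - cons is
the queued byte count even across 2^32 wraparound, MASK_XENSTORE_IDX() turns
an index into a buffer offset, and the whole XENSTORE_RING_SIZE is usable. An
index pair read from the (untrusted) shared page is sane exactly when
prod - cons <= XENSTORE_RING_SIZE, which is what check_indexes() tests. The
chunking helpers are pure index arithmetic; a self-contained model with a
quick check (hypothetical harness, mirroring get_output_chunk()):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t RING_IDX;
    #define RING_SIZE 1024u                 /* stand-in for XENSTORE_RING_SIZE */
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    /* Largest contiguous chunk writable at prod (cf. get_output_chunk). */
    static uint32_t output_chunk(RING_IDX cons, RING_IDX prod)
    {
        uint32_t len = RING_SIZE - MASK(prod);  /* up to end of the buffer */
        if (RING_SIZE - (prod - cons) < len)    /* capped by free space */
            len = RING_SIZE - (prod - cons);
        return len;
    }

    int main(void)
    {
        assert(output_chunk(0, 0) == RING_SIZE);        /* empty ring */
        assert(output_chunk(0, RING_SIZE) == 0);        /* full ring */
        assert(output_chunk(100, 1020) == 4);           /* stops at buffer end */
        assert(output_chunk(0xfffffffeu, 1u) == 1021);  /* wraparound-safe */
        printf("index arithmetic ok\n");
        return 0;
    }
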
    34.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Fri Oct 21 13:58:39 2005 -0600
    34.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Mon Oct 24 09:08:13 2005 -0600
    34.3 @@ -27,18 +27,29 @@
    34.4   */
    34.5  #define DEBUG
    34.6  
    34.7 -#include <asm/hypervisor.h>
    34.8 -#include <asm-xen/xenbus.h>
    34.9 -#include <asm-xen/balloon.h>
   34.10  #include <linux/kernel.h>
   34.11  #include <linux/err.h>
   34.12  #include <linux/string.h>
   34.13  #include <linux/ctype.h>
   34.14  #include <linux/fcntl.h>
   34.15 -#include <stdarg.h>
   34.16 +#include <linux/mm.h>
   34.17  #include <linux/notifier.h>
   34.18 +#include <linux/kthread.h>
   34.19 +
   34.20 +#include <asm/io.h>
   34.21 +#include <asm/page.h>
   34.22 +#include <asm/pgtable.h>
   34.23 +#include <asm/hypervisor.h>
   34.24 +#include <asm-xen/xenbus.h>
   34.25 +#include <asm-xen/xen_proc.h>
   34.26 +#include <asm-xen/balloon.h>
   34.27 +#include <asm-xen/evtchn.h>
   34.28 +#include <asm-xen/linux-public/evtchn.h>
   34.29 +
   34.30  #include "xenbus_comms.h"
   34.31  
   34.32 +extern struct semaphore xenwatch_mutex;
   34.33 +
   34.34  #define streq(a, b) (strcmp((a), (b)) == 0)
   34.35  
   34.36  static struct notifier_block *xenstore_chain;
   34.37 @@ -229,13 +240,18 @@ static int xenbus_dev_remove(struct devi
   34.38  static int xenbus_register_driver_common(struct xenbus_driver *drv,
   34.39  					 struct xen_bus_type *bus)
   34.40  {
   34.41 +	int ret;
   34.42 +
   34.43  	drv->driver.name = drv->name;
   34.44  	drv->driver.bus = &bus->bus;
   34.45  	drv->driver.owner = drv->owner;
   34.46  	drv->driver.probe = xenbus_dev_probe;
   34.47  	drv->driver.remove = xenbus_dev_remove;
   34.48  
   34.49 -	return driver_register(&drv->driver);
   34.50 +	down(&xenwatch_mutex);
   34.51 +	ret = driver_register(&drv->driver);
   34.52 +	up(&xenwatch_mutex);
   34.53 +	return ret;
   34.54  }
   34.55  
   34.56  int xenbus_register_driver(struct xenbus_driver *drv)
   34.57 @@ -627,15 +643,19 @@ void xenbus_resume(void)
   34.58  	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
   34.59  }
   34.60  
   34.61 +
   34.62 +/* A flag to determine if xenstored is 'ready' (i.e. has started) */
   34.63 +int xenstored_ready = 0; 
   34.64 +
   34.65 +
   34.66  int register_xenstore_notifier(struct notifier_block *nb)
   34.67  {
   34.68  	int ret = 0;
   34.69  
   34.70 -	if (xen_start_info->store_evtchn) {
   34.71 +	if (xenstored_ready > 0)
   34.72  		ret = nb->notifier_call(nb, 0, NULL);
   34.73 -	} else {
   34.74 +	else
   34.75  		notifier_chain_register(&xenstore_chain, nb);
   34.76 -	}
   34.77  
   34.78  	return ret;
   34.79  }
   34.80 @@ -647,22 +667,11 @@ void unregister_xenstore_notifier(struct
   34.81  }
   34.82  EXPORT_SYMBOL(unregister_xenstore_notifier);
   34.83  
   34.84 -/* 
   34.85 -** Called either from below xenbus_probe_init() initcall (for domUs) 
   34.86 -** or, for dom0, from a thread created in privcmd/privcmd.c (after 
   34.87 -** the user-space tools have invoked initDomainStore()) 
   34.88 -*/
   34.89 -int do_xenbus_probe(void *unused)
   34.90 +
   34.91 +
   34.92 +void xenbus_probe(void *unused)
   34.93  {
   34.94 -	int err = 0;
   34.95 -
   34.96 -	/* Initialize the interface to xenstore. */
   34.97 -	err = xs_init();
   34.98 -	if (err) {
   34.99 -		printk("XENBUS: Error initializing xenstore comms:"
  34.100 -		       " %i\n", err);
  34.101 -		return err;
  34.102 -	}
   34.103 +	BUG_ON(xenstored_ready <= 0);
  34.104  
  34.105  	/* Enumerate devices in xenstore. */
  34.106  	xenbus_probe_devices(&xenbus_frontend);
  34.107 @@ -675,27 +684,101 @@ int do_xenbus_probe(void *unused)
  34.108  	/* Notify others that xenstore is up */
  34.109  	notifier_call_chain(&xenstore_chain, 0, 0);
  34.110  
  34.111 -	return 0;
  34.112 +	return;
  34.113  }
  34.114  
  34.115 +
  34.116 +static struct proc_dir_entry *xsd_mfn_intf;
  34.117 +static struct proc_dir_entry *xsd_port_intf;
  34.118 +
  34.119 +
  34.120 +static int xsd_mfn_read(char *page, char **start, off_t off,
  34.121 +                        int count, int *eof, void *data)
  34.122 +{
  34.123 +	int len; 
   34.124 +	len  = sprintf(page, "%lu", xen_start_info->store_mfn);
  34.125 +	*eof = 1; 
  34.126 +	return len; 
  34.127 +}
  34.128 +
  34.129 +static int xsd_port_read(char *page, char **start, off_t off,
  34.130 +			 int count, int *eof, void *data)
  34.131 +{
  34.132 +	int len; 
  34.133 +
  34.134 +	len  = sprintf(page, "%d", xen_start_info->store_evtchn); 
  34.135 +	*eof = 1; 
  34.136 +	return len; 
  34.137 +}
  34.138 +
  34.139 +
  34.140  static int __init xenbus_probe_init(void)
  34.141  {
  34.142 -	if (xen_init() < 0)
  34.143 +	int err = 0;
  34.144 +	/* 
  34.145 +	** Domain0 doesn't have a store_evtchn or store_mfn yet. 
  34.146 +	*/
  34.147 +	int dom0 = (xen_start_info->store_evtchn == 0);
  34.148 +
  34.149 +	printk("xenbus_probe_init\n");
  34.150 +
  34.151 +	if (xen_init() < 0) {
  34.152 +		printk("xen_init failed\n");
  34.153  		return -ENODEV;
  34.154 +	}
  34.155  
  34.156 +	/* Register ourselves with the kernel bus & device subsystems */
  34.157  	bus_register(&xenbus_frontend.bus);
  34.158  	bus_register(&xenbus_backend.bus);
  34.159  	device_register(&xenbus_frontend.dev);
  34.160  	device_register(&xenbus_backend.dev);
  34.161  
  34.162 -	/* 
  34.163 -	** Domain0 doesn't have a store_evtchn yet - this will
  34.164 -	** be set up later by xend invoking initDomainStore() 
  34.165 -	*/
  34.166 -	if (!xen_start_info->store_evtchn)
  34.167 -		return 0;
  34.168 +	if (dom0) {
  34.169 +
  34.170 +		unsigned long page;
  34.171 +		evtchn_op_t op = { 0 };
  34.172 +
  34.173 +
  34.174 +		/* Allocate page. */
  34.175 +		page = get_zeroed_page(GFP_KERNEL);
  34.176 +		if (!page) 
  34.177 +			return -ENOMEM; 
  34.178 +
  34.179 +		/* We don't refcnt properly, so set reserved on page.
  34.180 +		 * (this allocation is permanent) */
  34.181 +		SetPageReserved(virt_to_page(page));
  34.182  
  34.183 -	do_xenbus_probe(NULL);
  34.184 +		xen_start_info->store_mfn =
  34.185 +			pfn_to_mfn(virt_to_phys((void *)page) >>
  34.186 +				   PAGE_SHIFT);
  34.187 +		
  34.188 +		/* Next allocate a local port which xenstored can bind to */
  34.189 +		op.cmd = EVTCHNOP_alloc_unbound;
  34.190 +		op.u.alloc_unbound.dom        = DOMID_SELF;
  34.191 +		op.u.alloc_unbound.remote_dom = 0; 
  34.192 +
  34.193 +		BUG_ON(HYPERVISOR_event_channel_op(&op)); 
  34.194 +		xen_start_info->store_evtchn = op.u.alloc_unbound.port;
  34.195 +
  34.196 +		/* And finally publish the above info in /proc/xen */
   34.197 +		if ((xsd_mfn_intf = create_xen_proc_entry("xsd_mfn", 0400)))
   34.198 +			xsd_mfn_intf->read_proc = xsd_mfn_read;
   34.199 +		if ((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
   34.200 +			xsd_port_intf->read_proc = xsd_port_read;
  34.201 +	}
  34.202 +
  34.203 +	/* Initialize the interface to xenstore. */
  34.204 +	err = xs_init(); 
  34.205 +	if (err) {
  34.206 +		printk("XENBUS: Error initializing xenstore comms: %i\n", err);
  34.207 +		return err; 
  34.208 +	}
  34.209 +
  34.210 +	if (!dom0) {
  34.211 +		xenstored_ready = 1;
  34.212 +		xenbus_probe(NULL);
  34.213 +	}
  34.214 +
  34.215  	return 0;
  34.216  }
  34.217  
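
With this change a dom0 kernel bootstraps the store itself: it allocates the ring page, asks the hypervisor for an unbound event channel (EVTCHNOP_alloc_unbound), and publishes the resulting MFN and port through /proc/xen/xsd_mfn and /proc/xen/xsd_port; only domUs, which arrive with store_evtchn already set, probe immediately. A hypothetical userspace counterpart, sketching how a daemon such as xenstored could consume those two files — the file names come from the patch, the parsing is an assumption:

    #include <stdio.h>

    static int read_ulong(const char *path, unsigned long *out)
    {
        FILE *f = fopen(path, "r");
        int rc;

        if (!f)
            return -1;
        rc = (fscanf(f, "%lu", out) == 1) ? 0 : -1;
        fclose(f);
        return rc;
    }

    int main(void)
    {
        unsigned long mfn, port;

        if (read_ulong("/proc/xen/xsd_mfn", &mfn) ||
            read_ulong("/proc/xen/xsd_port", &port))
            return 1;
        printf("store ring at mfn %#lx, event channel %lu\n", mfn, port);
        /* xenstored would now map the ring page and bind the port. */
        return 0;
    }
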
    35.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Fri Oct 21 13:58:39 2005 -0600
    35.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c	Mon Oct 24 09:08:13 2005 -0600
    35.3 @@ -92,7 +92,7 @@ static DEFINE_SPINLOCK(watch_events_lock
    35.4   * carrying out work.
    35.5   */
    35.6  static pid_t xenwatch_pid;
    35.7 -static DECLARE_MUTEX(xenwatch_mutex);
    35.8 +/* static */ DECLARE_MUTEX(xenwatch_mutex);
    35.9  static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
   35.10  
   35.11  static int get_error(const char *errorstring)
   35.12 @@ -516,17 +516,38 @@ int xenbus_printf(struct xenbus_transact
   35.13  }
   35.14  EXPORT_SYMBOL(xenbus_printf);
   35.15  
   35.16 +/**
   35.17 + * Return the path to the error node for the given device, or NULL on failure.
   35.18 + * If the value returned is non-NULL, then it is the caller's to kfree.
   35.19 + */
   35.20 +static char *error_path(struct xenbus_device *dev)
   35.21 +{
   35.22 +	char *path_buffer = kmalloc(strlen("error/") + strlen(dev->nodename) +
   35.23 +				    1, GFP_KERNEL);
   35.24 +	if (path_buffer == NULL) {
   35.25 +		return NULL;
   35.26 +	}
   35.27 +
   35.28 +	strcpy(path_buffer, "error/");
   35.29 +	strcpy(path_buffer + strlen("error/"), dev->nodename);
   35.30 +
   35.31 +	return path_buffer;
   35.32 +}
   35.33 +
   35.34  /* Report a (negative) errno into the store, with explanation. */
   35.35  void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
   35.36  {
   35.37  	va_list ap;
   35.38  	int ret;
   35.39  	unsigned int len;
   35.40 -	char *printf_buffer;
   35.41 +	char *printf_buffer = NULL, *path_buffer = NULL;
   35.42  
   35.43  	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
   35.44 -	if (printf_buffer == NULL)
   35.45 +	if (printf_buffer == NULL) {
   35.46 +		printk("xenbus: failed to write error node for %s (%d): %d\n",
   35.47 +		       dev->nodename, err, errno);
   35.48  		goto fail;
   35.49 +	}
   35.50  
   35.51  	len = sprintf(printf_buffer, "%i ", -err);
   35.52  	va_start(ap, fmt);
   35.53 @@ -535,15 +556,26 @@ void xenbus_dev_error(struct xenbus_devi
   35.54  
   35.55  	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
   35.56  	dev->has_error = 1;
   35.57 -	if (xenbus_write(NULL, dev->nodename, "error", printf_buffer) != 0)
   35.58 +
   35.59 +	path_buffer = error_path(dev);
   35.60 +
   35.61 +	if (path_buffer == NULL) {
   35.62 +		printk("xenbus: failed to write error node for %s (%s): %d\n",
   35.63 +		       dev->nodename, printf_buffer, errno);
   35.64  		goto fail;
   35.65 +	}
   35.66  
   35.67 -	kfree(printf_buffer);
   35.68 -	return;
   35.69 +	if (xenbus_write(NULL, path_buffer, "error", printf_buffer) != 0) {
   35.70 +		printk("xenbus: failed to write error node for %s (%s)\n",
   35.71 +		       dev->nodename, printf_buffer);
   35.72 +		goto fail;
   35.73 +	}
   35.74  
   35.75 - fail:
   35.76 -	printk("xenbus: failed to write error node for %s (%s)\n",
   35.77 -	       dev->nodename, printf_buffer);
   35.78 +fail:
   35.79 +	if (printf_buffer)
   35.80 +		kfree(printf_buffer);
   35.81 +	if (path_buffer)
   35.82 +		kfree(path_buffer);
   35.83  }
   35.84  EXPORT_SYMBOL(xenbus_dev_error);
   35.85  
   35.86 @@ -551,11 +583,21 @@ EXPORT_SYMBOL(xenbus_dev_error);
   35.87  void xenbus_dev_ok(struct xenbus_device *dev)
   35.88  {
   35.89  	if (dev->has_error) {
   35.90 -		if (xenbus_rm(NULL, dev->nodename, "error") != 0)
   35.91 +		char *path_buffer = error_path(dev);
   35.92 +
   35.93 +		if (path_buffer == NULL) {
   35.94 +			printk("xenbus: failed to clear error node for %s: "
   35.95 +			       "%d\n", dev->nodename, errno);
   35.96 +			return;
   35.97 +		}
   35.98 +
   35.99 +		if (xenbus_rm(NULL, path_buffer, "error") != 0)
  35.100  			printk("xenbus: failed to clear error node for %s\n",
  35.101  			       dev->nodename);
  35.102  		else
  35.103  			dev->has_error = 0;
  35.104 +
  35.105 +		kfree(path_buffer);
  35.106  	}
  35.107  }
  35.108  EXPORT_SYMBOL(xenbus_dev_ok);
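
error_path() above concatenates "error/" with the device node name into a kmalloc'd buffer that the caller must kfree. The same length arithmetic, restated as a self-contained userspace sketch with snprintf (malloc/free standing in for kmalloc/kfree) to make the +1 for the terminating NUL explicit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *error_path_sketch(const char *nodename)
    {
        size_t len = strlen("error/") + strlen(nodename) + 1;  /* +1: NUL */
        char *buf = malloc(len);

        if (buf)
            snprintf(buf, len, "error/%s", nodename);
        return buf;                     /* caller owns and must free it */
    }

    int main(void)
    {
        char *p = error_path_sketch("device/vbd/769");

        if (p) {
            puts(p);                    /* prints: error/device/vbd/769 */
            free(p);
        }
        return 0;
    }
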
    36.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypervisor.h	Fri Oct 21 13:58:39 2005 -0600
    36.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypervisor.h	Mon Oct 24 09:08:13 2005 -0600
    36.3 @@ -50,6 +50,8 @@
    36.4  # endif
    36.5  #endif
    36.6  
    36.7 +extern shared_info_t *HYPERVISOR_shared_info;
    36.8 +
    36.9  /* arch/xen/i386/kernel/setup.c */
   36.10  extern start_info_t *xen_start_info;
   36.11  
    37.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h	Fri Oct 21 13:58:39 2005 -0600
    37.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.3 @@ -1,55 +0,0 @@
    37.4 -/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
    37.5 - * which needs to alter them. */
    37.6 -
    37.7 -static inline void smpboot_clear_io_apic_irqs(void)
    37.8 -{
    37.9 -#ifdef CONFIG_X86_IO_APIC
   37.10 -	io_apic_irqs = 0;
   37.11 -#endif
   37.12 -}
   37.13 -
   37.14 -static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
   37.15 -{
   37.16 -#if 1
   37.17 -	printk("smpboot_setup_warm_reset_vector\n");
   37.18 -#else
   37.19 -	CMOS_WRITE(0xa, 0xf);
   37.20 -	local_flush_tlb();
   37.21 -	Dprintk("1.\n");
   37.22 -	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
   37.23 -	Dprintk("2.\n");
   37.24 -	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
   37.25 -	Dprintk("3.\n");
   37.26 -#endif
   37.27 -}
   37.28 -
   37.29 -static inline void smpboot_restore_warm_reset_vector(void)
   37.30 -{
   37.31 -	/*
   37.32 -	 * Install writable page 0 entry to set BIOS data area.
   37.33 -	 */
   37.34 -	local_flush_tlb();
   37.35 -
   37.36 -	/*
   37.37 -	 * Paranoid:  Set warm reset code and vector here back
   37.38 -	 * to default values.
   37.39 -	 */
   37.40 -	CMOS_WRITE(0, 0xf);
   37.41 -
   37.42 -	*((volatile long *) phys_to_virt(0x467)) = 0;
   37.43 -}
   37.44 -
   37.45 -static inline void smpboot_setup_io_apic(void)
   37.46 -{
   37.47 -#ifdef CONFIG_X86_IO_APIC
   37.48 -	/*
   37.49 -	 * Here we can be sure that there is an IO-APIC in the system. Let's
   37.50 -	 * go and set it up:
   37.51 -	 */
   37.52 -	if (!skip_ioapic_setup && nr_ioapics)
   37.53 -		setup_IO_APIC();
   37.54 -#endif
   37.55 -}
   37.56 -
   37.57 -
   37.58 -#define	smp_found_config	(HYPERVISOR_shared_info->n_vcpu > 1)
    38.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Oct 21 13:58:39 2005 -0600
    38.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Mon Oct 24 09:08:13 2005 -0600
    38.3 @@ -455,6 +455,11 @@ int direct_remap_pfn_range(struct vm_are
    38.4                              unsigned long size, 
    38.5                              pgprot_t prot,
    38.6                              domid_t  domid);
    38.7 +int direct_kernel_remap_pfn_range(unsigned long address, 
    38.8 +				  unsigned long mfn,
    38.9 +				  unsigned long size, 
   38.10 +				  pgprot_t prot,
   38.11 +				  domid_t  domid);
   38.12  int create_lookup_pte_addr(struct mm_struct *mm,
   38.13                             unsigned long address,
   38.14                             unsigned long *ptep);
    39.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h	Fri Oct 21 13:58:39 2005 -0600
    39.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/setup.h	Mon Oct 24 09:08:13 2005 -0600
    39.3 @@ -8,7 +8,7 @@
    39.4  
    39.5  #define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
    39.6  #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
    39.7 -#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
    39.8 +#define PFN_PHYS(x)	((unsigned long long)(x) << PAGE_SHIFT)
    39.9  
   39.10  /*
   39.11   * Reserved space for vmalloc and iomap - defined in asm/page.h
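
The PFN_PHYS widening matters on i386 with PAE, where physical addresses can exceed 32 bits: shifting a 32-bit frame number left by PAGE_SHIFT silently wraps for any pfn at or above 0x100000 (the 4 GiB boundary). A self-contained demonstration, using uint32_t to model the 32-bit unsigned long of the original macro:

    #include <stdio.h>
    #include <inttypes.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t pfn      = 0x100000;                    /* first frame at 4 GiB */
        uint32_t old_phys = pfn << PAGE_SHIFT;           /* pre-patch: wraps to 0 */
        uint64_t new_phys = (uint64_t)pfn << PAGE_SHIFT; /* patched: 0x100000000 */

        printf("old: 0x%" PRIx32 "  new: 0x%" PRIx64 "\n", old_phys, new_phys);
        return 0;
    }
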
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/smp.h	Mon Oct 24 09:08:13 2005 -0600
    40.3 @@ -0,0 +1,93 @@
    40.4 +#ifndef __ASM_SMP_H
    40.5 +#define __ASM_SMP_H
    40.6 +
    40.7 +/*
    40.8 + * We need the APIC definitions automatically as part of 'smp.h'
    40.9 + */
   40.10 +#ifndef __ASSEMBLY__
   40.11 +#include <linux/config.h>
   40.12 +#include <linux/kernel.h>
   40.13 +#include <linux/threads.h>
   40.14 +#include <linux/cpumask.h>
   40.15 +#endif
   40.16 +
   40.17 +#ifdef CONFIG_X86_LOCAL_APIC
   40.18 +#ifndef __ASSEMBLY__
   40.19 +#include <asm/fixmap.h>
   40.20 +#include <asm/bitops.h>
   40.21 +#include <asm/mpspec.h>
   40.22 +#ifdef CONFIG_X86_IO_APIC
   40.23 +#include <asm/io_apic.h>
   40.24 +#endif
   40.25 +#include <asm/apic.h>
   40.26 +#endif
   40.27 +#endif
   40.28 +
   40.29 +#define BAD_APICID 0xFFu
   40.30 +#ifdef CONFIG_SMP
   40.31 +#ifndef __ASSEMBLY__
   40.32 +
   40.33 +/*
   40.34 + * Private routines/data
   40.35 + */
   40.36 + 
   40.37 +extern void smp_alloc_memory(void);
   40.38 +extern int pic_mode;
   40.39 +extern int smp_num_siblings;
   40.40 +extern cpumask_t cpu_sibling_map[];
   40.41 +extern cpumask_t cpu_core_map[];
   40.42 +
   40.43 +extern void smp_flush_tlb(void);
   40.44 +extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
   40.45 +extern void smp_invalidate_rcv(void);		/* Process an NMI */
   40.46 +extern void (*mtrr_hook) (void);
   40.47 +extern void zap_low_mappings (void);
   40.48 +
   40.49 +#define MAX_APICID 256
   40.50 +extern u8 x86_cpu_to_apicid[];
   40.51 +
   40.52 +/*
   40.53 + * This function is needed by all SMP systems. It must _always_ be valid
   40.54 + * from the initial startup. We map APIC_BASE very early in page_setup(),
   40.55 + * so this is correct in the x86 case.
   40.56 + */
   40.57 +#define __smp_processor_id() (current_thread_info()->cpu)
   40.58 +
   40.59 +extern cpumask_t cpu_possible_map;
   40.60 +#define cpu_callin_map cpu_possible_map
   40.61 +
   40.62 +/* We don't mark CPUs online until __cpu_up(), so we need another measure */
   40.63 +static inline int num_booting_cpus(void)
   40.64 +{
   40.65 +	return cpus_weight(cpu_possible_map);
   40.66 +}
   40.67 +
   40.68 +#ifdef CONFIG_X86_LOCAL_APIC
   40.69 +
   40.70 +#ifdef APIC_DEFINITION
   40.71 +extern int hard_smp_processor_id(void);
   40.72 +#else
   40.73 +#include <mach_apicdef.h>
   40.74 +static inline int hard_smp_processor_id(void)
   40.75 +{
   40.76 +	/* we don't want to mark this access volatile - bad code generation */
   40.77 +	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
   40.78 +}
   40.79 +#endif
   40.80 +
   40.81 +static __inline int logical_smp_processor_id(void)
   40.82 +{
   40.83 +	/* we don't want to mark this access volatile - bad code generation */
   40.84 +	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
   40.85 +}
   40.86 +
   40.87 +#endif
   40.88 +
   40.89 +extern int __cpu_disable(void);
   40.90 +extern void __cpu_die(unsigned int cpu);
   40.91 +#endif /* !__ASSEMBLY__ */
   40.92 +
   40.93 +#define NO_PROC_ID		0xFF		/* No processor magic marker */
   40.94 +
   40.95 +#endif
   40.96 +#endif
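
num_booting_cpus() in the new header is just a population count over cpu_possible_map via cpus_weight(). A one-word userspace equivalent, assuming a single-long bitmap for illustration:

    #include <stdio.h>

    static int hweight_long(unsigned long w)
    {
        int n = 0;

        while (w) {
            w &= w - 1;                /* clear the lowest set bit */
            n++;
        }
        return n;
    }

    int main(void)
    {
        unsigned long cpu_possible_map = 0x0f;   /* cpus 0-3 possible */

        printf("booting cpus: %d\n", hweight_long(cpu_possible_map));
        return 0;
    }
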
    41.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h	Fri Oct 21 13:58:39 2005 -0600
    41.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h	Mon Oct 24 09:08:13 2005 -0600
    41.3 @@ -497,22 +497,11 @@ unsigned long __set_mb_temp;            
    41.4   * includes these barriers, for example.
    41.5   */
    41.6  
    41.7 -/*
    41.8 - * Don't use smp_processor_id() in preemptible code: debug builds will barf.
    41.9 - * It's okay in these cases as we only read the upcall mask in preemptible
   41.10 - * regions, which is always safe.
   41.11 - */
   41.12 -#ifdef CONFIG_SMP
   41.13 -#define __this_cpu()	__smp_processor_id()
   41.14 -#else
   41.15 -#define __this_cpu()	0
   41.16 -#endif
   41.17 -
   41.18  #define __cli()								\
   41.19  do {									\
   41.20  	vcpu_info_t *_vcpu;						\
   41.21  	preempt_disable();						\
   41.22 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   41.23 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.24  	_vcpu->evtchn_upcall_mask = 1;					\
   41.25  	preempt_enable_no_resched();					\
   41.26  	barrier();							\
   41.27 @@ -523,7 +512,7 @@ do {									\
   41.28  	vcpu_info_t *_vcpu;						\
   41.29  	barrier();							\
   41.30  	preempt_disable();						\
   41.31 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   41.32 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.33  	_vcpu->evtchn_upcall_mask = 0;					\
   41.34  	barrier(); /* unmask then check (avoid races) */		\
   41.35  	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
   41.36 @@ -534,8 +523,10 @@ do {									\
   41.37  #define __save_flags(x)							\
   41.38  do {									\
   41.39  	vcpu_info_t *_vcpu;						\
   41.40 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   41.41 +	preempt_disable();						\
   41.42 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.43  	(x) = _vcpu->evtchn_upcall_mask;				\
   41.44 +	preempt_enable();						\
   41.45  } while (0)
   41.46  
   41.47  #define __restore_flags(x)						\
   41.48 @@ -543,7 +534,7 @@ do {									\
   41.49  	vcpu_info_t *_vcpu;						\
   41.50  	barrier();							\
   41.51  	preempt_disable();						\
   41.52 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   41.53 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.54  	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
   41.55  		barrier(); /* unmask then check (avoid races) */	\
   41.56  		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
   41.57 @@ -559,7 +550,7 @@ do {									\
   41.58  do {									\
   41.59  	vcpu_info_t *_vcpu;						\
   41.60  	preempt_disable();						\
   41.61 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   41.62 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.63  	(x) = _vcpu->evtchn_upcall_mask;				\
   41.64  	_vcpu->evtchn_upcall_mask = 1;					\
   41.65  	preempt_enable_no_resched();					\
   41.66 @@ -572,8 +563,15 @@ do {									\
   41.67  #define local_irq_disable()	__cli()
   41.68  #define local_irq_enable()	__sti()
   41.69  
   41.70 +/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
   41.71  #define irqs_disabled()							\
   41.72 -	HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
   41.73 +({	int ___x;							\
   41.74 +	vcpu_info_t *_vcpu;						\
   41.75 +	preempt_disable();						\
   41.76 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   41.77 +	___x = (_vcpu->evtchn_upcall_mask != 0);			\
   41.78 +	preempt_enable_no_resched();					\
   41.79 +	___x; })
   41.80  
   41.81  /*
   41.82   * disable hlt during certain critical i/o operations
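
The new irqs_disabled() is a GCC statement expression: it can wrap the per-vcpu read in preempt_disable()/preempt_enable_no_resched() yet still yield a value, and it must avoid plain preempt_enable() because that can call preempt_schedule(), which itself consults irqs_disabled(). A self-contained model of the pattern, with the preemption calls stubbed out and an array standing in for the per-vcpu upcall masks:

    #include <stdio.h>

    static int upcall_mask[2] = { 0, 1 };         /* pretend per-vcpu masks */
    static int cpu = 1;                           /* pretend smp_processor_id() */

    #define preempt_disable()           ((void)0) /* stubs: the real ones */
    #define preempt_enable_no_resched() ((void)0) /* touch the preempt count */

    #define irqs_disabled_model()                 \
    ({  int ___x;                                 \
        preempt_disable();                        \
        ___x = (upcall_mask[cpu] != 0);           \
        preempt_enable_no_resched();              \
        ___x; })

    int main(void)
    {
        printf("irqs disabled: %d\n", irqs_disabled_model());  /* prints 1 */
        return 0;
    }
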
    42.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h	Fri Oct 21 13:58:39 2005 -0600
    42.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h	Mon Oct 24 09:08:13 2005 -0600
    42.3 @@ -39,6 +39,7 @@
    42.4  #include <asm/ptrace.h>
    42.5  #include <asm/page.h>
    42.6  
    42.7 +extern shared_info_t *HYPERVISOR_shared_info;
    42.8  extern start_info_t *xen_start_info;
    42.9  
   42.10  void force_evtchn_callback(void);
    43.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/smpboot_hooks.h	Fri Oct 21 13:58:39 2005 -0600
    43.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.3 @@ -1,55 +0,0 @@
    43.4 -/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
    43.5 - * which needs to alter them. */
    43.6 -
    43.7 -static inline void smpboot_clear_io_apic_irqs(void)
    43.8 -{
    43.9 -#ifdef CONFIG_X86_IO_APIC
   43.10 -	io_apic_irqs = 0;
   43.11 -#endif
   43.12 -}
   43.13 -
   43.14 -static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
   43.15 -{
   43.16 -#if 1
   43.17 -	printk("smpboot_setup_warm_reset_vector\n");
   43.18 -#else
   43.19 -	CMOS_WRITE(0xa, 0xf);
   43.20 -	local_flush_tlb();
   43.21 -	Dprintk("1.\n");
   43.22 -	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
   43.23 -	Dprintk("2.\n");
   43.24 -	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
   43.25 -	Dprintk("3.\n");
   43.26 -#endif
   43.27 -}
   43.28 -
   43.29 -static inline void smpboot_restore_warm_reset_vector(void)
   43.30 -{
   43.31 -	/*
   43.32 -	 * Install writable page 0 entry to set BIOS data area.
   43.33 -	 */
   43.34 -	local_flush_tlb();
   43.35 -
   43.36 -	/*
   43.37 -	 * Paranoid:  Set warm reset code and vector here back
   43.38 -	 * to default values.
   43.39 -	 */
   43.40 -	CMOS_WRITE(0, 0xf);
   43.41 -
   43.42 -	*((volatile long *) phys_to_virt(0x467)) = 0;
   43.43 -}
   43.44 -
   43.45 -static inline void smpboot_setup_io_apic(void)
   43.46 -{
   43.47 -#ifdef CONFIG_X86_IO_APIC
   43.48 -	/*
   43.49 -	 * Here we can be sure that there is an IO-APIC in the system. Let's
   43.50 -	 * go and set it up:
   43.51 -	 */
   43.52 -	if (!skip_ioapic_setup && nr_ioapics)
   43.53 -		setup_IO_APIC();
   43.54 -#endif
   43.55 -}
   43.56 -
   43.57 -
   43.58 -#define	smp_found_config	(HYPERVISOR_shared_info->n_vcpu > 1)
    44.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Oct 21 13:58:39 2005 -0600
    44.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Mon Oct 24 09:08:13 2005 -0600
    44.3 @@ -533,6 +533,12 @@ int direct_remap_pfn_range(struct vm_are
    44.4                              pgprot_t prot,
    44.5                              domid_t  domid);
    44.6  
    44.7 +int direct_kernel_remap_pfn_range(unsigned long address, 
    44.8 +				  unsigned long mfn,
    44.9 +				  unsigned long size, 
   44.10 +				  pgprot_t prot,
   44.11 +				  domid_t  domid);
   44.12 +
   44.13  int create_lookup_pte_addr(struct mm_struct *mm,
   44.14                             unsigned long address,
   44.15                             unsigned long *ptep);
    45.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h	Fri Oct 21 13:58:39 2005 -0600
    45.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/smp.h	Mon Oct 24 09:08:13 2005 -0600
    45.3 @@ -34,7 +34,6 @@ struct pt_regs;
    45.4  extern cpumask_t cpu_present_mask;
    45.5  extern cpumask_t cpu_possible_map;
    45.6  extern cpumask_t cpu_online_map;
    45.7 -extern cpumask_t cpu_callout_map;
    45.8  
    45.9  /*
   45.10   * Private routines/data
   45.11 @@ -52,8 +51,8 @@ extern void zap_low_mappings(void);
   45.12  void smp_stop_cpu(void);
   45.13  extern cpumask_t cpu_sibling_map[NR_CPUS];
   45.14  extern cpumask_t cpu_core_map[NR_CPUS];
   45.15 -extern u8 phys_proc_id[NR_CPUS];
   45.16 -extern u8 cpu_core_id[NR_CPUS];
   45.17 +extern int phys_proc_id[NR_CPUS];
   45.18 +extern int cpu_core_id[NR_CPUS];
   45.19  
   45.20  #define SMP_TRAMPOLINE_BASE 0x6000
   45.21  
   45.22 @@ -65,7 +64,7 @@ extern u8 cpu_core_id[NR_CPUS];
   45.23  
   45.24  static inline int num_booting_cpus(void)
   45.25  {
   45.26 -	return cpus_weight(cpu_callout_map);
   45.27 +	return cpus_weight(cpu_possible_map);
   45.28  }
   45.29  
   45.30  #define __smp_processor_id() read_pda(cpunumber)
    46.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h	Fri Oct 21 13:58:39 2005 -0600
    46.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h	Mon Oct 24 09:08:13 2005 -0600
    46.3 @@ -321,22 +321,11 @@ static inline unsigned long __cmpxchg(vo
    46.4   * includes these barriers, for example.
    46.5   */
    46.6  
    46.7 -/*
    46.8 - * Don't use smp_processor_id() in preemptible code: debug builds will barf.
    46.9 - * It's okay in these cases as we only read the upcall mask in preemptible
   46.10 - * regions, which is always safe.
   46.11 - */
   46.12 -#ifdef CONFIG_SMP
   46.13 -#define __this_cpu()	__smp_processor_id()
   46.14 -#else
   46.15 -#define __this_cpu()	0
   46.16 -#endif
   46.17 -
   46.18  #define __cli()								\
   46.19  do {									\
   46.20  	vcpu_info_t *_vcpu;						\
   46.21  	preempt_disable();						\
   46.22 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   46.23 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.24  	_vcpu->evtchn_upcall_mask = 1;					\
   46.25  	preempt_enable_no_resched();					\
   46.26  	barrier();							\
   46.27 @@ -347,7 +336,7 @@ do {									\
   46.28  	vcpu_info_t *_vcpu;						\
   46.29  	barrier();							\
   46.30  	preempt_disable();						\
   46.31 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   46.32 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.33  	_vcpu->evtchn_upcall_mask = 0;					\
   46.34  	barrier(); /* unmask then check (avoid races) */		\
   46.35  	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
   46.36 @@ -358,8 +347,10 @@ do {									\
   46.37  #define __save_flags(x)							\
   46.38  do {									\
   46.39  	vcpu_info_t *_vcpu;						\
   46.40 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   46.41 +	preempt_disable();						\
   46.42 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.43  	(x) = _vcpu->evtchn_upcall_mask;				\
   46.44 +	preempt_enable();						\
   46.45  } while (0)
   46.46  
   46.47  #define __restore_flags(x)						\
   46.48 @@ -367,7 +358,7 @@ do {									\
   46.49  	vcpu_info_t *_vcpu;						\
   46.50  	barrier();							\
   46.51  	preempt_disable();						\
   46.52 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   46.53 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.54  	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
   46.55  		barrier(); /* unmask then check (avoid races) */	\
   46.56  		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
   46.57 @@ -383,7 +374,7 @@ do {									\
   46.58  do {									\
   46.59  	vcpu_info_t *_vcpu;						\
   46.60  	preempt_disable();						\
   46.61 -	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
   46.62 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.63  	(x) = _vcpu->evtchn_upcall_mask;				\
   46.64  	_vcpu->evtchn_upcall_mask = 1;					\
   46.65  	preempt_enable_no_resched();					\
   46.66 @@ -398,8 +389,15 @@ void cpu_idle_wait(void);
   46.67  #define local_irq_disable()	__cli()
   46.68  #define local_irq_enable()	__sti()
   46.69  
   46.70 +/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
   46.71  #define irqs_disabled()							\
   46.72 -	HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
   46.73 +({	int ___x;							\
   46.74 +	vcpu_info_t *_vcpu;						\
   46.75 +	preempt_disable();						\
   46.76 +	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
   46.77 +	___x = (_vcpu->evtchn_upcall_mask != 0);			\
   46.78 +	preempt_enable_no_resched();					\
   46.79 +	___x; })
   46.80  
   46.81  /*
   46.82   * disable hlt during certain critical i/o operations
    47.1 --- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Fri Oct 21 13:58:39 2005 -0600
    47.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Mon Oct 24 09:08:13 2005 -0600
    47.3 @@ -44,12 +44,12 @@
    47.4   */
    47.5  
    47.6  /* Dynamically bind a VIRQ source to Linux IRQ space. */
    47.7 -extern int  bind_virq_to_irq(int virq);
    47.8 -extern void unbind_virq_from_irq(int virq);
    47.9 +extern int  bind_virq_to_irq(int virq, int cpu);
   47.10 +extern void unbind_virq_from_irq(int virq, int cpu);
   47.11  
   47.12  /* Dynamically bind an IPI source to Linux IRQ space. */
   47.13 -extern int  bind_ipi_to_irq(int ipi);
   47.14 -extern void unbind_ipi_from_irq(int ipi);
   47.15 +extern int  bind_ipi_to_irq(int ipi, int cpu);
   47.16 +extern void unbind_ipi_from_irq(int ipi, int cpu);
   47.17  
   47.18  /*
   47.19   * Dynamically bind an event-channel port to an IRQ-like callback handler.
   47.20 @@ -99,8 +99,9 @@ static inline void unmask_evtchn(int por
   47.21  	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
   47.22  	 * masked.
   47.23  	 */
   47.24 -	if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
   47.25 -	    !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
   47.26 +	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
   47.27 +	    !synch_test_and_set_bit(port / BITS_PER_LONG,
   47.28 +				    &vcpu_info->evtchn_pending_sel)) {
   47.29  		vcpu_info->evtchn_upcall_pending = 1;
   47.30  		if (!vcpu_info->evtchn_upcall_mask)
   47.31  			force_evtchn_callback();
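
The unmask path above fixes the selector arithmetic for 64-bit guests: each bit of evtchn_pending_sel covers one word of evtchn_pending[], so the word index is port / BITS_PER_LONG rather than a hard-coded port >> 5. A self-contained check showing where the two formulas diverge once words are 64 bits wide:

    #include <stdio.h>

    int main(void)
    {
        const int bits_per_long = 64;            /* e.g. x86_64 or ia64 */
        int port;

        for (port = 0; port < 128; port += 33) {
            int sel_old = port >> 5;             /* hard-wired 32-bit words */
            int sel_new = port / bits_per_long;  /* patched, width-correct  */
            printf("port %3d: old selector %d, new selector %d\n",
                   port, sel_old, sel_new);
        }
        return 0;
    }
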
    48.1 --- a/linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h	Fri Oct 21 13:58:39 2005 -0600
    48.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/linux-public/privcmd.h	Mon Oct 24 09:08:13 2005 -0600
    48.3 @@ -76,8 +76,6 @@ typedef struct privcmd_blkmsg
    48.4  	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
    48.5  #define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN			\
    48.6  	_IOC(_IOC_READ, 'P', 4, sizeof(unsigned long))
    48.7 -#define IOCTL_PRIVCMD_INITDOMAIN_STORE				\
    48.8 -	_IOC(_IOC_READ, 'P', 5, 0)
    48.9  
   48.10  #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
   48.11  
    49.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.2 +++ b/patches/linux-2.6.12/2.6.12.6.patch	Mon Oct 24 09:08:13 2005 -0600
    49.3 @@ -0,0 +1,1738 @@
    49.4 +diff --git a/Makefile b/Makefile
    49.5 +--- a/Makefile
    49.6 ++++ b/Makefile
    49.7 +@@ -1,7 +1,7 @@
    49.8 + VERSION = 2
    49.9 + PATCHLEVEL = 6
   49.10 + SUBLEVEL = 12
   49.11 +-EXTRAVERSION =
   49.12 ++EXTRAVERSION = .6
   49.13 + NAME=Woozy Numbat
   49.14 + 
   49.15 + # *DOCUMENTATION*
   49.16 +@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
   49.17 + #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
   49.18 + #Adding $(srctree) adds about 20M on i386 to the size of the output file!
   49.19 + 
   49.20 +-ifeq ($(KBUILD_OUTPUT),)
   49.21 ++ifeq ($(src),$(obj))
   49.22 + __srctree =
   49.23 + else
   49.24 + __srctree = $(srctree)/
   49.25 +diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   49.26 +--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   49.27 ++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   49.28 +@@ -44,7 +44,7 @@
   49.29 + 
   49.30 + #define PFX "powernow-k8: "
   49.31 + #define BFX PFX "BIOS error: "
   49.32 +-#define VERSION "version 1.40.2"
   49.33 ++#define VERSION "version 1.40.4"
   49.34 + #include "powernow-k8.h"
   49.35 + 
   49.36 + /* serialize freq changes  */
   49.37 +@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
   49.38 + {
   49.39 + 	struct powernow_k8_data *data;
   49.40 + 	cpumask_t oldmask = CPU_MASK_ALL;
   49.41 +-	int rc;
   49.42 ++	int rc, i;
   49.43 + 
   49.44 + 	if (!check_supported_cpu(pol->cpu))
   49.45 + 		return -ENODEV;
   49.46 +@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
   49.47 + 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
   49.48 + 	       data->currfid, data->currvid);
   49.49 + 
   49.50 +-	powernow_data[pol->cpu] = data;
   49.51 ++	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
   49.52 ++		powernow_data[i] = data;
   49.53 ++	}
   49.54 + 
   49.55 + 	return 0;
   49.56 + 
   49.57 +diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
   49.58 +--- a/arch/i386/kernel/process.c
   49.59 ++++ b/arch/i386/kernel/process.c
   49.60 +@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
   49.61 + 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
   49.62 + 		return -EINVAL;
   49.63 + 
   49.64 ++	memset(&info, 0, sizeof(info));
   49.65 ++
   49.66 + 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
   49.67 + 
   49.68 + 	info.entry_number = idx;
   49.69 +diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
   49.70 +--- a/arch/ia64/kernel/ptrace.c
   49.71 ++++ b/arch/ia64/kernel/ptrace.c
   49.72 +@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
   49.73 + 				*data = (pt->cr_ipsr & IPSR_MASK);
   49.74 + 			return 0;
   49.75 + 
   49.76 ++		      case PT_AR_RSC:
   49.77 ++			if (write_access)
   49.78 ++				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
   49.79 ++			else
   49.80 ++				*data = pt->ar_rsc;
   49.81 ++			return 0;
   49.82 ++
   49.83 + 		      case PT_AR_RNAT:
   49.84 + 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
   49.85 + 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
   49.86 +@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
   49.87 + 		      case PT_AR_BSPSTORE:
   49.88 + 			ptr = pt_reg_addr(pt, ar_bspstore);
   49.89 + 			break;
   49.90 +-		      case PT_AR_RSC:
   49.91 +-			ptr = pt_reg_addr(pt, ar_rsc);
   49.92 +-			break;
   49.93 + 		      case PT_AR_UNAT:
   49.94 + 			ptr = pt_reg_addr(pt, ar_unat);
   49.95 + 			break;
   49.96 +@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
   49.97 + static long
   49.98 + ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
   49.99 + {
  49.100 +-	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
  49.101 ++	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
  49.102 + 	struct unw_frame_info info;
  49.103 + 	struct switch_stack *sw;
  49.104 + 	struct ia64_fpreg fpval;
  49.105 +@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
  49.106 + 	/* app regs */
  49.107 + 
  49.108 + 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
  49.109 +-	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
  49.110 ++	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
  49.111 + 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
  49.112 + 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
  49.113 + 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
  49.114 +@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
  49.115 + 	retval |= __get_user(nat_bits, &ppr->nat);
  49.116 + 
  49.117 + 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
  49.118 ++	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
  49.119 + 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
  49.120 + 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
  49.121 + 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
  49.122 +diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
  49.123 +--- a/arch/ia64/kernel/signal.c
  49.124 ++++ b/arch/ia64/kernel/signal.c
  49.125 +@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
  49.126 + static long
  49.127 + restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
  49.128 + {
  49.129 +-	unsigned long ip, flags, nat, um, cfm;
  49.130 ++	unsigned long ip, flags, nat, um, cfm, rsc;
  49.131 + 	long err;
  49.132 + 
  49.133 + 	/* Always make any pending restarted system calls return -EINTR */
  49.134 +@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
  49.135 + 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
  49.136 + 	err |= __get_user(cfm, &sc->sc_cfm);
  49.137 + 	err |= __get_user(um, &sc->sc_um);			/* user mask */
  49.138 +-	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
  49.139 ++	err |= __get_user(rsc, &sc->sc_ar_rsc);
  49.140 + 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
  49.141 + 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
  49.142 + 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
  49.143 +@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
  49.144 + 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
  49.145 + 
  49.146 + 	scr->pt.cr_ifs = cfm | (1UL << 63);
  49.147 ++	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
  49.148 + 
  49.149 + 	/* establish new instruction pointer: */
  49.150 + 	scr->pt.cr_iip = ip & ~0x3UL;
  49.151 +diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
  49.152 +--- a/arch/ppc/kernel/time.c
  49.153 ++++ b/arch/ppc/kernel/time.c
  49.154 +@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
  49.155 + 
  49.156 + extern unsigned long wall_jiffies;
  49.157 + 
  49.158 ++/* used for timezone offset */
  49.159 ++static long timezone_offset;
  49.160 ++
  49.161 + DEFINE_SPINLOCK(rtc_lock);
  49.162 + 
  49.163 + EXPORT_SYMBOL(rtc_lock);
  49.164 +@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
  49.165 + 		     xtime.tv_sec - last_rtc_update >= 659 &&
  49.166 + 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
  49.167 + 		     jiffies - wall_jiffies == 1) {
  49.168 +-		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
  49.169 ++		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
  49.170 + 				last_rtc_update = xtime.tv_sec+1;
  49.171 + 			else
  49.172 + 				/* Try again one minute later */
  49.173 +@@ -286,7 +289,7 @@ void __init time_init(void)
  49.174 + 	unsigned old_stamp, stamp, elapsed;
  49.175 + 
  49.176 +         if (ppc_md.time_init != NULL)
  49.177 +-                time_offset = ppc_md.time_init();
  49.178 ++                timezone_offset = ppc_md.time_init();
  49.179 + 
  49.180 + 	if (__USE_RTC()) {
  49.181 + 		/* 601 processor: dec counts down by 128 every 128ns */
  49.182 +@@ -331,10 +334,10 @@ void __init time_init(void)
  49.183 + 	set_dec(tb_ticks_per_jiffy);
  49.184 + 
  49.185 + 	/* If platform provided a timezone (pmac), we correct the time */
  49.186 +-        if (time_offset) {
  49.187 +-		sys_tz.tz_minuteswest = -time_offset / 60;
  49.188 ++        if (timezone_offset) {
  49.189 ++		sys_tz.tz_minuteswest = -timezone_offset / 60;
  49.190 + 		sys_tz.tz_dsttime = 0;
  49.191 +-		xtime.tv_sec -= time_offset;
  49.192 ++		xtime.tv_sec -= timezone_offset;
  49.193 +         }
  49.194 +         set_normalized_timespec(&wall_to_monotonic,
  49.195 +                                 -xtime.tv_sec, -xtime.tv_nsec);
  49.196 +diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
  49.197 +--- a/arch/ppc64/boot/zlib.c
  49.198 ++++ b/arch/ppc64/boot/zlib.c
  49.199 +@@ -1307,7 +1307,7 @@ local int huft_build(
  49.200 +   {
  49.201 +     *t = (inflate_huft *)Z_NULL;
  49.202 +     *m = 0;
  49.203 +-    return Z_OK;
  49.204 ++    return Z_DATA_ERROR;
  49.205 +   }
  49.206 + 
  49.207 + 
  49.208 +@@ -1351,6 +1351,7 @@ local int huft_build(
  49.209 +     if ((j = *p++) != 0)
  49.210 +       v[x[j]++] = i;
  49.211 +   } while (++i < n);
  49.212 ++  n = x[g];			/* set n to length of v */
  49.213 + 
  49.214 + 
  49.215 +   /* Generate the Huffman codes and for each, make the table entries */
  49.216 +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
  49.217 +--- a/arch/um/kernel/process.c
  49.218 ++++ b/arch/um/kernel/process.c
  49.219 +@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
  49.220 + 	return(arg.pid);
  49.221 + }
  49.222 + 
  49.223 +-static int ptrace_child(void)
  49.224 ++static int ptrace_child(void *arg)
  49.225 + {
  49.226 + 	int ret;
  49.227 + 	int pid = os_getpid(), ppid = getppid();
  49.228 +@@ -159,16 +159,20 @@ static int ptrace_child(void)
  49.229 + 	_exit(ret);
  49.230 + }
  49.231 + 
  49.232 +-static int start_ptraced_child(void)
  49.233 ++static int start_ptraced_child(void **stack_out)
  49.234 + {
  49.235 ++	void *stack;
  49.236 ++	unsigned long sp;
  49.237 + 	int pid, n, status;
  49.238 + 	
  49.239 +-	pid = fork();
  49.240 +-	if(pid == 0)
  49.241 +-		ptrace_child();
  49.242 +-
  49.243 ++	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
  49.244 ++		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  49.245 ++	if(stack == MAP_FAILED)
  49.246 ++		panic("check_ptrace : mmap failed, errno = %d", errno);
  49.247 ++	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
  49.248 ++	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
  49.249 + 	if(pid < 0)
  49.250 +-		panic("check_ptrace : fork failed, errno = %d", errno);
  49.251 ++		panic("check_ptrace : clone failed, errno = %d", errno);
  49.252 + 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
  49.253 + 	if(n < 0)
  49.254 + 		panic("check_ptrace : wait failed, errno = %d", errno);
  49.255 +@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
  49.256 + 		panic("check_ptrace : expected SIGSTOP, got status = %d",
  49.257 + 		      status);
  49.258 + 
  49.259 ++	*stack_out = stack;
  49.260 + 	return(pid);
  49.261 + }
  49.262 + 
  49.263 +@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
  49.264 +  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
  49.265 +  * So only for SYSEMU features we test mustpanic, while normal host features
  49.266 +  * must work anyway!*/
  49.267 +-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
  49.268 ++static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
  49.269 + {
  49.270 + 	int status, n, ret = 0;
  49.271 + 
  49.272 + 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
  49.273 +-		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
  49.274 ++		panic("check_ptrace : ptrace failed, errno = %d", errno);
  49.275 + 	CATCH_EINTR(n = waitpid(pid, &status, 0));
  49.276 + 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
  49.277 + 		int exit_with = WEXITSTATUS(status);
  49.278 +@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
  49.279 + 		printk("check_ptrace : child exited with exitcode %d, while "
  49.280 + 		      "expecting %d; status 0x%x", exit_with,
  49.281 + 		      exitcode, status);
  49.282 +-		if (mustexit)
  49.283 ++		if (mustpanic)
  49.284 + 			panic("\n");
  49.285 + 		else
  49.286 + 			printk("\n");
  49.287 + 		ret = -1;
  49.288 + 	}
  49.289 + 
  49.290 ++	if(munmap(stack, PAGE_SIZE) < 0)
  49.291 ++		panic("check_ptrace : munmap failed, errno = %d", errno);
  49.292 + 	return ret;
  49.293 + }
  49.294 + 
  49.295 +@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
  49.296 + 
  49.297 + static void __init check_sysemu(void)
  49.298 + {
  49.299 ++	void *stack;
  49.300 + 	int pid, syscall, n, status, count=0;
  49.301 + 
  49.302 + 	printk("Checking syscall emulation patch for ptrace...");
  49.303 + 	sysemu_supported = 0;
  49.304 +-	pid = start_ptraced_child();
  49.305 ++	pid = start_ptraced_child(&stack);
  49.306 + 
  49.307 + 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
  49.308 + 		goto fail;
  49.309 +@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
  49.310 + 		panic("check_sysemu : failed to modify system "
  49.311 + 		      "call return, errno = %d", errno);
  49.312 + 
  49.313 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
  49.314 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
  49.315 + 		goto fail_stopped;
  49.316 + 
  49.317 + 	sysemu_supported = 1;
  49.318 +@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
  49.319 + 	set_using_sysemu(!force_sysemu_disabled);
  49.320 + 
  49.321 + 	printk("Checking advanced syscall emulation patch for ptrace...");
  49.322 +-	pid = start_ptraced_child();
  49.323 ++	pid = start_ptraced_child(&stack);
  49.324 + 	while(1){
  49.325 + 		count++;
  49.326 + 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
  49.327 +@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
  49.328 + 			break;
  49.329 + 		}
  49.330 + 	}
  49.331 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
  49.332 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
  49.333 + 		goto fail_stopped;
  49.334 + 
  49.335 + 	sysemu_supported = 2;
  49.336 +@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
  49.337 + 	return;
  49.338 + 
  49.339 + fail:
  49.340 +-	stop_ptraced_child(pid, 1, 0);
  49.341 ++	stop_ptraced_child(pid, stack, 1, 0);
  49.342 + fail_stopped:
  49.343 + 	printk("missing\n");
  49.344 + }
  49.345 + 
  49.346 + void __init check_ptrace(void)
  49.347 + {
  49.348 ++	void *stack;
  49.349 + 	int pid, syscall, n, status;
  49.350 + 
  49.351 + 	printk("Checking that ptrace can change system call numbers...");
  49.352 +-	pid = start_ptraced_child();
  49.353 ++	pid = start_ptraced_child(&stack);
  49.354 + 
  49.355 + 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
  49.356 + 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
  49.357 +@@ -330,7 +339,7 @@ void __init check_ptrace(void)
  49.358 + 			break;
  49.359 + 		}
  49.360 + 	}
  49.361 +-	stop_ptraced_child(pid, 0, 1);
  49.362 ++	stop_ptraced_child(pid, stack, 0, 1);
  49.363 + 	printk("OK\n");
  49.364 + 	check_sysemu();
  49.365 + }
  49.366 +@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
  49.367 + static inline int check_skas3_ptrace_support(void)
  49.368 + {
  49.369 + 	struct ptrace_faultinfo fi;
  49.370 ++	void *stack;
  49.371 + 	int pid, n, ret = 1;
  49.372 + 
  49.373 + 	printf("Checking for the skas3 patch in the host...");
  49.374 +-	pid = start_ptraced_child();
  49.375 ++	pid = start_ptraced_child(&stack);
  49.376 + 
  49.377 + 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
  49.378 + 	if (n < 0) {
  49.379 +@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
  49.380 + 	}
  49.381 + 
  49.382 + 	init_registers(pid);
  49.383 +-	stop_ptraced_child(pid, 1, 1);
  49.384 ++	stop_ptraced_child(pid, stack, 1, 1);
  49.385 + 
  49.386 + 	return(ret);
  49.387 + }
  49.388 +diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
  49.389 +--- a/arch/x86_64/ia32/syscall32.c
  49.390 ++++ b/arch/x86_64/ia32/syscall32.c
  49.391 +@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
  49.392 + 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
  49.393 + 	struct vm_area_struct *vma;
  49.394 + 	struct mm_struct *mm = current->mm;
  49.395 ++	int ret;
  49.396 + 
  49.397 + 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  49.398 + 	if (!vma)
  49.399 +@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
  49.400 + 	vma->vm_mm = mm;
  49.401 + 
  49.402 + 	down_write(&mm->mmap_sem);
  49.403 +-	insert_vm_struct(mm, vma);
  49.404 ++	if ((ret = insert_vm_struct(mm, vma))) {
  49.405 ++		up_write(&mm->mmap_sem);
  49.406 ++		kmem_cache_free(vm_area_cachep, vma);
  49.407 ++		return ret;
  49.408 ++	}
  49.409 + 	mm->total_vm += npages;
  49.410 + 	up_write(&mm->mmap_sem);
  49.411 + 	return 0;
  49.412 +diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
  49.413 +--- a/arch/x86_64/kernel/setup.c
  49.414 ++++ b/arch/x86_64/kernel/setup.c
  49.415 +@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
  49.416 + 	int cpu = smp_processor_id();
  49.417 + 	int node = 0;
  49.418 + 	unsigned bits;
  49.419 +-	if (c->x86_num_cores == 1)
  49.420 +-		return;
  49.421 + 
  49.422 + 	bits = 0;
  49.423 + 	while ((1 << bits) < c->x86_num_cores)
  49.424 +diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
  49.425 +--- a/arch/x86_64/kernel/smp.c
  49.426 ++++ b/arch/x86_64/kernel/smp.c
  49.427 +@@ -284,6 +284,71 @@ struct call_data_struct {
  49.428 + static struct call_data_struct * call_data;
  49.429 + 
  49.430 + /*
  49.431 ++ * this function sends a 'generic call function' IPI to one other CPU
  49.432 ++ * in the system.
  49.433 ++ */
  49.434 ++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
  49.435 ++				int nonatomic, int wait)
  49.436 ++{
  49.437 ++	struct call_data_struct data;
  49.438 ++	int cpus = 1;
  49.439 ++
  49.440 ++	data.func = func;
  49.441 ++	data.info = info;
  49.442 ++	atomic_set(&data.started, 0);
  49.443 ++	data.wait = wait;
  49.444 ++	if (wait)
  49.445 ++		atomic_set(&data.finished, 0);
  49.446 ++
  49.447 ++	call_data = &data;
  49.448 ++	wmb();
  49.449 ++	/* Send a message to all other CPUs and wait for them to respond */
  49.450 ++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
  49.451 ++
  49.452 ++	/* Wait for response */
  49.453 ++	while (atomic_read(&data.started) != cpus)
  49.454 ++		cpu_relax();
  49.455 ++
  49.456 ++	if (!wait)
  49.457 ++		return;
  49.458 ++
  49.459 ++	while (atomic_read(&data.finished) != cpus)
  49.460 ++		cpu_relax();
  49.461 ++}
  49.462 ++
  49.463 ++/*
  49.464 ++ * Run a function on another CPU
  49.465 ++ *  <func>	The function to run. This must be fast and non-blocking.
  49.466 ++ *  <info>	An arbitrary pointer to pass to the function.
  49.467 ++ *  <nonatomic>	Currently unused.
  49.468 ++ *  <wait>	If true, wait until function has completed on other CPUs.
  49.469 ++ *  [RETURNS]   0 on success, else a negative status code.
  49.470 ++ *
   49.471 ++ * Does not return until the remote CPU is nearly ready to execute <func>,
   49.472 ++ * or has already executed it.
  49.473 ++ */
  49.474 ++
  49.475 ++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
  49.476 ++	int nonatomic, int wait)
  49.477 ++{
  49.478 ++	
  49.479 ++	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
  49.480 ++
  49.481 ++	if (cpu == me) {
  49.482 ++		printk("%s: trying to call self\n", __func__);
  49.483 ++		put_cpu();
  49.484 ++		return -EBUSY;
  49.485 ++	}
  49.486 ++	spin_lock_bh(&call_lock);
  49.487 ++
  49.488 ++	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
  49.489 ++
  49.490 ++	spin_unlock_bh(&call_lock);
  49.491 ++	put_cpu();
  49.492 ++	return 0;
  49.493 ++}
  49.494 ++
  49.495 ++/*
  49.496 +  * this function sends a 'generic call function' IPI to all other CPUs
  49.497 +  * in the system.
  49.498 +  */
  49.499 +diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
  49.500 +--- a/arch/x86_64/kernel/smpboot.c
  49.501 ++++ b/arch/x86_64/kernel/smpboot.c
  49.502 +@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
  49.503 + {
  49.504 + 	unsigned long flags, i;
  49.505 + 
  49.506 +-	if (smp_processor_id() != boot_cpu_id)
  49.507 +-		return;
  49.508 +-
  49.509 + 	go[MASTER] = 0;
  49.510 + 
  49.511 + 	local_irq_save(flags);
  49.512 +@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
  49.513 + 	return tcenter - best_tm;
  49.514 + }
  49.515 + 
  49.516 +-static __cpuinit void sync_tsc(void)
  49.517 ++static __cpuinit void sync_tsc(unsigned int master)
  49.518 + {
  49.519 + 	int i, done = 0;
  49.520 + 	long delta, adj, adjust_latency = 0;
  49.521 +@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
  49.522 + 	} t[NUM_ROUNDS] __cpuinitdata;
  49.523 + #endif
  49.524 + 
  49.525 ++	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
  49.526 ++		smp_processor_id(), master);
  49.527 ++
  49.528 + 	go[MASTER] = 1;
  49.529 + 
  49.530 +-	smp_call_function(sync_master, NULL, 1, 0);
   49.531 ++	/* It is dangerous to broadcast an IPI while cpus are coming up,
   49.532 ++	 * as they may not be ready to accept it.  Since we only need
   49.533 ++	 * to reach the boot cpu, direct the message at it alone and
   49.534 ++	 * avoid the race.
   49.535 ++	 */
  49.536 ++	smp_call_function_single(master, sync_master, NULL, 1, 0);
  49.537 + 
  49.538 + 	while (go[MASTER])	/* wait for master to be ready */
  49.539 + 		no_cpu_relax();
  49.540 +@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
  49.541 + 	printk(KERN_INFO
  49.542 + 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
  49.543 + 	       "maxerr %lu cycles)\n",
  49.544 +-	       smp_processor_id(), boot_cpu_id, delta, rt);
  49.545 ++	       smp_processor_id(), master, delta, rt);
  49.546 + }
  49.547 + 
  49.548 + static void __cpuinit tsc_sync_wait(void)
  49.549 + {
  49.550 + 	if (notscsync || !cpu_has_tsc)
  49.551 + 		return;
  49.552 +-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
  49.553 +-			boot_cpu_id);
  49.554 +-	sync_tsc();
  49.555 ++	sync_tsc(0);
  49.556 + }
  49.557 + 
  49.558 + static __init int notscsync_setup(char *s)
  49.559 +diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
  49.560 +--- a/drivers/acpi/pci_irq.c
  49.561 ++++ b/drivers/acpi/pci_irq.c
  49.562 +@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
  49.563 + 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
  49.564 + 			pci_name(dev), ('A' + pin));
  49.565 + 		/* Interrupt Line values above 0xF are forbidden */
  49.566 +-		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
  49.567 ++		if (dev->irq > 0 && (dev->irq <= 0xF)) {
  49.568 + 			printk(" - using IRQ %d\n", dev->irq);
  49.569 ++			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
  49.570 + 			return_VALUE(0);
  49.571 + 		}
  49.572 + 		else {
  49.573 +diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
  49.574 +--- a/drivers/char/rocket.c
  49.575 ++++ b/drivers/char/rocket.c
  49.576 +@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
  49.577 + 		ToRecv = space;
  49.578 + 
  49.579 + 	if (ToRecv <= 0)
  49.580 +-		return;
  49.581 ++		goto done;
  49.582 + 
  49.583 + 	/*
  49.584 + 	 * if status indicates there are errored characters in the
  49.585 +@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
  49.586 + 	}
  49.587 + 	/*  Push the data up to the tty layer */
  49.588 + 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
  49.589 ++done:
  49.590 + 	tty_ldisc_deref(ld);
  49.591 + }
  49.592 + 
  49.593 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
  49.594 +--- a/drivers/char/tpm/tpm.c
  49.595 ++++ b/drivers/char/tpm/tpm.c
  49.596 +@@ -32,12 +32,6 @@
  49.597 + 
  49.598 + #define	TPM_BUFSIZE			2048
  49.599 + 
  49.600 +-/* PCI configuration addresses */
  49.601 +-#define	PCI_GEN_PMCON_1			0xA0
  49.602 +-#define	PCI_GEN1_DEC			0xE4
  49.603 +-#define	PCI_LPC_EN			0xE6
  49.604 +-#define	PCI_GEN2_DEC			0xEC
  49.605 +-
  49.606 + static LIST_HEAD(tpm_chip_list);
  49.607 + static DEFINE_SPINLOCK(driver_lock);
  49.608 + static int dev_mask[32];
  49.609 +@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
  49.610 + EXPORT_SYMBOL_GPL(tpm_time_expired);
  49.611 + 
  49.612 + /*
  49.613 +- * Initialize the LPC bus and enable the TPM ports
  49.614 +- */
  49.615 +-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
  49.616 +-{
  49.617 +-	u32 lpcenable, tmp;
  49.618 +-	int is_lpcm = 0;
  49.619 +-
  49.620 +-	switch (pci_dev->vendor) {
  49.621 +-	case PCI_VENDOR_ID_INTEL:
  49.622 +-		switch (pci_dev->device) {
  49.623 +-		case PCI_DEVICE_ID_INTEL_82801CA_12:
  49.624 +-		case PCI_DEVICE_ID_INTEL_82801DB_12:
  49.625 +-			is_lpcm = 1;
  49.626 +-			break;
  49.627 +-		}
  49.628 +-		/* init ICH (enable LPC) */
  49.629 +-		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
  49.630 +-		lpcenable |= 0x20000000;
  49.631 +-		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
  49.632 +-
  49.633 +-		if (is_lpcm) {
  49.634 +-			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
  49.635 +-					      &lpcenable);
  49.636 +-			if ((lpcenable & 0x20000000) == 0) {
  49.637 +-				dev_err(&pci_dev->dev,
  49.638 +-					"cannot enable LPC\n");
  49.639 +-				return -ENODEV;
  49.640 +-			}
  49.641 +-		}
  49.642 +-
  49.643 +-		/* initialize TPM registers */
  49.644 +-		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
  49.645 +-
  49.646 +-		if (!is_lpcm)
  49.647 +-			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
  49.648 +-		else
  49.649 +-			tmp =
  49.650 +-			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
  49.651 +-			    0x00000001;
  49.652 +-
  49.653 +-		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
  49.654 +-
  49.655 +-		if (is_lpcm) {
  49.656 +-			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
  49.657 +-					      &tmp);
  49.658 +-			tmp |= 0x00000004;	/* enable CLKRUN */
  49.659 +-			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
  49.660 +-					       tmp);
  49.661 +-		}
  49.662 +-		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
  49.663 +-		tpm_write_index(0x0A, 0x00);	/* int disable */
  49.664 +-		tpm_write_index(0x08, base);	/* base addr lo */
  49.665 +-		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
  49.666 +-		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
  49.667 +-		break;
  49.668 +-	case PCI_VENDOR_ID_AMD:
  49.669 +-		/* nothing yet */
  49.670 +-		break;
  49.671 +-	}
  49.672 +-
  49.673 +-	return 0;
  49.674 +-}
  49.675 +-
  49.676 +-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
  49.677 +-
  49.678 +-/*
  49.679 +  * Internal kernel interface to transmit TPM commands
  49.680 +  */
  49.681 + static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
  49.682 +@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
  49.683 + 	if (chip == NULL)
  49.684 + 		return -ENODEV;
  49.685 + 
  49.686 +-	spin_lock(&driver_lock);
  49.687 +-	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
  49.688 +-	spin_unlock(&driver_lock);
  49.689 +-
  49.690 + 	return 0;
  49.691 + }
  49.692 + 
  49.693 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
  49.694 +--- a/drivers/char/tpm/tpm.h
  49.695 ++++ b/drivers/char/tpm/tpm.h
  49.696 +@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
  49.697 + }
  49.698 + 
  49.699 + extern void tpm_time_expired(unsigned long);
  49.700 +-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
  49.701 +-
  49.702 + extern int tpm_register_hardware(struct pci_dev *,
  49.703 + 				 struct tpm_vendor_specific *);
  49.704 + extern int tpm_open(struct inode *, struct file *);
  49.705 +diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
  49.706 +--- a/drivers/char/tpm/tpm_atmel.c
  49.707 ++++ b/drivers/char/tpm/tpm_atmel.c
  49.708 +@@ -22,7 +22,10 @@
  49.709 + #include "tpm.h"
  49.710 + 
  49.711 + /* Atmel definitions */
  49.712 +-#define	TPM_ATML_BASE			0x400
  49.713 ++enum tpm_atmel_addr {
  49.714 ++	TPM_ATMEL_BASE_ADDR_LO = 0x08,
  49.715 ++	TPM_ATMEL_BASE_ADDR_HI = 0x09
  49.716 ++};
  49.717 + 
  49.718 + /* write status bits */
  49.719 + #define	ATML_STATUS_ABORT		0x01
  49.720 +@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
  49.721 + 	.cancel = tpm_atml_cancel,
  49.722 + 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
  49.723 + 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
  49.724 +-	.base = TPM_ATML_BASE,
  49.725 + 	.miscdev = { .fops = &atmel_ops, },
  49.726 + };
  49.727 + 
  49.728 +@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
  49.729 + {
  49.730 + 	u8 version[4];
  49.731 + 	int rc = 0;
  49.732 ++	int lo, hi;
  49.733 + 
  49.734 + 	if (pci_enable_device(pci_dev))
  49.735 + 		return -EIO;
  49.736 + 
  49.737 +-	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
  49.738 +-		rc = -ENODEV;
  49.739 +-		goto out_err;
  49.740 +-	}
  49.741 ++	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
  49.742 ++	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
  49.743 ++
  49.744 ++	tpm_atmel.base = (hi<<8)|lo;
  49.745 ++	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
  49.746 + 
  49.747 + 	/* verify that it is an Atmel part */
  49.748 + 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
  49.749 +diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
  49.750 +--- a/drivers/char/tpm/tpm_nsc.c
  49.751 ++++ b/drivers/char/tpm/tpm_nsc.c
  49.752 +@@ -24,6 +24,10 @@
  49.753 + /* National definitions */
  49.754 + #define	TPM_NSC_BASE			0x360
  49.755 + #define	TPM_NSC_IRQ			0x07
  49.756 ++#define	TPM_NSC_BASE0_HI		0x60
  49.757 ++#define	TPM_NSC_BASE0_LO		0x61
  49.758 ++#define	TPM_NSC_BASE1_HI		0x62
  49.759 ++#define	TPM_NSC_BASE1_LO		0x63
  49.760 + 
  49.761 + #define	NSC_LDN_INDEX			0x07
  49.762 + #define	NSC_SID_INDEX			0x20
  49.763 +@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
  49.764 + 	.cancel = tpm_nsc_cancel,
  49.765 + 	.req_complete_mask = NSC_STATUS_OBF,
  49.766 + 	.req_complete_val = NSC_STATUS_OBF,
  49.767 +-	.base = TPM_NSC_BASE,
  49.768 + 	.miscdev = { .fops = &nsc_ops, },
  49.769 + 	
  49.770 + };
  49.771 +@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
  49.772 + 				  const struct pci_device_id *pci_id)
  49.773 + {
  49.774 + 	int rc = 0;
  49.775 ++	int lo, hi;
  49.776 ++
  49.777 ++	hi = tpm_read_index(TPM_NSC_BASE0_HI);
  49.778 ++	lo = tpm_read_index(TPM_NSC_BASE0_LO);
  49.779 ++
  49.780 ++	tpm_nsc.base = (hi<<8) | lo;
  49.781 + 
  49.782 + 	if (pci_enable_device(pci_dev))
  49.783 + 		return -EIO;
  49.784 + 
  49.785 +-	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
  49.786 +-		rc = -ENODEV;
  49.787 +-		goto out_err;
  49.788 +-	}
  49.789 +-
  49.790 + 	/* verify that it is a National part (SID) */
  49.791 + 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
  49.792 + 		rc = -ENODEV;
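Both TPM driver hunks stop passing a hard-coded LPC base address and instead read the base back from the chip's configuration index registers, composing a 16-bit address from a high and a low byte. A small sketch of that composition; read_index() is a made-up stand-in for tpm_read_index(), and the returned register values are invented for illustration:

#include <stdio.h>

/* Hypothetical stand-in for tpm_read_index(): returns one config byte. */
static int read_index(int reg)
{
	return reg == 0x60 ? 0x03 : 0x60;	/* pretend hi=0x03, lo=0x60 */
}

int main(void)
{
	int hi = read_index(0x60);		/* TPM_NSC_BASE0_HI */
	int lo = read_index(0x61);		/* TPM_NSC_BASE0_LO */
	unsigned int base = (hi << 8) | lo;	/* 0x0360, the old TPM_NSC_BASE */

	printf("operating with base: 0x%x\n", base);
	return 0;
}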
  49.793 +diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
  49.794 +--- a/drivers/char/tty_ioctl.c
  49.795 ++++ b/drivers/char/tty_ioctl.c
  49.796 +@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
  49.797 + 			ld = tty_ldisc_ref(tty);
  49.798 + 			switch (arg) {
  49.799 + 			case TCIFLUSH:
  49.800 +-				if (ld->flush_buffer)
  49.801 ++				if (ld && ld->flush_buffer)
  49.802 + 					ld->flush_buffer(tty);
  49.803 + 				break;
  49.804 + 			case TCIOFLUSH:
  49.805 +-				if (ld->flush_buffer)
  49.806 ++				if (ld && ld->flush_buffer)
  49.807 + 					ld->flush_buffer(tty);
  49.808 + 				/* fall through */
  49.809 + 			case TCOFLUSH:
  49.810 +diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
  49.811 +--- a/drivers/media/video/cx88/cx88-video.c
  49.812 ++++ b/drivers/media/video/cx88/cx88-video.c
  49.813 +@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
  49.814 + 			.default_value = 0,
  49.815 + 			.type          = V4L2_CTRL_TYPE_INTEGER,
  49.816 + 		},
  49.817 +-		.off                   = 0,
  49.818 ++		.off                   = 128,
  49.819 + 		.reg                   = MO_HUE,
  49.820 + 		.mask                  = 0x00ff,
  49.821 + 		.shift                 = 0,
  49.822 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
  49.823 +--- a/drivers/net/e1000/e1000_main.c
  49.824 ++++ b/drivers/net/e1000/e1000_main.c
  49.825 +@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  49.826 + 	tso = e1000_tso(adapter, skb);
  49.827 + 	if (tso < 0) {
  49.828 + 		dev_kfree_skb_any(skb);
  49.829 ++		spin_unlock_irqrestore(&adapter->tx_lock, flags);
  49.830 + 		return NETDEV_TX_OK;
  49.831 + 	}
  49.832 + 
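The e1000 fix adds the spin_unlock_irqrestore() that was missing on the TSO error path; before it, the early return from e1000_xmit_frame() left adapter->tx_lock held forever. A rough userspace analogue of the same discipline with a pthread mutex (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for e1000_tso(): a negative result means error. */
static int prepare_segmentation(int len) { return len > 0 ? 0 : -1; }

static int xmit_frame(int len)
{
	pthread_mutex_lock(&tx_lock);

	if (prepare_segmentation(len) < 0) {
		/* The bug was returning here without this unlock. */
		pthread_mutex_unlock(&tx_lock);
		return 0;
	}

	printf("transmitting %d bytes\n", len);
	pthread_mutex_unlock(&tx_lock);
	return 0;
}

int main(void)
{
	xmit_frame(-1);	/* error path: the lock must still be released */
	xmit_frame(64);	/* would hang here if the error path leaked the lock */
	return 0;
}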
  49.833 +diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
  49.834 +--- a/drivers/net/hamradio/Kconfig
  49.835 ++++ b/drivers/net/hamradio/Kconfig
  49.836 +@@ -17,7 +17,7 @@ config MKISS
  49.837 + 
  49.838 + config 6PACK
  49.839 + 	tristate "Serial port 6PACK driver"
  49.840 +-	depends on AX25 && BROKEN_ON_SMP
  49.841 ++	depends on AX25
  49.842 + 	---help---
  49.843 + 	  6pack is a transmission protocol for the data exchange between your
  49.844 + 	  PC and your TNC (the Terminal Node Controller acts as a kind of
  49.845 +diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
  49.846 +--- a/drivers/net/shaper.c
  49.847 ++++ b/drivers/net/shaper.c
  49.848 +@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
  49.849 + {
  49.850 + 	struct shaper *shaper = dev->priv;
  49.851 +  	struct sk_buff *ptr;
  49.852 +-   
  49.853 +-	if (down_trylock(&shaper->sem))
  49.854 +-		return -1;
  49.855 + 
  49.856 ++	spin_lock(&shaper->lock);
  49.857 +  	ptr=shaper->sendq.prev;
  49.858 +  	
  49.859 +  	/*
  49.860 +@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
  49.861 +                 shaper->stats.collisions++;
  49.862 +  	}
  49.863 + 	shaper_kick(shaper);
  49.864 +-	up(&shaper->sem);
  49.865 ++	spin_unlock(&shaper->lock);
  49.866 +  	return 0;
  49.867 + }
  49.868 + 
  49.869 +@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
  49.870 + {
  49.871 + 	struct shaper *shaper = (struct shaper *)data;
  49.872 + 
  49.873 +-	if (!down_trylock(&shaper->sem)) {
  49.874 +-		shaper_kick(shaper);
  49.875 +-		up(&shaper->sem);
  49.876 +-	} else
  49.877 +-		mod_timer(&shaper->timer, jiffies);
  49.878 ++	spin_lock(&shaper->lock);
  49.879 ++	shaper_kick(shaper);
  49.880 ++	spin_unlock(&shaper->lock);
  49.881 + }
  49.882 + 
  49.883 + /*
  49.884 +@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
  49.885 + 
  49.886 + 
  49.887 + /*
  49.888 +- *	Flush the shaper queues on a closedown
  49.889 +- */
  49.890 +- 
  49.891 +-static void shaper_flush(struct shaper *shaper)
  49.892 +-{
  49.893 +-	struct sk_buff *skb;
  49.894 +-
  49.895 +-	down(&shaper->sem);
  49.896 +-	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
  49.897 +-		dev_kfree_skb(skb);
  49.898 +-	shaper_kick(shaper);
  49.899 +-	up(&shaper->sem);
  49.900 +-}
  49.901 +-
  49.902 +-/*
  49.903 +  *	Bring the interface up. We just disallow this until a 
  49.904 +  *	bind.
  49.905 +  */
  49.906 +@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
  49.907 + static int shaper_close(struct net_device *dev)
  49.908 + {
  49.909 + 	struct shaper *shaper=dev->priv;
  49.910 +-	shaper_flush(shaper);
  49.911 ++	struct sk_buff *skb;
  49.912 ++
  49.913 ++	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
  49.914 ++		dev_kfree_skb(skb);
  49.915 ++
  49.916 ++	spin_lock_bh(&shaper->lock);
  49.917 ++	shaper_kick(shaper);
  49.918 ++	spin_unlock_bh(&shaper->lock);
  49.919 ++
  49.920 + 	del_timer_sync(&shaper->timer);
  49.921 + 	return 0;
  49.922 + }
  49.923 +@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
  49.924 + 	init_timer(&sh->timer);
  49.925 + 	sh->timer.function=shaper_timer;
  49.926 + 	sh->timer.data=(unsigned long)sh;
  49.927 ++	spin_lock_init(&sh->lock);
  49.928 + }
  49.929 + 
  49.930 + /*
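The shaper conversion swaps a semaphore for a spinlock: semaphores can sleep and so could only be down_trylock()ed from the timer, which then had to postpone its work, while a spinlock lets shaper_timer() always run the kick, with the process-context paths taking the _bh variants so the timer cannot preempt them mid-critical-section. A loose userspace analogue of "trylock and postpone" versus "lock and always do the work", using POSIX spinlocks (the softirq/_bh nuance is not representable here):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static int queue_len = 3;

/* Old shape: if the lock is busy, skip the work and reschedule. */
static void timer_trylock(void)
{
	if (pthread_spin_trylock(&lock) == 0) {
		queue_len--;
		pthread_spin_unlock(&lock);
	} else {
		puts("busy: work postponed (old semaphore scheme)");
	}
}

/* New shape: spinning is cheap and bounded, so always do the work. */
static void timer_lock(void)
{
	pthread_spin_lock(&lock);
	queue_len--;
	pthread_spin_unlock(&lock);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	timer_trylock();
	timer_lock();
	printf("queue_len=%d\n", queue_len);
	pthread_spin_destroy(&lock);
	return 0;
}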
  49.931 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
  49.932 +--- a/drivers/pci/pci-driver.c
  49.933 ++++ b/drivers/pci/pci-driver.c
  49.934 +@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
  49.935 + 	/* FIXME, once all of the existing PCI drivers have been fixed to set
  49.936 + 	 * the pci shutdown function, this test can go away. */
  49.937 + 	if (!drv->driver.shutdown)
  49.938 +-		drv->driver.shutdown = pci_device_shutdown,
  49.939 ++		drv->driver.shutdown = pci_device_shutdown;
  49.940 + 	drv->driver.owner = drv->owner;
  49.941 + 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
  49.942 + 	pci_init_dynids(&drv->dynids);
  49.943 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
  49.944 +--- a/drivers/scsi/qla2xxx/qla_init.c
  49.945 ++++ b/drivers/scsi/qla2xxx/qla_init.c
  49.946 +@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
  49.947 + 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
  49.948 + 
  49.949 + 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
  49.950 +-	if (!rport)
  49.951 ++	if (!rport) {
  49.952 + 		qla_printk(KERN_WARNING, ha,
  49.953 + 		    "Unable to allocate fc remote port!\n");
  49.954 ++		return;
  49.955 ++	}
  49.956 + 
  49.957 + 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
  49.958 + 		fcport->os_target_id = rport->scsi_target_id;
  49.959 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
  49.960 +--- a/drivers/scsi/qla2xxx/qla_os.c
  49.961 ++++ b/drivers/scsi/qla2xxx/qla_os.c
  49.962 +@@ -1150,7 +1150,7 @@ iospace_error_exit:
  49.963 +  */
  49.964 + int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
  49.965 + {
  49.966 +-	int	ret;
  49.967 ++	int	ret = -ENODEV;
  49.968 + 	device_reg_t __iomem *reg;
  49.969 + 	struct Scsi_Host *host;
  49.970 + 	scsi_qla_host_t *ha;
  49.971 +@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
  49.972 + 	fc_port_t *fcport;
  49.973 + 
  49.974 + 	if (pci_enable_device(pdev))
  49.975 +-		return -1;
  49.976 ++		goto probe_out;
  49.977 + 
  49.978 + 	host = scsi_host_alloc(&qla2x00_driver_template,
  49.979 + 	    sizeof(scsi_qla_host_t));
  49.980 +@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
  49.981 + 
  49.982 + 	/* Configure PCI I/O space */
  49.983 + 	ret = qla2x00_iospace_config(ha);
  49.984 +-	if (ret != 0) {
  49.985 +-		goto probe_alloc_failed;
  49.986 +-	}
  49.987 ++	if (ret)
  49.988 ++		goto probe_failed;
  49.989 + 
  49.990 + 	/* Sanitize the information from PCI BIOS. */
  49.991 + 	host->irq = pdev->irq;
  49.992 +@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  49.993 + 		qla_printk(KERN_WARNING, ha,
  49.994 + 		    "[ERROR] Failed to allocate memory for adapter\n");
  49.995 + 
  49.996 +-		goto probe_alloc_failed;
  49.997 ++		ret = -ENOMEM;
  49.998 ++		goto probe_failed;
  49.999 + 	}
 49.1000 + 
 49.1001 +-	pci_set_drvdata(pdev, ha);
 49.1002 +-	host->this_id = 255;
 49.1003 +-	host->cmd_per_lun = 3;
 49.1004 +-	host->unique_id = ha->instance;
 49.1005 +-	host->max_cmd_len = MAX_CMDSZ;
 49.1006 +-	host->max_channel = ha->ports - 1;
 49.1007 +-	host->max_id = ha->max_targets;
 49.1008 +-	host->max_lun = ha->max_luns;
 49.1009 +-	host->transportt = qla2xxx_transport_template;
 49.1010 +-	if (scsi_add_host(host, &pdev->dev))
 49.1011 +-		goto probe_alloc_failed;
 49.1012 +-
 49.1013 +-	qla2x00_alloc_sysfs_attr(ha);
 49.1014 +-
 49.1015 + 	if (qla2x00_initialize_adapter(ha) &&
 49.1016 + 	    !(ha->device_flags & DFLG_NO_CABLE)) {
 49.1017 + 
 49.1018 +@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
 49.1019 + 		    "Adapter flags %x.\n",
 49.1020 + 		    ha->host_no, ha->device_flags));
 49.1021 + 
 49.1022 ++		ret = -ENODEV;
 49.1023 + 		goto probe_failed;
 49.1024 + 	}
 49.1025 + 
 49.1026 +-	qla2x00_init_host_attr(ha);
 49.1027 +-
 49.1028 + 	/*
 49.1029 + 	 * Startup the kernel thread for this host adapter
 49.1030 + 	 */
 49.1031 +@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
 49.1032 + 		qla_printk(KERN_WARNING, ha,
 49.1033 + 		    "Unable to start DPC thread!\n");
 49.1034 + 
 49.1035 ++		ret = -ENODEV;
 49.1036 + 		goto probe_failed;
 49.1037 + 	}
 49.1038 + 	wait_for_completion(&ha->dpc_inited);
 49.1039 + 
 49.1040 ++	host->this_id = 255;
 49.1041 ++	host->cmd_per_lun = 3;
 49.1042 ++	host->unique_id = ha->instance;
 49.1043 ++	host->max_cmd_len = MAX_CMDSZ;
 49.1044 ++	host->max_channel = ha->ports - 1;
 49.1045 ++	host->max_lun = MAX_LUNS;
 49.1046 ++	host->transportt = qla2xxx_transport_template;
 49.1047 ++
 49.1048 + 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 49.1049 + 		ret = request_irq(host->irq, qla2100_intr_handler,
 49.1050 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
 49.1051 + 	else
 49.1052 + 		ret = request_irq(host->irq, qla2300_intr_handler,
 49.1053 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
 49.1054 +-	if (ret != 0) {
 49.1055 ++	if (ret) {
 49.1056 + 		qla_printk(KERN_WARNING, ha,
 49.1057 + 		    "Failed to reserve interrupt %d already in use.\n",
 49.1058 + 		    host->irq);
 49.1059 +@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
 49.1060 + 		msleep(10);
 49.1061 + 	}
 49.1062 + 
 49.1063 ++	pci_set_drvdata(pdev, ha);
 49.1064 + 	ha->flags.init_done = 1;
 49.1065 + 	num_hosts++;
 49.1066 + 
 49.1067 ++	ret = scsi_add_host(host, &pdev->dev);
 49.1068 ++	if (ret)
 49.1069 ++		goto probe_failed;
 49.1070 ++
 49.1071 ++	qla2x00_alloc_sysfs_attr(ha);
 49.1072 ++
 49.1073 ++	qla2x00_init_host_attr(ha);
 49.1074 ++
 49.1075 + 	qla_printk(KERN_INFO, ha, "\n"
 49.1076 + 	    " QLogic Fibre Channel HBA Driver: %s\n"
 49.1077 + 	    "  QLogic %s - %s\n"
 49.1078 +@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
 49.1079 + probe_failed:
 49.1080 + 	fc_remove_host(ha->host);
 49.1081 + 
 49.1082 +-	scsi_remove_host(host);
 49.1083 +-
 49.1084 +-probe_alloc_failed:
 49.1085 + 	qla2x00_free_device(ha);
 49.1086 + 
 49.1087 + 	scsi_host_put(host);
 49.1088 +@@ -1394,7 +1394,8 @@ probe_alloc_failed:
 49.1089 + probe_disable_device:
 49.1090 + 	pci_disable_device(pdev);
 49.1091 + 
 49.1092 +-	return -1;
 49.1093 ++probe_out:
 49.1094 ++	return ret;
 49.1095 + }
 49.1096 + EXPORT_SYMBOL_GPL(qla2x00_probe_one);
 49.1097 + 
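The qla2xxx rework makes qla2x00_probe_one() return a real errno and funnels every failure through ordered labels (probe_failed, probe_disable_device, probe_out), undoing only what was already set up. A compact sketch of that goto-unwind shape; the setup/teardown functions are invented placeholders:

#include <stdio.h>
#include <errno.h>

/* Hypothetical setup steps; nonzero return means failure. */
static int enable_device(void)   { return 0; }
static int map_iospace(void)     { return 0; }
static int start_dpc_thread(void){ return -1; }	/* simulate a failure */
static void unmap_iospace(void)  { puts("unmapped iospace"); }
static void disable_device(void) { puts("disabled device"); }

static int probe_one(void)
{
	int ret = -ENODEV;

	if (enable_device())
		goto out;		/* nothing to undo yet */
	if ((ret = map_iospace()))
		goto disable;		/* undo step 1 only */
	if ((ret = start_dpc_thread()))
		goto unmap;		/* undo steps 2 and 1, in reverse */

	return 0;
unmap:
	unmap_iospace();
disable:
	disable_device();
out:
	return ret;
}

int main(void)
{
	printf("probe_one() = %d\n", probe_one());
	return 0;
}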
 49.1098 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
 49.1099 +--- a/drivers/scsi/sg.c
 49.1100 ++++ b/drivers/scsi/sg.c
 49.1101 +@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
 49.1102 + {
 49.1103 + 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
 49.1104 + 
 49.1105 ++	s->private = it;
 49.1106 + 	if (! it)
 49.1107 + 		return NULL;
 49.1108 ++
 49.1109 + 	if (NULL == sg_dev_arr)
 49.1110 +-		goto err1;
 49.1111 ++		return NULL;
 49.1112 + 	it->index = *pos;
 49.1113 + 	it->max = sg_last_dev();
 49.1114 + 	if (it->index >= it->max)
 49.1115 +-		goto err1;
 49.1116 ++		return NULL;
 49.1117 + 	return it;
 49.1118 +-err1:
 49.1119 +-	kfree(it);
 49.1120 +-	return NULL;
 49.1121 + }
 49.1122 + 
 49.1123 + static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
 49.1124 + {
 49.1125 +-	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
 49.1126 ++	struct sg_proc_deviter * it = s->private;
 49.1127 + 
 49.1128 + 	*pos = ++it->index;
 49.1129 + 	return (it->index < it->max) ? it : NULL;
 49.1130 +@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
 49.1131 + 
 49.1132 + static void dev_seq_stop(struct seq_file *s, void *v)
 49.1133 + {
 49.1134 +-	kfree (v);
 49.1135 ++	struct sg_proc_deviter * it = s->private;
 49.1136 ++
 49.1137 ++	kfree (it);
 49.1138 + }
 49.1139 + 
 49.1140 + static int sg_proc_open_dev(struct inode *inode, struct file *file)
 49.1141 +diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
 49.1142 +--- a/drivers/usb/net/usbnet.c
 49.1143 ++++ b/drivers/usb/net/usbnet.c
 49.1144 +@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
 49.1145 + 
 49.1146 + 			// copy the packet data to the new skb
 49.1147 + 			memcpy(skb_put(gl_skb, size), packet->packet_data, size);
 49.1148 +-			skb_return (dev, skb);
 49.1149 ++			skb_return (dev, gl_skb);
 49.1150 + 		}
 49.1151 + 
 49.1152 + 		// advance to the next packet
 49.1153 +diff --git a/fs/bio.c b/fs/bio.c
 49.1154 +--- a/fs/bio.c
 49.1155 ++++ b/fs/bio.c
 49.1156 +@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
 49.1157 + 	 */
 49.1158 + 	bio->bi_vcnt = bio_src->bi_vcnt;
 49.1159 + 	bio->bi_size = bio_src->bi_size;
 49.1160 ++	bio->bi_idx = bio_src->bi_idx;
 49.1161 + 	bio_phys_segments(q, bio);
 49.1162 + 	bio_hw_segments(q, bio);
 49.1163 + }
 49.1164 +diff --git a/fs/char_dev.c b/fs/char_dev.c
 49.1165 +--- a/fs/char_dev.c
 49.1166 ++++ b/fs/char_dev.c
 49.1167 +@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
 49.1168 + 	struct char_device_struct *cd = NULL, **cp;
 49.1169 + 	int i = major_to_index(major);
 49.1170 + 
 49.1171 +-	up(&chrdevs_lock);
 49.1172 ++	down(&chrdevs_lock);
 49.1173 + 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
 49.1174 + 		if ((*cp)->major == major &&
 49.1175 + 		    (*cp)->baseminor == baseminor &&
 49.1176 +diff --git a/fs/exec.c b/fs/exec.c
 49.1177 +--- a/fs/exec.c
 49.1178 ++++ b/fs/exec.c
 49.1179 +@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
 49.1180 + 	}
 49.1181 + 	sig->group_exit_task = NULL;
 49.1182 + 	sig->notify_count = 0;
 49.1183 ++	sig->real_timer.data = (unsigned long)current;
 49.1184 + 	spin_unlock_irq(lock);
 49.1185 + 
 49.1186 + 	/*
 49.1187 +diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
 49.1188 +--- a/fs/isofs/compress.c
 49.1189 ++++ b/fs/isofs/compress.c
 49.1190 +@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
 49.1191 + 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
 49.1192 + 	brelse(bh);
 49.1193 + 
 49.1194 ++	if (cstart > cend)
 49.1195 ++		goto eio;
 49.1196 ++		
 49.1197 + 	csize = cend-cstart;
 49.1198 + 
 49.1199 ++	if (csize > deflateBound(1UL << zisofs_block_shift))
 49.1200 ++		goto eio;
 49.1201 ++
 49.1202 + 	/* Now page[] contains an array of pages, any of which can be NULL,
 49.1203 + 	   and the locks on which we hold.  We should now read the data and
 49.1204 + 	   release the pages.  If the pages are NULL the decompressed data
 49.1205 +diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
 49.1206 +--- a/include/asm-i386/string.h
 49.1207 ++++ b/include/asm-i386/string.h
 49.1208 +@@ -116,7 +116,8 @@ __asm__ __volatile__(
 49.1209 + 	"orb $1,%%al\n"
 49.1210 + 	"3:"
 49.1211 + 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
 49.1212 +-		     :"1" (cs),"2" (ct));
 49.1213 ++	:"1" (cs),"2" (ct)
 49.1214 ++	:"memory");
 49.1215 + return __res;
 49.1216 + }
 49.1217 + 
 49.1218 +@@ -138,8 +139,9 @@ __asm__ __volatile__(
 49.1219 + 	"3:\tsbbl %%eax,%%eax\n\t"
 49.1220 + 	"orb $1,%%al\n"
 49.1221 + 	"4:"
 49.1222 +-		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
 49.1223 +-		     :"1" (cs),"2" (ct),"3" (count));
 49.1224 ++	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
 49.1225 ++	:"1" (cs),"2" (ct),"3" (count)
 49.1226 ++	:"memory");
 49.1227 + return __res;
 49.1228 + }
 49.1229 + 
 49.1230 +@@ -158,7 +160,9 @@ __asm__ __volatile__(
 49.1231 + 	"movl $1,%1\n"
 49.1232 + 	"2:\tmovl %1,%0\n\t"
 49.1233 + 	"decl %0"
 49.1234 +-	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
 49.1235 ++	:"=a" (__res), "=&S" (d0)
 49.1236 ++	:"1" (s),"0" (c)
 49.1237 ++	:"memory");
 49.1238 + return __res;
 49.1239 + }
 49.1240 + 
 49.1241 +@@ -175,7 +179,9 @@ __asm__ __volatile__(
 49.1242 + 	"leal -1(%%esi),%0\n"
 49.1243 + 	"2:\ttestb %%al,%%al\n\t"
 49.1244 + 	"jne 1b"
 49.1245 +-	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
 49.1246 ++	:"=g" (__res), "=&S" (d0), "=&a" (d1)
 49.1247 ++	:"0" (0),"1" (s),"2" (c)
 49.1248 ++	:"memory");
 49.1249 + return __res;
 49.1250 + }
 49.1251 + 
 49.1252 +@@ -189,7 +195,9 @@ __asm__ __volatile__(
 49.1253 + 	"scasb\n\t"
 49.1254 + 	"notl %0\n\t"
 49.1255 + 	"decl %0"
 49.1256 +-	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
 49.1257 ++	:"=c" (__res), "=&D" (d0)
 49.1258 ++	:"1" (s),"a" (0), "0" (0xffffffffu)
 49.1259 ++	:"memory");
 49.1260 + return __res;
 49.1261 + }
 49.1262 + 
 49.1263 +@@ -333,7 +341,9 @@ __asm__ __volatile__(
 49.1264 + 	"je 1f\n\t"
 49.1265 + 	"movl $1,%0\n"
 49.1266 + 	"1:\tdecl %0"
 49.1267 +-	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
 49.1268 ++	:"=D" (__res), "=&c" (d0)
 49.1269 ++	:"a" (c),"0" (cs),"1" (count)
 49.1270 ++	:"memory");
 49.1271 + return __res;
 49.1272 + }
 49.1273 + 
 49.1274 +@@ -369,7 +379,7 @@ __asm__ __volatile__(
 49.1275 + 	"je 2f\n\t"
 49.1276 + 	"stosb\n"
 49.1277 + 	"2:"
 49.1278 +-	: "=&c" (d0), "=&D" (d1)
 49.1279 ++	:"=&c" (d0), "=&D" (d1)
 49.1280 + 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
 49.1281 + 	:"memory");
 49.1282 + return (s);	
 49.1283 +@@ -392,7 +402,8 @@ __asm__ __volatile__(
 49.1284 + 	"jne 1b\n"
 49.1285 + 	"3:\tsubl %2,%0"
 49.1286 + 	:"=a" (__res), "=&d" (d0)
 49.1287 +-	:"c" (s),"1" (count));
 49.1288 ++	:"c" (s),"1" (count)
 49.1289 ++	:"memory");
 49.1290 + return __res;
 49.1291 + }
 49.1292 + /* end of additional stuff */
 49.1293 +@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
 49.1294 + 		"dec %%edi\n"
 49.1295 + 		"1:"
 49.1296 + 		: "=D" (addr), "=c" (size)
 49.1297 +-		: "0" (addr), "1" (size), "a" (c));
 49.1298 ++		: "0" (addr), "1" (size), "a" (c)
 49.1299 ++		: "memory");
 49.1300 + 	return addr;
 49.1301 + }
 49.1302 + 
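The string.h hunks add a "memory" clobber to each inline-asm string primitive so GCC cannot keep memory values cached in registers across the asm statement or reorder loads and stores around it. A tiny, self-contained example of the constraint syntax (GNU C, x86 only; the asm itself is trivial on purpose):

#include <stdio.h>

int main(void)
{
	int src = 42, dst = 0;

	/* Copy src to dst through registers.  The "memory" clobber tells
	 * the compiler this asm may read or write memory it cannot see,
	 * forcing pending stores to be flushed before the asm runs and
	 * cached values to be reloaded afterwards. */
	__asm__ __volatile__(
		"movl %1, %0"
		: "=r" (dst)
		: "r" (src)
		: "memory");

	printf("dst = %d\n", dst);
	return 0;
}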
 49.1303 +diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
 49.1304 +--- a/include/asm-x86_64/smp.h
 49.1305 ++++ b/include/asm-x86_64/smp.h
 49.1306 +@@ -46,6 +46,8 @@ extern int pic_mode;
 49.1307 + extern int smp_num_siblings;
 49.1308 + extern void smp_flush_tlb(void);
 49.1309 + extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 49.1310 ++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
 49.1311 ++				     int retry, int wait);
 49.1312 + extern void smp_send_reschedule(int cpu);
 49.1313 + extern void smp_invalidate_rcv(void);		/* Process an NMI */
 49.1314 + extern void zap_low_mappings(void);
 49.1315 +diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
 49.1316 +--- a/include/linux/if_shaper.h
 49.1317 ++++ b/include/linux/if_shaper.h
 49.1318 +@@ -23,7 +23,7 @@ struct shaper
 49.1319 + 	__u32 shapeclock;
 49.1320 + 	unsigned long recovery;	/* Time we can next clock a packet out on
 49.1321 + 				   an empty queue */
 49.1322 +-	struct semaphore sem;
 49.1323 ++	spinlock_t lock;
 49.1324 +         struct net_device_stats stats;
 49.1325 + 	struct net_device *dev;
 49.1326 + 	int  (*hard_start_xmit) (struct sk_buff *skb,
 49.1327 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
 49.1328 +--- a/include/linux/skbuff.h
 49.1329 ++++ b/include/linux/skbuff.h
 49.1330 +@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
 49.1331 + {
 49.1332 + 	int hlen = skb_headlen(skb);
 49.1333 + 
 49.1334 +-	if (offset + len <= hlen)
 49.1335 ++	if (hlen - offset >= len)
 49.1336 + 		return skb->data + offset;
 49.1337 + 
 49.1338 + 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
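The skbuff.h change rewrites the bounds test so the arithmetic cannot be subverted: with signed ints, a hostile offset makes "offset + len" overflow (formally undefined; in practice it wraps negative) and the old check wrongly passes, while "hlen - offset" stays representable and goes hugely negative, rejecting the request. A minimal demonstration of the two forms (the overflow in the old expression is itself undefined behaviour, which is exactly why that check is untrustworthy; build without optimization to see the wrap):

#include <stdio.h>

int main(void)
{
	int hlen = 64;		/* bytes actually present in the header */
	int offset = 0x7ffffff0;/* hostile, near-INT_MAX offset */
	int len = 32;

	/* Old test: offset + len overflows a signed int and typically
	 * wraps negative, so the bounds check can wrongly pass and
	 * out-of-bounds data would be read. */
	if (offset + len <= hlen)
		puts("old check: passes (bug)");

	/* New test: hlen - offset stays representable and is hugely
	 * negative for oversized offsets, so the request is rejected. */
	if (hlen - offset >= len)
		puts("new check: passes");
	else
		puts("new check: rejects the oversized offset");

	return 0;
}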
 49.1339 +diff --git a/include/linux/zlib.h b/include/linux/zlib.h
 49.1340 +--- a/include/linux/zlib.h
 49.1341 ++++ b/include/linux/zlib.h
 49.1342 +@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
 49.1343 +    stream state was inconsistent (such as zalloc or state being NULL).
 49.1344 + */
 49.1345 + 
 49.1346 ++static inline unsigned long deflateBound(unsigned long s)
 49.1347 ++{
 49.1348 ++	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
 49.1349 ++}
 49.1350 ++
 49.1351 + extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
 49.1352 + /*
 49.1353 +      Dynamically update the compression level and compression strategy.  The
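The deflateBound() inline added to zlib.h above gives a conservative worst case for the size of deflate output from an s-byte input (roughly s plus s/8 plus s/64 plus 11 bytes of framing); the isofs hunk uses it to reject compressed extents larger than any valid stream for one block could be. A quick standalone check of the arithmetic, with the helper copied under a local name to avoid implying it is part of zlib's exported API:

#include <stdio.h>

/* Local copy of the kernel's worst-case deflate output bound. */
static unsigned long deflate_bound(unsigned long s)
{
	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}

int main(void)
{
	unsigned long block = 1UL << 15;	/* a 32 KiB zisofs block */

	/* Prints 37387: any on-disk extent bigger than this cannot be
	 * a valid deflate stream for one block, so isofs bails with -EIO
	 * instead of feeding garbage to the decompressor. */
	printf("deflateBound(%lu) = %lu\n", block, deflate_bound(block));
	return 0;
}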
 49.1354 +diff --git a/kernel/module.c b/kernel/module.c
 49.1355 +--- a/kernel/module.c
 49.1356 ++++ b/kernel/module.c
 49.1357 +@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
 49.1358 + /* Created by linker magic */
 49.1359 + extern char __per_cpu_start[], __per_cpu_end[];
 49.1360 + 
 49.1361 +-static void *percpu_modalloc(unsigned long size, unsigned long align)
 49.1362 ++static void *percpu_modalloc(unsigned long size, unsigned long align,
 49.1363 ++			     const char *name)
 49.1364 + {
 49.1365 + 	unsigned long extra;
 49.1366 + 	unsigned int i;
 49.1367 + 	void *ptr;
 49.1368 + 
 49.1369 +-	BUG_ON(align > SMP_CACHE_BYTES);
 49.1370 ++	if (align > SMP_CACHE_BYTES) {
 49.1371 ++		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
 49.1372 ++		       name, align, SMP_CACHE_BYTES);
 49.1373 ++		align = SMP_CACHE_BYTES;
 49.1374 ++	}
 49.1375 + 
 49.1376 + 	ptr = __per_cpu_start;
 49.1377 + 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
 49.1378 +@@ -347,7 +352,8 @@ static int percpu_modinit(void)
 49.1379 + }	
 49.1380 + __initcall(percpu_modinit);
 49.1381 + #else /* ... !CONFIG_SMP */
 49.1382 +-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
 49.1383 ++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
 49.1384 ++				    const char *name)
 49.1385 + {
 49.1386 + 	return NULL;
 49.1387 + }
 49.1388 +@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
 49.1389 + 	if (pcpuindex) {
 49.1390 + 		/* We have a special allocation for this section. */
 49.1391 + 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
 49.1392 +-					 sechdrs[pcpuindex].sh_addralign);
 49.1393 ++					 sechdrs[pcpuindex].sh_addralign,
 49.1394 ++					 mod->name);
 49.1395 + 		if (!percpu) {
 49.1396 + 			err = -ENOMEM;
 49.1397 + 			goto free_mod;
 49.1398 +diff --git a/kernel/signal.c b/kernel/signal.c
 49.1399 +--- a/kernel/signal.c
 49.1400 ++++ b/kernel/signal.c
 49.1401 +@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
 49.1402 + {
 49.1403 + 	struct task_struct *t;
 49.1404 + 
 49.1405 +-	if (p->flags & SIGNAL_GROUP_EXIT)
 49.1406 ++	if (p->signal->flags & SIGNAL_GROUP_EXIT)
 49.1407 + 		/*
 49.1408 + 		 * The process is in the middle of dying already.
 49.1409 + 		 */
 49.1410 +diff --git a/lib/inflate.c b/lib/inflate.c
 49.1411 +--- a/lib/inflate.c
 49.1412 ++++ b/lib/inflate.c
 49.1413 +@@ -326,7 +326,7 @@ DEBG("huft1 ");
 49.1414 +   {
 49.1415 +     *t = (struct huft *)NULL;
 49.1416 +     *m = 0;
 49.1417 +-    return 0;
 49.1418 ++    return 2;
 49.1419 +   }
 49.1420 + 
 49.1421 + DEBG("huft2 ");
 49.1422 +@@ -374,6 +374,7 @@ DEBG("huft5 ");
 49.1423 +     if ((j = *p++) != 0)
 49.1424 +       v[x[j]++] = i;
 49.1425 +   } while (++i < n);
 49.1426 ++  n = x[g];                   /* set n to length of v */
 49.1427 + 
 49.1428 + DEBG("h6 ");
 49.1429 + 
 49.1430 +@@ -410,12 +411,13 @@ DEBG1("1 ");
 49.1431 + DEBG1("2 ");
 49.1432 +           f -= a + 1;           /* deduct codes from patterns left */
 49.1433 +           xp = c + k;
 49.1434 +-          while (++j < z)       /* try smaller tables up to z bits */
 49.1435 +-          {
 49.1436 +-            if ((f <<= 1) <= *++xp)
 49.1437 +-              break;            /* enough codes to use up j bits */
 49.1438 +-            f -= *xp;           /* else deduct codes from patterns */
 49.1439 +-          }
 49.1440 ++          if (j < z)
 49.1441 ++            while (++j < z)       /* try smaller tables up to z bits */
 49.1442 ++            {
 49.1443 ++              if ((f <<= 1) <= *++xp)
 49.1444 ++                break;            /* enough codes to use up j bits */
 49.1445 ++              f -= *xp;           /* else deduct codes from patterns */
 49.1446 ++            }
 49.1447 +         }
 49.1448 + DEBG1("3 ");
 49.1449 +         z = 1 << j;             /* table entries for j-bit table */
 49.1450 +diff --git a/mm/memory.c b/mm/memory.c
 49.1451 +--- a/mm/memory.c
 49.1452 ++++ b/mm/memory.c
 49.1453 +@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
 49.1454 + {
 49.1455 + 	pgd_t *pgd;
 49.1456 + 	unsigned long next;
 49.1457 +-	unsigned long end = addr + size;
 49.1458 ++	unsigned long end = addr + PAGE_ALIGN(size);
 49.1459 + 	struct mm_struct *mm = vma->vm_mm;
 49.1460 + 	int err;
 49.1461 + 
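The mm/memory.c fix rounds size up to a page boundary before computing end, so a caller passing a non-page-multiple size to remap_pfn_range() still gets the final partial page mapped. The rounding is the usual power-of-two align-up; a sketch assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x10000;
	unsigned long size = 5000;	/* not a multiple of PAGE_SIZE */

	/* Without alignment the mapping loop stops short of the last page. */
	printf("unaligned end: 0x%lx\n", addr + size);		/* 0x11388 */
	printf("aligned end:   0x%lx\n", addr + PAGE_ALIGN(size));	/* 0x12000 */
	return 0;
}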
 49.1462 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
 49.1463 +--- a/mm/mempolicy.c
 49.1464 ++++ b/mm/mempolicy.c
 49.1465 +@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
 49.1466 + 	struct mempolicy *new;
 49.1467 + 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
 49.1468 + 
 49.1469 +-	if (mode > MPOL_MAX)
 49.1470 ++	if (mode < 0 || mode > MPOL_MAX)
 49.1471 + 		return -EINVAL;
 49.1472 + 	err = get_nodes(nodes, nmask, maxnode, mode);
 49.1473 + 	if (err)
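The mempolicy fix adds the missing lower bound: mode is a signed int from userspace, so checking only "mode > MPOL_MAX" lets a negative value through to index arrays out of bounds. A minimal sketch of validating a signed enum-like input (the MPOL_MAX value here is illustrative):

#include <stdio.h>

#define MPOL_MAX 3	/* illustrative upper bound, as in the patched check */

static long set_policy(int mode)
{
	/* Both bounds must be checked for a signed value from userspace;
	 * "mode > MPOL_MAX" alone accepts mode == -1. */
	if (mode < 0 || mode > MPOL_MAX)
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("set_policy(-1) = %ld\n", set_policy(-1));	/* rejected */
	printf("set_policy(1)  = %ld\n", set_policy(1));	/* accepted */
	return 0;
}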
 49.1474 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
 49.1475 +--- a/net/8021q/vlan.c
 49.1476 ++++ b/net/8021q/vlan.c
 49.1477 +@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
 49.1478 + 			if (!vlandev)
 49.1479 + 				continue;
 49.1480 + 
 49.1481 ++			if (netif_carrier_ok(dev)) {
 49.1482 ++				if (!netif_carrier_ok(vlandev))
 49.1483 ++					netif_carrier_on(vlandev);
 49.1484 ++			} else {
 49.1485 ++				if (netif_carrier_ok(vlandev))
 49.1486 ++					netif_carrier_off(vlandev);
 49.1487 ++			}
 49.1488 ++
 49.1489 + 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
 49.1490 + 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
 49.1491 + 					| flgs;
 49.1492 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
 49.1493 +--- a/net/ipv4/icmp.c
 49.1494 ++++ b/net/ipv4/icmp.c
 49.1495 +@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
 49.1496 + {
 49.1497 + 	struct sk_buff *skb;
 49.1498 + 
 49.1499 +-	ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
 49.1500 +-		       icmp_param->data_len+icmp_param->head_len,
 49.1501 +-		       icmp_param->head_len,
 49.1502 +-		       ipc, rt, MSG_DONTWAIT);
 49.1503 +-
 49.1504 +-	if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
 49.1505 ++	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
 49.1506 ++		           icmp_param->data_len+icmp_param->head_len,
 49.1507 ++		           icmp_param->head_len,
 49.1508 ++		           ipc, rt, MSG_DONTWAIT) < 0)
 49.1509 ++		ip_flush_pending_frames(icmp_socket->sk);
 49.1510 ++	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
 49.1511 + 		struct icmphdr *icmph = skb->h.icmph;
 49.1512 + 		unsigned int csum = 0;
 49.1513 + 		struct sk_buff *skb1;
 49.1514 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
 49.1515 +--- a/net/ipv4/ip_output.c
 49.1516 ++++ b/net/ipv4/ip_output.c
 49.1517 +@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
 49.1518 + #ifdef CONFIG_NETFILTER_DEBUG
 49.1519 + 	nf_debug_ip_loopback_xmit(newskb);
 49.1520 + #endif
 49.1521 +-	nf_reset(newskb);
 49.1522 + 	netif_rx(newskb);
 49.1523 + 	return 0;
 49.1524 + }
 49.1525 +@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
 49.1526 + 	nf_debug_ip_finish_output2(skb);
 49.1527 + #endif /*CONFIG_NETFILTER_DEBUG*/
 49.1528 + 
 49.1529 +-	nf_reset(skb);
 49.1530 +-
 49.1531 + 	if (hh) {
 49.1532 + 		int hh_alen;
 49.1533 + 
 49.1534 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
 49.1535 +--- a/net/ipv4/ip_sockglue.c
 49.1536 ++++ b/net/ipv4/ip_sockglue.c
 49.1537 +@@ -848,6 +848,9 @@ mc_msf_out:
 49.1538 +  
 49.1539 + 		case IP_IPSEC_POLICY:
 49.1540 + 		case IP_XFRM_POLICY:
 49.1541 ++			err = -EPERM;
 49.1542 ++			if (!capable(CAP_NET_ADMIN))
 49.1543 ++				break;
 49.1544 + 			err = xfrm_user_policy(sk, optname, optval, optlen);
 49.1545 + 			break;
 49.1546 + 
 49.1547 +diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
 49.1548 +--- a/net/ipv4/netfilter/ip_conntrack_core.c
 49.1549 ++++ b/net/ipv4/netfilter/ip_conntrack_core.c
 49.1550 +@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
 49.1551 + 		schedule();
 49.1552 + 		goto i_see_dead_people;
 49.1553 + 	}
 49.1554 ++	/* wait until all references to ip_conntrack_untracked are dropped */
 49.1555 ++	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
 49.1556 ++		schedule();
 49.1557 + 
 49.1558 + 	kmem_cache_destroy(ip_conntrack_cachep);
 49.1559 + 	kmem_cache_destroy(ip_conntrack_expect_cachep);
 49.1560 +diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
 49.1561 +--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
 49.1562 ++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
 49.1563 +@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
 49.1564 + 				        const struct net_device *out,
 49.1565 + 				        int (*okfn)(struct sk_buff *))
 49.1566 + {
 49.1567 ++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
 49.1568 ++	/* Previously seen (loopback)?  Ignore.  Do this before
 49.1569 ++           fragment check. */
 49.1570 ++	if ((*pskb)->nfct)
 49.1571 ++		return NF_ACCEPT;
 49.1572 ++#endif
 49.1573 ++
 49.1574 + 	/* Gather fragments. */
 49.1575 + 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
 49.1576 + 		*pskb = ip_ct_gather_frags(*pskb,
 49.1577 +diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
 49.1578 +--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
 49.1579 ++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
 49.1580 +@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
 49.1581 + 		 enum ip_nat_manip_type maniptype,
 49.1582 + 		 const struct ip_conntrack *conntrack)
 49.1583 + {
 49.1584 +-	static u_int16_t port, *portptr;
 49.1585 ++	static u_int16_t port;
 49.1586 ++	u_int16_t *portptr;
 49.1587 + 	unsigned int range_size, min, i;
 49.1588 + 
 49.1589 + 	if (maniptype == IP_NAT_MANIP_SRC)
 49.1590 +diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
 49.1591 +--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
 49.1592 ++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
 49.1593 +@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
 49.1594 + 		 enum ip_nat_manip_type maniptype,
 49.1595 + 		 const struct ip_conntrack *conntrack)
 49.1596 + {
 49.1597 +-	static u_int16_t port, *portptr;
 49.1598 ++	static u_int16_t port;
 49.1599 ++	u_int16_t *portptr;
 49.1600 + 	unsigned int range_size, min, i;
 49.1601 + 
 49.1602 + 	if (maniptype == IP_NAT_MANIP_SRC)
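The two NAT hunks fix a subtle C declaration bug: in "static u_int16_t port, *portptr;" the storage class applies to both declarators, so portptr was an unintended piece of shared mutable state; splitting the declaration keeps the counter static while the pointer becomes an ordinary automatic variable. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

static void pick_port(int manip_src, uint16_t *src, uint16_t *dst)
{
	static uint16_t port;	/* deliberately persistent across calls */
	uint16_t *portptr;	/* per-call scratch; must NOT be static, or
				 * concurrent callers would stomp each other */

	portptr = manip_src ? src : dst;
	*portptr = ++port;
}

int main(void)
{
	uint16_t s = 0, d = 0;

	pick_port(1, &s, &d);
	pick_port(0, &s, &d);
	printf("src=%u dst=%u\n", s, d);	/* src=1 dst=2 */
	return 0;
}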
 49.1603 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
 49.1604 +--- a/net/ipv6/ip6_input.c
 49.1605 ++++ b/net/ipv6/ip6_input.c
 49.1606 +@@ -198,12 +198,13 @@ resubmit:
 49.1607 + 		if (!raw_sk) {
 49.1608 + 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 49.1609 + 				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
 49.1610 +-				icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
 49.1611 ++				icmpv6_send(skb, ICMPV6_PARAMPROB,
 49.1612 ++				            ICMPV6_UNK_NEXTHDR, nhoff,
 49.1613 ++				            skb->dev);
 49.1614 + 			}
 49.1615 +-		} else {
 49.1616 ++		} else
 49.1617 + 			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
 49.1618 +-			kfree_skb(skb);
 49.1619 +-		}
 49.1620 ++		kfree_skb(skb);
 49.1621 + 	}
 49.1622 + 	rcu_read_unlock();
 49.1623 + 	return 0;
 49.1624 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
 49.1625 +--- a/net/ipv6/ipv6_sockglue.c
 49.1626 ++++ b/net/ipv6/ipv6_sockglue.c
 49.1627 +@@ -503,6 +503,9 @@ done:
 49.1628 + 		break;
 49.1629 + 	case IPV6_IPSEC_POLICY:
 49.1630 + 	case IPV6_XFRM_POLICY:
 49.1631 ++		retv = -EPERM;
 49.1632 ++		if (!capable(CAP_NET_ADMIN))
 49.1633 ++			break;
 49.1634 + 		retv = xfrm_user_policy(sk, optname, optval, optlen);
 49.1635 + 		break;
 49.1636 + 
 49.1637 +diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
 49.1638 +--- a/net/ipv6/netfilter/ip6_queue.c
 49.1639 ++++ b/net/ipv6/netfilter/ip6_queue.c
 49.1640 +@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
 49.1641 + static void
 49.1642 + ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
 49.1643 + {
 49.1644 ++	local_bh_disable();
 49.1645 + 	nf_reinject(entry->skb, entry->info, verdict);
 49.1646 ++	local_bh_enable();
 49.1647 + 	kfree(entry);
 49.1648 + }
 49.1649 + 
 49.1650 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
 49.1651 +--- a/net/netlink/af_netlink.c
 49.1652 ++++ b/net/netlink/af_netlink.c
 49.1653 +@@ -315,8 +315,8 @@ err:
 49.1654 + static void netlink_remove(struct sock *sk)
 49.1655 + {
 49.1656 + 	netlink_table_grab();
 49.1657 +-	nl_table[sk->sk_protocol].hash.entries--;
 49.1658 +-	sk_del_node_init(sk);
 49.1659 ++	if (sk_del_node_init(sk))
 49.1660 ++		nl_table[sk->sk_protocol].hash.entries--;
 49.1661 + 	if (nlk_sk(sk)->groups)
 49.1662 + 		__sk_del_bind_node(sk);
 49.1663 + 	netlink_table_ungrab();
 49.1664 +@@ -429,7 +429,12 @@ retry:
 49.1665 + 	err = netlink_insert(sk, pid);
 49.1666 + 	if (err == -EADDRINUSE)
 49.1667 + 		goto retry;
 49.1668 +-	return 0;
 49.1669 ++
 49.1670 ++	/* If 2 threads race to autobind, that is fine.  */
 49.1671 ++	if (err == -EBUSY)
 49.1672 ++		err = 0;
 49.1673 ++
 49.1674 ++	return err;
 49.1675 + }
 49.1676 + 
 49.1677 + static inline int netlink_capable(struct socket *sock, unsigned int flag) 
 49.1678 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
 49.1679 +--- a/net/packet/af_packet.c
 49.1680 ++++ b/net/packet/af_packet.c
 49.1681 +@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
 49.1682 + 	dst_release(skb->dst);
 49.1683 + 	skb->dst = NULL;
 49.1684 + 
 49.1685 ++	/* drop conntrack reference */
 49.1686 ++	nf_reset(skb);
 49.1687 ++
 49.1688 + 	spkt = (struct sockaddr_pkt*)skb->cb;
 49.1689 + 
 49.1690 + 	skb_push(skb, skb->data-skb->mac.raw);
 49.1691 +@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
 49.1692 + 	dst_release(skb->dst);
 49.1693 + 	skb->dst = NULL;
 49.1694 + 
 49.1695 ++	/* drop conntrack reference */
 49.1696 ++	nf_reset(skb);
 49.1697 ++
 49.1698 + 	spin_lock(&sk->sk_receive_queue.lock);
 49.1699 + 	po->stats.tp_packets++;
 49.1700 + 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 49.1701 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
 49.1702 +--- a/net/xfrm/xfrm_user.c
 49.1703 ++++ b/net/xfrm/xfrm_user.c
 49.1704 +@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
 49.1705 + 	if (nr > XFRM_MAX_DEPTH)
 49.1706 + 		return NULL;
 49.1707 + 
 49.1708 ++	if (p->dir > XFRM_POLICY_OUT)
 49.1709 ++		return NULL;
 49.1710 ++
 49.1711 + 	xp = xfrm_policy_alloc(GFP_KERNEL);
 49.1712 + 	if (xp == NULL) {
 49.1713 + 		*dir = -ENOBUFS;
 49.1714 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
 49.1715 +--- a/security/keys/keyring.c
 49.1716 ++++ b/security/keys/keyring.c
 49.1717 +@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
 49.1718 + 
 49.1719 + 	if (keyring->description) {
 49.1720 + 		write_lock(&keyring_name_lock);
 49.1721 +-		list_del(&keyring->type_data.link);
 49.1722 ++
 49.1723 ++		if (keyring->type_data.link.next != NULL &&
 49.1724 ++		    !list_empty(&keyring->type_data.link))
 49.1725 ++			list_del(&keyring->type_data.link);
 49.1726 ++
 49.1727 + 		write_unlock(&keyring_name_lock);
 49.1728 + 	}
 49.1729 + 
 49.1730 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
 49.1731 +--- a/security/keys/process_keys.c
 49.1732 ++++ b/security/keys/process_keys.c
 49.1733 +@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
 49.1734 + 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
 49.1735 + 		if (IS_ERR(keyring)) {
 49.1736 + 			ret = PTR_ERR(keyring);
 49.1737 +-			goto error;
 49.1738 ++			goto error2;
 49.1739 + 		}
 49.1740 + 	}
 49.1741 + 	else if (IS_ERR(keyring)) {
    50.1 --- a/patches/linux-2.6.12/net-csum.patch	Fri Oct 21 13:58:39 2005 -0600
    50.2 +++ b/patches/linux-2.6.12/net-csum.patch	Mon Oct 24 09:08:13 2005 -0600
    50.3 @@ -9,3 +9,48 @@ diff -ur linux-2.6.11/net/ipv4/netfilter
    50.4   	    && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP,
    50.5   			         skb->ip_summed == CHECKSUM_HW ? skb->csum
    50.6   			      	 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
    50.7 +
    50.8 +--- ../xen-unstable.hg/linux-2.6.12-xen0/net/ipv4/netfilter/ip_nat_proto_udp.c	2005-06-17 14:48:29.000000000 -0500
    50.9 ++++ linux-2.6-xen-sparse/net/ipv4/netfilter/ip_nat_proto_udp.c	2005-10-14 15:17:53.000000000 -0500
   50.10 +@@ -112,11 +112,19 @@ udp_manip_pkt(struct sk_buff **pskb,
   50.11 + 		newport = tuple->dst.u.udp.port;
   50.12 + 		portptr = &hdr->dest;
   50.13 + 	}
   50.14 +-	if (hdr->check) /* 0 is a special case meaning no checksum */
   50.15 +-		hdr->check = ip_nat_cheat_check(~oldip, newip,
   50.16 ++	
   50.17 ++	if (hdr->check) { /* 0 is a special case meaning no checksum */
   50.18 ++		if ((*pskb)->proto_csum_blank) {
   50.19 ++			hdr->check = ip_nat_cheat_check(oldip, ~newip, 
   50.20 ++					ip_nat_cheat_check(*portptr ^ 0xFFFF, 
   50.21 ++						newport, hdr->check));
   50.22 ++		} else {
   50.23 ++			hdr->check = ip_nat_cheat_check(~oldip, newip,
   50.24 + 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
   50.25 + 							   newport,
   50.26 + 							   hdr->check));
   50.27 ++		}
   50.28 ++	}
   50.29 + 	*portptr = newport;
   50.30 + 	return 1;
   50.31 + }
   50.32 +--- ../xen-unstable.hg/linux-2.6.12-xen0/net/ipv4/netfilter/ip_nat_proto_tcp.c	2005-06-17 14:48:29.000000000 -0500
   50.33 ++++ linux-2.6-xen-sparse/net/ipv4/netfilter/ip_nat_proto_tcp.c	2005-10-14 16:41:20.000000000 -0500
   50.34 +@@ -127,10 +127,16 @@ tcp_manip_pkt(struct sk_buff **pskb,
   50.35 + 	if (hdrsize < sizeof(*hdr))
   50.36 + 		return 1;
   50.37 + 
   50.38 +-	hdr->check = ip_nat_cheat_check(~oldip, newip,
   50.39 ++	if ((*pskb)->proto_csum_blank) {
   50.40 ++		hdr->check = ip_nat_cheat_check(oldip, ~newip,
   50.41 ++				ip_nat_cheat_check(oldport ^ 0xFFFF,
   50.42 ++					newport, hdr->check));
   50.43 ++	} else { 
   50.44 ++		hdr->check = ip_nat_cheat_check(~oldip, newip,
   50.45 + 					ip_nat_cheat_check(oldport ^ 0xFFFF,
   50.46 + 							   newport,
   50.47 + 							   hdr->check));
   50.48 ++	}
   50.49 + 	return 1;
   50.50 + }
   50.51 + 
    51.1 --- a/patches/linux-2.6.12/patch-2.6.12.5	Fri Oct 21 13:58:39 2005 -0600
    51.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    51.3 @@ -1,1614 +0,0 @@
    51.4 -diff --git a/Makefile b/Makefile
    51.5 ---- a/Makefile
    51.6 -+++ b/Makefile
    51.7 -@@ -1,7 +1,7 @@
    51.8 - VERSION = 2
    51.9 - PATCHLEVEL = 6
   51.10 - SUBLEVEL = 12
   51.11 --EXTRAVERSION =
   51.12 -+EXTRAVERSION = .5
   51.13 - NAME=Woozy Numbat
   51.14 - 
   51.15 - # *DOCUMENTATION*
   51.16 -@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
   51.17 - #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
   51.18 - #Adding $(srctree) adds about 20M on i386 to the size of the output file!
   51.19 - 
   51.20 --ifeq ($(KBUILD_OUTPUT),)
   51.21 -+ifeq ($(src),$(obj))
   51.22 - __srctree =
   51.23 - else
   51.24 - __srctree = $(srctree)/
   51.25 -diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   51.26 ---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   51.27 -+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
   51.28 -@@ -44,7 +44,7 @@
   51.29 - 
   51.30 - #define PFX "powernow-k8: "
   51.31 - #define BFX PFX "BIOS error: "
   51.32 --#define VERSION "version 1.40.2"
   51.33 -+#define VERSION "version 1.40.4"
   51.34 - #include "powernow-k8.h"
   51.35 - 
   51.36 - /* serialize freq changes  */
   51.37 -@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
   51.38 - {
   51.39 - 	struct powernow_k8_data *data;
   51.40 - 	cpumask_t oldmask = CPU_MASK_ALL;
   51.41 --	int rc;
   51.42 -+	int rc, i;
   51.43 - 
   51.44 - 	if (!check_supported_cpu(pol->cpu))
   51.45 - 		return -ENODEV;
   51.46 -@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
   51.47 - 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
   51.48 - 	       data->currfid, data->currvid);
   51.49 - 
   51.50 --	powernow_data[pol->cpu] = data;
   51.51 -+	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
   51.52 -+		powernow_data[i] = data;
   51.53 -+	}
   51.54 - 
   51.55 - 	return 0;
   51.56 - 
   51.57 -diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
   51.58 ---- a/arch/i386/kernel/process.c
   51.59 -+++ b/arch/i386/kernel/process.c
   51.60 -@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
   51.61 - 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
   51.62 - 		return -EINVAL;
   51.63 - 
   51.64 -+	memset(&info, 0, sizeof(info));
   51.65 -+
   51.66 - 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
   51.67 - 
   51.68 - 	info.entry_number = idx;
   51.69 -diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
   51.70 ---- a/arch/ia64/kernel/ptrace.c
   51.71 -+++ b/arch/ia64/kernel/ptrace.c
   51.72 -@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
   51.73 - 				*data = (pt->cr_ipsr & IPSR_MASK);
   51.74 - 			return 0;
   51.75 - 
   51.76 -+		      case PT_AR_RSC:
   51.77 -+			if (write_access)
   51.78 -+				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
   51.79 -+			else
   51.80 -+				*data = pt->ar_rsc;
   51.81 -+			return 0;
   51.82 -+
   51.83 - 		      case PT_AR_RNAT:
   51.84 - 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
   51.85 - 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
   51.86 -@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
   51.87 - 		      case PT_AR_BSPSTORE:
   51.88 - 			ptr = pt_reg_addr(pt, ar_bspstore);
   51.89 - 			break;
   51.90 --		      case PT_AR_RSC:
   51.91 --			ptr = pt_reg_addr(pt, ar_rsc);
   51.92 --			break;
   51.93 - 		      case PT_AR_UNAT:
   51.94 - 			ptr = pt_reg_addr(pt, ar_unat);
   51.95 - 			break;
   51.96 -@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
   51.97 - static long
   51.98 - ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
   51.99 - {
  51.100 --	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
  51.101 -+	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
  51.102 - 	struct unw_frame_info info;
  51.103 - 	struct switch_stack *sw;
  51.104 - 	struct ia64_fpreg fpval;
  51.105 -@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
  51.106 - 	/* app regs */
  51.107 - 
  51.108 - 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
  51.109 --	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
  51.110 -+	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
  51.111 - 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
  51.112 - 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
  51.113 - 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
  51.114 -@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
  51.115 - 	retval |= __get_user(nat_bits, &ppr->nat);
  51.116 - 
  51.117 - 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
  51.118 -+	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
  51.119 - 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
  51.120 - 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
  51.121 - 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
  51.122 -diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
  51.123 ---- a/arch/ia64/kernel/signal.c
  51.124 -+++ b/arch/ia64/kernel/signal.c
  51.125 -@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
  51.126 - static long
  51.127 - restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
  51.128 - {
  51.129 --	unsigned long ip, flags, nat, um, cfm;
  51.130 -+	unsigned long ip, flags, nat, um, cfm, rsc;
  51.131 - 	long err;
  51.132 - 
  51.133 - 	/* Always make any pending restarted system calls return -EINTR */
  51.134 -@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
  51.135 - 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
  51.136 - 	err |= __get_user(cfm, &sc->sc_cfm);
  51.137 - 	err |= __get_user(um, &sc->sc_um);			/* user mask */
  51.138 --	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
  51.139 -+	err |= __get_user(rsc, &sc->sc_ar_rsc);
  51.140 - 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
  51.141 - 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
  51.142 - 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
  51.143 -@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
  51.144 - 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
  51.145 - 
  51.146 - 	scr->pt.cr_ifs = cfm | (1UL << 63);
  51.147 -+	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
  51.148 - 
  51.149 - 	/* establish new instruction pointer: */
  51.150 - 	scr->pt.cr_iip = ip & ~0x3UL;
  51.151 -diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
  51.152 ---- a/arch/ppc/kernel/time.c
  51.153 -+++ b/arch/ppc/kernel/time.c
  51.154 -@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
  51.155 - 
  51.156 - extern unsigned long wall_jiffies;
  51.157 - 
  51.158 -+/* used for timezone offset */
  51.159 -+static long timezone_offset;
  51.160 -+
  51.161 - DEFINE_SPINLOCK(rtc_lock);
  51.162 - 
  51.163 - EXPORT_SYMBOL(rtc_lock);
  51.164 -@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
  51.165 - 		     xtime.tv_sec - last_rtc_update >= 659 &&
  51.166 - 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
  51.167 - 		     jiffies - wall_jiffies == 1) {
  51.168 --		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
  51.169 -+		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
  51.170 - 				last_rtc_update = xtime.tv_sec+1;
  51.171 - 			else
  51.172 - 				/* Try again one minute later */
  51.173 -@@ -286,7 +289,7 @@ void __init time_init(void)
  51.174 - 	unsigned old_stamp, stamp, elapsed;
  51.175 - 
  51.176 -         if (ppc_md.time_init != NULL)
  51.177 --                time_offset = ppc_md.time_init();
  51.178 -+                timezone_offset = ppc_md.time_init();
  51.179 - 
  51.180 - 	if (__USE_RTC()) {
  51.181 - 		/* 601 processor: dec counts down by 128 every 128ns */
  51.182 -@@ -331,10 +334,10 @@ void __init time_init(void)
  51.183 - 	set_dec(tb_ticks_per_jiffy);
  51.184 - 
  51.185 - 	/* If platform provided a timezone (pmac), we correct the time */
  51.186 --        if (time_offset) {
  51.187 --		sys_tz.tz_minuteswest = -time_offset / 60;
  51.188 -+        if (timezone_offset) {
  51.189 -+		sys_tz.tz_minuteswest = -timezone_offset / 60;
  51.190 - 		sys_tz.tz_dsttime = 0;
  51.191 --		xtime.tv_sec -= time_offset;
  51.192 -+		xtime.tv_sec -= timezone_offset;
  51.193 -         }
  51.194 -         set_normalized_timespec(&wall_to_monotonic,
  51.195 -                                 -xtime.tv_sec, -xtime.tv_nsec);
  51.196 -diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
  51.197 ---- a/arch/ppc64/boot/zlib.c
  51.198 -+++ b/arch/ppc64/boot/zlib.c
  51.199 -@@ -1307,7 +1307,7 @@ local int huft_build(
  51.200 -   {
  51.201 -     *t = (inflate_huft *)Z_NULL;
  51.202 -     *m = 0;
  51.203 --    return Z_OK;
  51.204 -+    return Z_DATA_ERROR;
  51.205 -   }
  51.206 - 
  51.207 - 
  51.208 -@@ -1351,6 +1351,7 @@ local int huft_build(
  51.209 -     if ((j = *p++) != 0)
  51.210 -       v[x[j]++] = i;
  51.211 -   } while (++i < n);
  51.212 -+  n = x[g];			/* set n to length of v */
  51.213 - 
  51.214 - 
  51.215 -   /* Generate the Huffman codes and for each, make the table entries */
  51.216 -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
  51.217 ---- a/arch/um/kernel/process.c
  51.218 -+++ b/arch/um/kernel/process.c
  51.219 -@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
  51.220 - 	return(arg.pid);
  51.221 - }
  51.222 - 
  51.223 --static int ptrace_child(void)
  51.224 -+static int ptrace_child(void *arg)
  51.225 - {
  51.226 - 	int ret;
  51.227 - 	int pid = os_getpid(), ppid = getppid();
  51.228 -@@ -159,16 +159,20 @@ static int ptrace_child(void)
  51.229 - 	_exit(ret);
  51.230 - }
  51.231 - 
  51.232 --static int start_ptraced_child(void)
  51.233 -+static int start_ptraced_child(void **stack_out)
  51.234 - {
  51.235 -+	void *stack;
  51.236 -+	unsigned long sp;
  51.237 - 	int pid, n, status;
  51.238 - 	
  51.239 --	pid = fork();
  51.240 --	if(pid == 0)
  51.241 --		ptrace_child();
  51.242 --
  51.243 -+	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
  51.244 -+		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  51.245 -+	if(stack == MAP_FAILED)
  51.246 -+		panic("check_ptrace : mmap failed, errno = %d", errno);
  51.247 -+	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
  51.248 -+	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
  51.249 - 	if(pid < 0)
  51.250 --		panic("check_ptrace : fork failed, errno = %d", errno);
  51.251 -+		panic("check_ptrace : clone failed, errno = %d", errno);
  51.252 - 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
  51.253 - 	if(n < 0)
  51.254 - 		panic("check_ptrace : wait failed, errno = %d", errno);
  51.255 -@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
  51.256 - 		panic("check_ptrace : expected SIGSTOP, got status = %d",
  51.257 - 		      status);
  51.258 - 
  51.259 -+	*stack_out = stack;
  51.260 - 	return(pid);
  51.261 - }
  51.262 - 
  51.263 -@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
  51.264 -  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
  51.265 -  * So only for SYSEMU features we test mustpanic, while normal host features
  51.266 -  * must work anyway!*/
  51.267 --static int stop_ptraced_child(int pid, int exitcode, int mustexit)
  51.268 -+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
  51.269 - {
  51.270 - 	int status, n, ret = 0;
  51.271 - 
  51.272 - 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
  51.273 --		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
  51.274 -+		panic("check_ptrace : ptrace failed, errno = %d", errno);
  51.275 - 	CATCH_EINTR(n = waitpid(pid, &status, 0));
  51.276 - 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
  51.277 - 		int exit_with = WEXITSTATUS(status);
  51.278 -@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
  51.279 - 		printk("check_ptrace : child exited with exitcode %d, while "
  51.280 - 		      "expecting %d; status 0x%x", exit_with,
  51.281 - 		      exitcode, status);
  51.282 --		if (mustexit)
  51.283 -+		if (mustpanic)
  51.284 - 			panic("\n");
  51.285 - 		else
  51.286 - 			printk("\n");
  51.287 - 		ret = -1;
  51.288 - 	}
  51.289 - 
  51.290 -+	if(munmap(stack, PAGE_SIZE) < 0)
  51.291 -+		panic("check_ptrace : munmap failed, errno = %d", errno);
  51.292 - 	return ret;
  51.293 - }
  51.294 - 
  51.295 -@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
  51.296 - 
  51.297 - static void __init check_sysemu(void)
  51.298 - {
  51.299 -+	void *stack;
  51.300 - 	int pid, syscall, n, status, count=0;
  51.301 - 
  51.302 - 	printk("Checking syscall emulation patch for ptrace...");
  51.303 - 	sysemu_supported = 0;
  51.304 --	pid = start_ptraced_child();
  51.305 -+	pid = start_ptraced_child(&stack);
  51.306 - 
  51.307 - 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
  51.308 - 		goto fail;
  51.309 -@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
  51.310 - 		panic("check_sysemu : failed to modify system "
  51.311 - 		      "call return, errno = %d", errno);
  51.312 - 
  51.313 --	if (stop_ptraced_child(pid, 0, 0) < 0)
  51.314 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
  51.315 - 		goto fail_stopped;
  51.316 - 
  51.317 - 	sysemu_supported = 1;
  51.318 -@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
  51.319 - 	set_using_sysemu(!force_sysemu_disabled);
  51.320 - 
  51.321 - 	printk("Checking advanced syscall emulation patch for ptrace...");
  51.322 --	pid = start_ptraced_child();
  51.323 -+	pid = start_ptraced_child(&stack);
  51.324 - 	while(1){
  51.325 - 		count++;
  51.326 - 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
  51.327 -@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
  51.328 - 			break;
  51.329 - 		}
  51.330 - 	}
  51.331 --	if (stop_ptraced_child(pid, 0, 0) < 0)
  51.332 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
  51.333 - 		goto fail_stopped;
  51.334 - 
  51.335 - 	sysemu_supported = 2;
  51.336 -@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
  51.337 - 	return;
  51.338 - 
  51.339 - fail:
  51.340 --	stop_ptraced_child(pid, 1, 0);
  51.341 -+	stop_ptraced_child(pid, stack, 1, 0);
  51.342 - fail_stopped:
  51.343 - 	printk("missing\n");
  51.344 - }
  51.345 - 
  51.346 - void __init check_ptrace(void)
  51.347 - {
  51.348 -+	void *stack;
  51.349 - 	int pid, syscall, n, status;
  51.350 - 
  51.351 - 	printk("Checking that ptrace can change system call numbers...");
  51.352 --	pid = start_ptraced_child();
  51.353 -+	pid = start_ptraced_child(&stack);
  51.354 - 
  51.355 - 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
  51.356 - 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
  51.357 -@@ -330,7 +339,7 @@ void __init check_ptrace(void)
  51.358 - 			break;
  51.359 - 		}
  51.360 - 	}
  51.361 --	stop_ptraced_child(pid, 0, 1);
  51.362 -+	stop_ptraced_child(pid, stack, 0, 1);
  51.363 - 	printk("OK\n");
  51.364 - 	check_sysemu();
  51.365 - }
  51.366 -@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
  51.367 - static inline int check_skas3_ptrace_support(void)
  51.368 - {
  51.369 - 	struct ptrace_faultinfo fi;
  51.370 -+	void *stack;
  51.371 - 	int pid, n, ret = 1;
  51.372 - 
  51.373 - 	printf("Checking for the skas3 patch in the host...");
  51.374 --	pid = start_ptraced_child();
  51.375 -+	pid = start_ptraced_child(&stack);
  51.376 - 
  51.377 - 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
  51.378 - 	if (n < 0) {
  51.379 -@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
  51.380 - 	}
  51.381 - 
  51.382 - 	init_registers(pid);
  51.383 --	stop_ptraced_child(pid, 1, 1);
  51.384 -+	stop_ptraced_child(pid, stack, 1, 1);
  51.385 - 
  51.386 - 	return(ret);
  51.387 - }
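
The UML hunks above replace fork() with clone() onto an explicitly mmap()ed stack, so the ptraced child runs on memory the parent owns and stop_ptraced_child() can reclaim it with munmap(). A minimal userspace sketch of the same pattern, assuming a 4 KiB page and a hypothetical child_fn() standing in for ptrace_child():

    /* Sketch: clone a child onto a freshly mmap()ed stack, then reap it
     * and unmap the stack -- the shape of the patched start_ptraced_child()
     * and stop_ptraced_child() pair.  child_fn() is hypothetical. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/wait.h>

    #define STACK_SIZE 4096                 /* stands in for PAGE_SIZE */

    static int child_fn(void *arg)
    {
        (void)arg;
        return 0;
    }

    int main(void)
    {
        void *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (stack == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* x86 stacks grow down, so pass the top of the mapping, as the
         * patch does with "stack + PAGE_SIZE - sizeof(void *)". */
        int pid = clone(child_fn, (char *)stack + STACK_SIZE, SIGCHLD, NULL);
        if (pid < 0) {
            perror("clone");
            return 1;
        }
        waitpid(pid, NULL, 0);
        munmap(stack, STACK_SIZE);          /* cf. stop_ptraced_child() */
        return 0;
    }
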
  51.388 -diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
  51.389 ---- a/arch/x86_64/ia32/syscall32.c
  51.390 -+++ b/arch/x86_64/ia32/syscall32.c
  51.391 -@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
  51.392 - 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
  51.393 - 	struct vm_area_struct *vma;
  51.394 - 	struct mm_struct *mm = current->mm;
  51.395 -+	int ret;
  51.396 - 
  51.397 - 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  51.398 - 	if (!vma)
  51.399 -@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
  51.400 - 	vma->vm_mm = mm;
  51.401 - 
  51.402 - 	down_write(&mm->mmap_sem);
  51.403 --	insert_vm_struct(mm, vma);
  51.404 -+	if ((ret = insert_vm_struct(mm, vma))) {
  51.405 -+		up_write(&mm->mmap_sem);
  51.406 -+		kmem_cache_free(vm_area_cachep, vma);
  51.407 -+		return ret;
  51.408 -+	}
  51.409 - 	mm->total_vm += npages;
  51.410 - 	up_write(&mm->mmap_sem);
  51.411 - 	return 0;
  51.412 -diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
  51.413 ---- a/arch/x86_64/kernel/setup.c
  51.414 -+++ b/arch/x86_64/kernel/setup.c
  51.415 -@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
  51.416 - 	int cpu = smp_processor_id();
  51.417 - 	int node = 0;
  51.418 - 	unsigned bits;
  51.419 --	if (c->x86_num_cores == 1)
  51.420 --		return;
  51.421 - 
  51.422 - 	bits = 0;
  51.423 - 	while ((1 << bits) < c->x86_num_cores)
  51.424 -diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
  51.425 ---- a/arch/x86_64/kernel/smp.c
  51.426 -+++ b/arch/x86_64/kernel/smp.c
  51.427 -@@ -284,6 +284,71 @@ struct call_data_struct {
  51.428 - static struct call_data_struct * call_data;
  51.429 - 
  51.430 - /*
  51.431 -+ * this function sends a 'generic call function' IPI to one other CPU
  51.432 -+ * in the system.
  51.433 -+ */
  51.434 -+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
  51.435 -+				int nonatomic, int wait)
  51.436 -+{
  51.437 -+	struct call_data_struct data;
  51.438 -+	int cpus = 1;
  51.439 -+
  51.440 -+	data.func = func;
  51.441 -+	data.info = info;
  51.442 -+	atomic_set(&data.started, 0);
  51.443 -+	data.wait = wait;
  51.444 -+	if (wait)
  51.445 -+		atomic_set(&data.finished, 0);
  51.446 -+
  51.447 -+	call_data = &data;
  51.448 -+	wmb();
  51.449 -+	/* Send a message to all other CPUs and wait for them to respond */
  51.450 -+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
  51.451 -+
  51.452 -+	/* Wait for response */
  51.453 -+	while (atomic_read(&data.started) != cpus)
  51.454 -+		cpu_relax();
  51.455 -+
  51.456 -+	if (!wait)
  51.457 -+		return;
  51.458 -+
  51.459 -+	while (atomic_read(&data.finished) != cpus)
  51.460 -+		cpu_relax();
  51.461 -+}
  51.462 -+
  51.463 -+/*
  51.464 -+ * Run a function on another CPU
  51.465 -+ *  <func>	The function to run. This must be fast and non-blocking.
  51.466 -+ *  <info>	An arbitrary pointer to pass to the function.
  51.467 -+ *  <nonatomic>	Currently unused.
  51.468 -+ *  <wait>	If true, wait until function has completed on other CPUs.
  51.469 -+ *  [RETURNS]   0 on success, else a negative status code.
  51.470 -+ *
   51.471 -+ * Does not return until the remote CPU is nearly ready to execute <func>,
   51.472 -+ * is executing it, or has already executed it.
  51.473 -+ */
  51.474 -+
   51.475 -+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
  51.476 -+	int nonatomic, int wait)
  51.477 -+{
   51.478 -+
   51.479 -+	int me = get_cpu(); /* prevent preemption and migration to another cpu */
  51.480 -+
  51.481 -+	if (cpu == me) {
  51.482 -+		printk("%s: trying to call self\n", __func__);
  51.483 -+		put_cpu();
  51.484 -+		return -EBUSY;
  51.485 -+	}
  51.486 -+	spin_lock_bh(&call_lock);
  51.487 -+
   51.488 -+	__smp_call_function_single(cpu, func, info, nonatomic, wait);
  51.489 -+
  51.490 -+	spin_unlock_bh(&call_lock);
  51.491 -+	put_cpu();
  51.492 -+	return 0;
  51.493 -+}
  51.494 -+
  51.495 -+/*
  51.496 -  * this function sends a 'generic call function' IPI to all other CPUs
  51.497 -  * in the system.
  51.498 -  */
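
The new smp_call_function_single() follows the same protocol as the existing smp_call_function(): publish a call_data descriptor, send CALL_FUNCTION_VECTOR to the target, and spin on the started (and optionally finished) counters. A hedged, kernel-context sketch of how a caller uses it; do_work() is a hypothetical fast, non-blocking callback:

    /* Usage sketch for the API added above (kernel context assumed). */
    static void do_work(void *info)
    {
        /* runs on the target cpu in IPI (interrupt) context -- keep it
         * fast and non-blocking */
    }

    static void kick_cpu(int cpu)
    {
        /* nonatomic is currently unused; wait=1 spins until do_work()
         * has finished on the remote cpu.  Calling this for the local
         * cpu returns -EBUSY, as the code above shows. */
        int ret = smp_call_function_single(cpu, do_work, NULL, 0, 1);
        if (ret)
            printk(KERN_WARNING "kick_cpu: IPI to cpu %d failed (%d)\n",
                   cpu, ret);
    }
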
  51.499 -diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
  51.500 ---- a/arch/x86_64/kernel/smpboot.c
  51.501 -+++ b/arch/x86_64/kernel/smpboot.c
  51.502 -@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
  51.503 - {
  51.504 - 	unsigned long flags, i;
  51.505 - 
  51.506 --	if (smp_processor_id() != boot_cpu_id)
  51.507 --		return;
  51.508 --
  51.509 - 	go[MASTER] = 0;
  51.510 - 
  51.511 - 	local_irq_save(flags);
  51.512 -@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
  51.513 - 	return tcenter - best_tm;
  51.514 - }
  51.515 - 
  51.516 --static __cpuinit void sync_tsc(void)
  51.517 -+static __cpuinit void sync_tsc(unsigned int master)
  51.518 - {
  51.519 - 	int i, done = 0;
  51.520 - 	long delta, adj, adjust_latency = 0;
  51.521 -@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
  51.522 - 	} t[NUM_ROUNDS] __cpuinitdata;
  51.523 - #endif
  51.524 - 
  51.525 -+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
  51.526 -+		smp_processor_id(), master);
  51.527 -+
  51.528 - 	go[MASTER] = 1;
  51.529 - 
  51.530 --	smp_call_function(sync_master, NULL, 1, 0);
  51.531 -+	/* It is dangerous to broadcast IPI as cpus are coming up,
   51.532 -+	 * as they may not be ready to accept them.  Since we only
   51.533 -+	 * need to send the ipi to the boot cpu here, direct the
   51.534 -+	 * message at it and avoid the race.
  51.535 -+	 */
  51.536 -+	smp_call_function_single(master, sync_master, NULL, 1, 0);
  51.537 - 
  51.538 - 	while (go[MASTER])	/* wait for master to be ready */
  51.539 - 		no_cpu_relax();
  51.540 -@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
  51.541 - 	printk(KERN_INFO
  51.542 - 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
  51.543 - 	       "maxerr %lu cycles)\n",
  51.544 --	       smp_processor_id(), boot_cpu_id, delta, rt);
  51.545 -+	       smp_processor_id(), master, delta, rt);
  51.546 - }
  51.547 - 
  51.548 - static void __cpuinit tsc_sync_wait(void)
  51.549 - {
  51.550 - 	if (notscsync || !cpu_has_tsc)
  51.551 - 		return;
  51.552 --	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
  51.553 --			boot_cpu_id);
  51.554 --	sync_tsc();
  51.555 -+	sync_tsc(0);
  51.556 - }
  51.557 - 
  51.558 - static __init int notscsync_setup(char *s)
  51.559 -diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
  51.560 ---- a/drivers/acpi/pci_irq.c
  51.561 -+++ b/drivers/acpi/pci_irq.c
  51.562 -@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
  51.563 - 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
  51.564 - 			pci_name(dev), ('A' + pin));
  51.565 - 		/* Interrupt Line values above 0xF are forbidden */
  51.566 --		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
  51.567 -+		if (dev->irq > 0 && (dev->irq <= 0xF)) {
  51.568 - 			printk(" - using IRQ %d\n", dev->irq);
  51.569 -+			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
  51.570 - 			return_VALUE(0);
  51.571 - 		}
  51.572 - 		else {
  51.573 -diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
  51.574 ---- a/drivers/char/rocket.c
  51.575 -+++ b/drivers/char/rocket.c
  51.576 -@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
  51.577 - 		ToRecv = space;
  51.578 - 
  51.579 - 	if (ToRecv <= 0)
  51.580 --		return;
  51.581 -+		goto done;
  51.582 - 
  51.583 - 	/*
  51.584 - 	 * if status indicates there are errored characters in the
  51.585 -@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
  51.586 - 	}
  51.587 - 	/*  Push the data up to the tty layer */
  51.588 - 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
  51.589 -+done:
  51.590 - 	tty_ldisc_deref(ld);
  51.591 - }
  51.592 - 
  51.593 -diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
  51.594 ---- a/drivers/char/tpm/tpm.c
  51.595 -+++ b/drivers/char/tpm/tpm.c
  51.596 -@@ -32,12 +32,6 @@
  51.597 - 
  51.598 - #define	TPM_BUFSIZE			2048
  51.599 - 
  51.600 --/* PCI configuration addresses */
  51.601 --#define	PCI_GEN_PMCON_1			0xA0
  51.602 --#define	PCI_GEN1_DEC			0xE4
  51.603 --#define	PCI_LPC_EN			0xE6
  51.604 --#define	PCI_GEN2_DEC			0xEC
  51.605 --
  51.606 - static LIST_HEAD(tpm_chip_list);
  51.607 - static DEFINE_SPINLOCK(driver_lock);
  51.608 - static int dev_mask[32];
  51.609 -@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
  51.610 - EXPORT_SYMBOL_GPL(tpm_time_expired);
  51.611 - 
  51.612 - /*
  51.613 -- * Initialize the LPC bus and enable the TPM ports
  51.614 -- */
  51.615 --int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
  51.616 --{
  51.617 --	u32 lpcenable, tmp;
  51.618 --	int is_lpcm = 0;
  51.619 --
  51.620 --	switch (pci_dev->vendor) {
  51.621 --	case PCI_VENDOR_ID_INTEL:
  51.622 --		switch (pci_dev->device) {
  51.623 --		case PCI_DEVICE_ID_INTEL_82801CA_12:
  51.624 --		case PCI_DEVICE_ID_INTEL_82801DB_12:
  51.625 --			is_lpcm = 1;
  51.626 --			break;
  51.627 --		}
  51.628 --		/* init ICH (enable LPC) */
  51.629 --		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
  51.630 --		lpcenable |= 0x20000000;
  51.631 --		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
  51.632 --
  51.633 --		if (is_lpcm) {
  51.634 --			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
  51.635 --					      &lpcenable);
  51.636 --			if ((lpcenable & 0x20000000) == 0) {
  51.637 --				dev_err(&pci_dev->dev,
  51.638 --					"cannot enable LPC\n");
  51.639 --				return -ENODEV;
  51.640 --			}
  51.641 --		}
  51.642 --
  51.643 --		/* initialize TPM registers */
  51.644 --		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
  51.645 --
  51.646 --		if (!is_lpcm)
  51.647 --			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
  51.648 --		else
  51.649 --			tmp =
  51.650 --			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
  51.651 --			    0x00000001;
  51.652 --
  51.653 --		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
  51.654 --
  51.655 --		if (is_lpcm) {
  51.656 --			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
  51.657 --					      &tmp);
  51.658 --			tmp |= 0x00000004;	/* enable CLKRUN */
  51.659 --			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
  51.660 --					       tmp);
  51.661 --		}
  51.662 --		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
  51.663 --		tpm_write_index(0x0A, 0x00);	/* int disable */
  51.664 --		tpm_write_index(0x08, base);	/* base addr lo */
  51.665 --		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
  51.666 --		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
  51.667 --		break;
  51.668 --	case PCI_VENDOR_ID_AMD:
  51.669 --		/* nothing yet */
  51.670 --		break;
  51.671 --	}
  51.672 --
  51.673 --	return 0;
  51.674 --}
  51.675 --
  51.676 --EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
  51.677 --
  51.678 --/*
  51.679 -  * Internal kernel interface to transmit TPM commands
  51.680 -  */
  51.681 - static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
  51.682 -@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
  51.683 - 	if (chip == NULL)
  51.684 - 		return -ENODEV;
  51.685 - 
  51.686 --	spin_lock(&driver_lock);
  51.687 --	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
  51.688 --	spin_unlock(&driver_lock);
  51.689 --
  51.690 - 	return 0;
  51.691 - }
  51.692 - 
  51.693 -diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
  51.694 ---- a/drivers/char/tpm/tpm.h
  51.695 -+++ b/drivers/char/tpm/tpm.h
  51.696 -@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
  51.697 - }
  51.698 - 
  51.699 - extern void tpm_time_expired(unsigned long);
  51.700 --extern int tpm_lpc_bus_init(struct pci_dev *, u16);
  51.701 --
  51.702 - extern int tpm_register_hardware(struct pci_dev *,
  51.703 - 				 struct tpm_vendor_specific *);
  51.704 - extern int tpm_open(struct inode *, struct file *);
  51.705 -diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
  51.706 ---- a/drivers/char/tpm/tpm_atmel.c
  51.707 -+++ b/drivers/char/tpm/tpm_atmel.c
  51.708 -@@ -22,7 +22,10 @@
  51.709 - #include "tpm.h"
  51.710 - 
  51.711 - /* Atmel definitions */
  51.712 --#define	TPM_ATML_BASE			0x400
  51.713 -+enum tpm_atmel_addr {
  51.714 -+	TPM_ATMEL_BASE_ADDR_LO = 0x08,
  51.715 -+	TPM_ATMEL_BASE_ADDR_HI = 0x09
  51.716 -+};
  51.717 - 
  51.718 - /* write status bits */
  51.719 - #define	ATML_STATUS_ABORT		0x01
  51.720 -@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
  51.721 - 	.cancel = tpm_atml_cancel,
  51.722 - 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
  51.723 - 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
  51.724 --	.base = TPM_ATML_BASE,
  51.725 - 	.miscdev = { .fops = &atmel_ops, },
  51.726 - };
  51.727 - 
  51.728 -@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
  51.729 - {
  51.730 - 	u8 version[4];
  51.731 - 	int rc = 0;
  51.732 -+	int lo, hi;
  51.733 - 
  51.734 - 	if (pci_enable_device(pci_dev))
  51.735 - 		return -EIO;
  51.736 - 
  51.737 --	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
  51.738 --		rc = -ENODEV;
  51.739 --		goto out_err;
  51.740 --	}
   51.741 -+	lo = tpm_read_index(TPM_ATMEL_BASE_ADDR_LO);
   51.742 -+	hi = tpm_read_index(TPM_ATMEL_BASE_ADDR_HI);
   51.743 -+
   51.744 -+	tpm_atmel.base = (hi << 8) | lo;
   51.745 -+	dev_dbg(&pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
  51.746 - 
  51.747 - 	/* verify that it is an Atmel part */
  51.748 - 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
  51.749 -diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
  51.750 ---- a/drivers/char/tpm/tpm_nsc.c
  51.751 -+++ b/drivers/char/tpm/tpm_nsc.c
  51.752 -@@ -24,6 +24,10 @@
  51.753 - /* National definitions */
  51.754 - #define	TPM_NSC_BASE			0x360
  51.755 - #define	TPM_NSC_IRQ			0x07
  51.756 -+#define	TPM_NSC_BASE0_HI		0x60
  51.757 -+#define	TPM_NSC_BASE0_LO		0x61
  51.758 -+#define	TPM_NSC_BASE1_HI		0x62
  51.759 -+#define	TPM_NSC_BASE1_LO		0x63
  51.760 - 
  51.761 - #define	NSC_LDN_INDEX			0x07
  51.762 - #define	NSC_SID_INDEX			0x20
  51.763 -@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
  51.764 - 	.cancel = tpm_nsc_cancel,
  51.765 - 	.req_complete_mask = NSC_STATUS_OBF,
  51.766 - 	.req_complete_val = NSC_STATUS_OBF,
  51.767 --	.base = TPM_NSC_BASE,
  51.768 - 	.miscdev = { .fops = &nsc_ops, },
  51.769 - 	
  51.770 - };
  51.771 -@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
  51.772 - 				  const struct pci_device_id *pci_id)
  51.773 - {
  51.774 - 	int rc = 0;
  51.775 -+	int lo, hi;
  51.776 -+
  51.777 -+	hi = tpm_read_index(TPM_NSC_BASE0_HI);
  51.778 -+	lo = tpm_read_index(TPM_NSC_BASE0_LO);
  51.779 -+
   51.780 -+	tpm_nsc.base = (hi << 8) | lo;
  51.781 - 
  51.782 - 	if (pci_enable_device(pci_dev))
  51.783 - 		return -EIO;
  51.784 - 
  51.785 --	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
  51.786 --		rc = -ENODEV;
  51.787 --		goto out_err;
  51.788 --	}
  51.789 --
  51.790 - 	/* verify that it is a National part (SID) */
  51.791 - 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
  51.792 - 		rc = -ENODEV;
  51.793 -diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
  51.794 ---- a/drivers/char/tty_ioctl.c
  51.795 -+++ b/drivers/char/tty_ioctl.c
  51.796 -@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
  51.797 - 			ld = tty_ldisc_ref(tty);
  51.798 - 			switch (arg) {
  51.799 - 			case TCIFLUSH:
  51.800 --				if (ld->flush_buffer)
  51.801 -+				if (ld && ld->flush_buffer)
  51.802 - 					ld->flush_buffer(tty);
  51.803 - 				break;
  51.804 - 			case TCIOFLUSH:
  51.805 --				if (ld->flush_buffer)
  51.806 -+				if (ld && ld->flush_buffer)
  51.807 - 					ld->flush_buffer(tty);
  51.808 - 				/* fall through */
  51.809 - 			case TCOFLUSH:
  51.810 -diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
  51.811 ---- a/drivers/media/video/cx88/cx88-video.c
  51.812 -+++ b/drivers/media/video/cx88/cx88-video.c
  51.813 -@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
  51.814 - 			.default_value = 0,
  51.815 - 			.type          = V4L2_CTRL_TYPE_INTEGER,
  51.816 - 		},
  51.817 --		.off                   = 0,
  51.818 -+		.off                   = 128,
  51.819 - 		.reg                   = MO_HUE,
  51.820 - 		.mask                  = 0x00ff,
  51.821 - 		.shift                 = 0,
  51.822 -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
  51.823 ---- a/drivers/net/e1000/e1000_main.c
  51.824 -+++ b/drivers/net/e1000/e1000_main.c
  51.825 -@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  51.826 - 	tso = e1000_tso(adapter, skb);
  51.827 - 	if (tso < 0) {
  51.828 - 		dev_kfree_skb_any(skb);
  51.829 -+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
  51.830 - 		return NETDEV_TX_OK;
  51.831 - 	}
  51.832 - 
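
The e1000 hunk closes a classic lock leak: the early return on the TSO error path left tx_lock held, wedging every later transmit. The invariant is that each exit from the critical section must pass through the matching unlock. A standalone pthreads analogue; process_one() is a stub standing in for e1000_tso():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    static int process_one(void *pkt) { (void)pkt; return -1; }

    static int xmit(void *pkt)
    {
        pthread_mutex_lock(&tx_lock);
        if (process_one(pkt) < 0) {
            pthread_mutex_unlock(&tx_lock);   /* the unlock the bug forgot */
            return 0;                         /* drop the packet, no deadlock */
        }
        /* ... queue pkt for the hardware ... */
        pthread_mutex_unlock(&tx_lock);
        return 0;
    }

    int main(void)
    {
        xmit(NULL);
        xmit(NULL);     /* with the leaked lock this call would hang */
        puts("no deadlock");
        return 0;
    }
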
  51.833 -diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
  51.834 ---- a/drivers/net/hamradio/Kconfig
  51.835 -+++ b/drivers/net/hamradio/Kconfig
  51.836 -@@ -17,7 +17,7 @@ config MKISS
  51.837 - 
  51.838 - config 6PACK
  51.839 - 	tristate "Serial port 6PACK driver"
  51.840 --	depends on AX25 && BROKEN_ON_SMP
  51.841 -+	depends on AX25
  51.842 - 	---help---
  51.843 - 	  6pack is a transmission protocol for the data exchange between your
  51.844 - 	  PC and your TNC (the Terminal Node Controller acts as a kind of
  51.845 -diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
  51.846 ---- a/drivers/net/shaper.c
  51.847 -+++ b/drivers/net/shaper.c
  51.848 -@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
  51.849 - {
  51.850 - 	struct shaper *shaper = dev->priv;
  51.851 -  	struct sk_buff *ptr;
  51.852 --   
  51.853 --	if (down_trylock(&shaper->sem))
  51.854 --		return -1;
  51.855 - 
  51.856 -+	spin_lock(&shaper->lock);
  51.857 -  	ptr=shaper->sendq.prev;
  51.858 -  	
  51.859 -  	/*
  51.860 -@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
  51.861 -                 shaper->stats.collisions++;
  51.862 -  	}
  51.863 - 	shaper_kick(shaper);
  51.864 --	up(&shaper->sem);
  51.865 -+	spin_unlock(&shaper->lock);
  51.866 -  	return 0;
  51.867 - }
  51.868 - 
  51.869 -@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
  51.870 - {
  51.871 - 	struct shaper *shaper = (struct shaper *)data;
  51.872 - 
  51.873 --	if (!down_trylock(&shaper->sem)) {
  51.874 --		shaper_kick(shaper);
  51.875 --		up(&shaper->sem);
  51.876 --	} else
  51.877 --		mod_timer(&shaper->timer, jiffies);
  51.878 -+	spin_lock(&shaper->lock);
  51.879 -+	shaper_kick(shaper);
  51.880 -+	spin_unlock(&shaper->lock);
  51.881 - }
  51.882 - 
  51.883 - /*
  51.884 -@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
  51.885 - 
  51.886 - 
  51.887 - /*
  51.888 -- *	Flush the shaper queues on a closedown
  51.889 -- */
  51.890 -- 
  51.891 --static void shaper_flush(struct shaper *shaper)
  51.892 --{
  51.893 --	struct sk_buff *skb;
  51.894 --
  51.895 --	down(&shaper->sem);
  51.896 --	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
  51.897 --		dev_kfree_skb(skb);
  51.898 --	shaper_kick(shaper);
  51.899 --	up(&shaper->sem);
  51.900 --}
  51.901 --
  51.902 --/*
  51.903 -  *	Bring the interface up. We just disallow this until a 
  51.904 -  *	bind.
  51.905 -  */
  51.906 -@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
  51.907 - static int shaper_close(struct net_device *dev)
  51.908 - {
  51.909 - 	struct shaper *shaper=dev->priv;
  51.910 --	shaper_flush(shaper);
  51.911 -+	struct sk_buff *skb;
  51.912 -+
  51.913 -+	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
  51.914 -+		dev_kfree_skb(skb);
  51.915 -+
  51.916 -+	spin_lock_bh(&shaper->lock);
  51.917 -+	shaper_kick(shaper);
  51.918 -+	spin_unlock_bh(&shaper->lock);
  51.919 -+
  51.920 - 	del_timer_sync(&shaper->timer);
  51.921 - 	return 0;
  51.922 - }
  51.923 -@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
  51.924 - 	init_timer(&sh->timer);
  51.925 - 	sh->timer.function=shaper_timer;
  51.926 - 	sh->timer.data=(unsigned long)sh;
  51.927 -+	spin_lock_init(&sh->lock);
  51.928 - }
  51.929 - 
  51.930 - /*
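
The shaper conversion swaps a semaphore for a spinlock because shaper_timer() runs in softirq context, where down() may not sleep, and the old down_trylock()/mod_timer() dance could postpone the queue kick indefinitely. A hedged kernel-context sketch of the resulting locking pattern (not standalone code; my_timer/my_close are hypothetical names mirroring the hunks above):

    /* Pattern adopted above: data shared with a timer handler is guarded
     * by a spinlock; process-context paths use the _bh form so the
     * softirq timer cannot interrupt them mid-section on the same cpu. */
    static void my_timer(unsigned long data)    /* softirq context */
    {
        struct shaper *sh = (struct shaper *)data;
        spin_lock(&sh->lock);                   /* never sleeps */
        shaper_kick(sh);
        spin_unlock(&sh->lock);
    }

    static int my_close(struct shaper *sh)      /* process context */
    {
        spin_lock_bh(&sh->lock);                /* keep the timer out */
        shaper_kick(sh);
        spin_unlock_bh(&sh->lock);
        del_timer_sync(&sh->timer);             /* then retire the timer */
        return 0;
    }
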
  51.931 -diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
  51.932 ---- a/drivers/pci/pci-driver.c
  51.933 -+++ b/drivers/pci/pci-driver.c
  51.934 -@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
  51.935 - 	/* FIXME, once all of the existing PCI drivers have been fixed to set
  51.936 - 	 * the pci shutdown function, this test can go away. */
  51.937 - 	if (!drv->driver.shutdown)
  51.938 --		drv->driver.shutdown = pci_device_shutdown,
  51.939 -+		drv->driver.shutdown = pci_device_shutdown;
  51.940 - 	drv->driver.owner = drv->owner;
  51.941 - 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
  51.942 - 	pci_init_dynids(&drv->dynids);
  51.943 -diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
  51.944 ---- a/drivers/scsi/qla2xxx/qla_init.c
  51.945 -+++ b/drivers/scsi/qla2xxx/qla_init.c
  51.946 -@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
  51.947 - 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
  51.948 - 
  51.949 - 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
  51.950 --	if (!rport)
  51.951 -+	if (!rport) {
  51.952 - 		qla_printk(KERN_WARNING, ha,
  51.953 - 		    "Unable to allocate fc remote port!\n");
  51.954 -+		return;
  51.955 -+	}
  51.956 - 
  51.957 - 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
  51.958 - 		fcport->os_target_id = rport->scsi_target_id;
  51.959 -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
  51.960 ---- a/drivers/scsi/qla2xxx/qla_os.c
  51.961 -+++ b/drivers/scsi/qla2xxx/qla_os.c
  51.962 -@@ -1150,7 +1150,7 @@ iospace_error_exit:
  51.963 -  */
  51.964 - int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
  51.965 - {
  51.966 --	int	ret;
  51.967 -+	int	ret = -ENODEV;
  51.968 - 	device_reg_t __iomem *reg;
  51.969 - 	struct Scsi_Host *host;
  51.970 - 	scsi_qla_host_t *ha;
  51.971 -@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
  51.972 - 	fc_port_t *fcport;
  51.973 - 
  51.974 - 	if (pci_enable_device(pdev))
  51.975 --		return -1;
  51.976 -+		goto probe_out;
  51.977 - 
  51.978 - 	host = scsi_host_alloc(&qla2x00_driver_template,
  51.979 - 	    sizeof(scsi_qla_host_t));
  51.980 -@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
  51.981 - 
  51.982 - 	/* Configure PCI I/O space */
  51.983 - 	ret = qla2x00_iospace_config(ha);
  51.984 --	if (ret != 0) {
  51.985 --		goto probe_alloc_failed;
  51.986 --	}
  51.987 -+	if (ret)
  51.988 -+		goto probe_failed;
  51.989 - 
  51.990 - 	/* Sanitize the information from PCI BIOS. */
  51.991 - 	host->irq = pdev->irq;
  51.992 -@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  51.993 - 		qla_printk(KERN_WARNING, ha,
  51.994 - 		    "[ERROR] Failed to allocate memory for adapter\n");
  51.995 - 
  51.996 --		goto probe_alloc_failed;
  51.997 -+		ret = -ENOMEM;
  51.998 -+		goto probe_failed;
  51.999 - 	}
 51.1000 - 
 51.1001 --	pci_set_drvdata(pdev, ha);
 51.1002 --	host->this_id = 255;
 51.1003 --	host->cmd_per_lun = 3;
 51.1004 --	host->unique_id = ha->instance;
 51.1005 --	host->max_cmd_len = MAX_CMDSZ;
 51.1006 --	host->max_channel = ha->ports - 1;
 51.1007 --	host->max_id = ha->max_targets;
 51.1008 --	host->max_lun = ha->max_luns;
 51.1009 --	host->transportt = qla2xxx_transport_template;
 51.1010 --	if (scsi_add_host(host, &pdev->dev))
 51.1011 --		goto probe_alloc_failed;
 51.1012 --
 51.1013 --	qla2x00_alloc_sysfs_attr(ha);
 51.1014 --
 51.1015 - 	if (qla2x00_initialize_adapter(ha) &&
 51.1016 - 	    !(ha->device_flags & DFLG_NO_CABLE)) {
 51.1017 - 
 51.1018 -@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
 51.1019 - 		    "Adapter flags %x.\n",
 51.1020 - 		    ha->host_no, ha->device_flags));
 51.1021 - 
 51.1022 -+		ret = -ENODEV;
 51.1023 - 		goto probe_failed;
 51.1024 - 	}
 51.1025 - 
 51.1026 --	qla2x00_init_host_attr(ha);
 51.1027 --
 51.1028 - 	/*
 51.1029 - 	 * Startup the kernel thread for this host adapter
 51.1030 - 	 */
 51.1031 -@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
 51.1032 - 		qla_printk(KERN_WARNING, ha,
 51.1033 - 		    "Unable to start DPC thread!\n");
 51.1034 - 
 51.1035 -+		ret = -ENODEV;
 51.1036 - 		goto probe_failed;
 51.1037 - 	}
 51.1038 - 	wait_for_completion(&ha->dpc_inited);
 51.1039 - 
 51.1040 -+	host->this_id = 255;
 51.1041 -+	host->cmd_per_lun = 3;
 51.1042 -+	host->unique_id = ha->instance;
 51.1043 -+	host->max_cmd_len = MAX_CMDSZ;
 51.1044 -+	host->max_channel = ha->ports - 1;
 51.1045 -+	host->max_lun = MAX_LUNS;
 51.1046 -+	host->transportt = qla2xxx_transport_template;
 51.1047 -+
 51.1048 - 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 51.1049 - 		ret = request_irq(host->irq, qla2100_intr_handler,
 51.1050 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
 51.1051 - 	else
 51.1052 - 		ret = request_irq(host->irq, qla2300_intr_handler,
 51.1053 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
 51.1054 --	if (ret != 0) {
 51.1055 -+	if (ret) {
 51.1056 - 		qla_printk(KERN_WARNING, ha,
 51.1057 - 		    "Failed to reserve interrupt %d already in use.\n",
 51.1058 - 		    host->irq);
 51.1059 -@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
 51.1060 - 		msleep(10);
 51.1061 - 	}
 51.1062 - 
 51.1063 -+	pci_set_drvdata(pdev, ha);
 51.1064 - 	ha->flags.init_done = 1;
 51.1065 - 	num_hosts++;
 51.1066 - 
 51.1067 -+	ret = scsi_add_host(host, &pdev->dev);
 51.1068 -+	if (ret)
 51.1069 -+		goto probe_failed;
 51.1070 -+
 51.1071 -+	qla2x00_alloc_sysfs_attr(ha);
 51.1072 -+
 51.1073 -+	qla2x00_init_host_attr(ha);
 51.1074 -+
 51.1075 - 	qla_printk(KERN_INFO, ha, "\n"
 51.1076 - 	    " QLogic Fibre Channel HBA Driver: %s\n"
 51.1077 - 	    "  QLogic %s - %s\n"
 51.1078 -@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
 51.1079 - probe_failed:
 51.1080 - 	fc_remove_host(ha->host);
 51.1081 - 
 51.1082 --	scsi_remove_host(host);
 51.1083 --
 51.1084 --probe_alloc_failed:
 51.1085 - 	qla2x00_free_device(ha);
 51.1086 - 
 51.1087 - 	scsi_host_put(host);
 51.1088 -@@ -1394,7 +1394,8 @@ probe_alloc_failed:
 51.1089 - probe_disable_device:
 51.1090 - 	pci_disable_device(pdev);
 51.1091 - 
 51.1092 --	return -1;
 51.1093 -+probe_out:
 51.1094 -+	return ret;
 51.1095 - }
 51.1096 - EXPORT_SYMBOL_GPL(qla2x00_probe_one);
 51.1097 - 
 51.1098 -diff --git a/fs/bio.c b/fs/bio.c
 51.1099 ---- a/fs/bio.c
 51.1100 -+++ b/fs/bio.c
 51.1101 -@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
 51.1102 - 	 */
 51.1103 - 	bio->bi_vcnt = bio_src->bi_vcnt;
 51.1104 - 	bio->bi_size = bio_src->bi_size;
 51.1105 -+	bio->bi_idx = bio_src->bi_idx;
 51.1106 - 	bio_phys_segments(q, bio);
 51.1107 - 	bio_hw_segments(q, bio);
 51.1108 - }
 51.1109 -diff --git a/fs/char_dev.c b/fs/char_dev.c
 51.1110 ---- a/fs/char_dev.c
 51.1111 -+++ b/fs/char_dev.c
 51.1112 -@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
 51.1113 - 	struct char_device_struct *cd = NULL, **cp;
 51.1114 - 	int i = major_to_index(major);
 51.1115 - 
 51.1116 --	up(&chrdevs_lock);
 51.1117 -+	down(&chrdevs_lock);
 51.1118 - 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
 51.1119 - 		if ((*cp)->major == major &&
 51.1120 - 		    (*cp)->baseminor == baseminor &&
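
The one-word char_dev.c fix replaces an up() with the down() that was plainly intended: the function released chrdevs_lock without ever taking it, so the hash-chain walk ran unlocked and the semaphore count drifted upward on every call. The corrected shape, for emphasis:

    /* Corrected critical section: acquire, walk, release. */
    down(&chrdevs_lock);
    /* ... find and unlink the matching char_device_struct ... */
    up(&chrdevs_lock);
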
 51.1121 -diff --git a/fs/exec.c b/fs/exec.c
 51.1122 ---- a/fs/exec.c
 51.1123 -+++ b/fs/exec.c
 51.1124 -@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
 51.1125 - 	}
 51.1126 - 	sig->group_exit_task = NULL;
 51.1127 - 	sig->notify_count = 0;
 51.1128 -+	sig->real_timer.data = (unsigned long)current;
 51.1129 - 	spin_unlock_irq(lock);
 51.1130 - 
 51.1131 - 	/*
 51.1132 -diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
 51.1133 ---- a/fs/isofs/compress.c
 51.1134 -+++ b/fs/isofs/compress.c
 51.1135 -@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
 51.1136 - 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
 51.1137 - 	brelse(bh);
 51.1138 - 
 51.1139 -+	if (cstart > cend)
 51.1140 -+		goto eio;
  51.1141 -+
 51.1142 - 	csize = cend-cstart;
 51.1143 - 
 51.1144 -+	if (csize > deflateBound(1UL << zisofs_block_shift))
 51.1145 -+		goto eio;
 51.1146 -+
 51.1147 - 	/* Now page[] contains an array of pages, any of which can be NULL,
 51.1148 - 	   and the locks on which we hold.  We should now read the data and
 51.1149 - 	   release the pages.  If the pages are NULL the decompressed data
 51.1150 -diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
 51.1151 ---- a/include/asm-i386/string.h
 51.1152 -+++ b/include/asm-i386/string.h
 51.1153 -@@ -116,7 +116,8 @@ __asm__ __volatile__(
 51.1154 - 	"orb $1,%%al\n"
 51.1155 - 	"3:"
 51.1156 - 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
 51.1157 --		     :"1" (cs),"2" (ct));
 51.1158 -+	:"1" (cs),"2" (ct)
 51.1159 -+	:"memory");
 51.1160 - return __res;
 51.1161 - }
 51.1162 - 
 51.1163 -@@ -138,8 +139,9 @@ __asm__ __volatile__(
 51.1164 - 	"3:\tsbbl %%eax,%%eax\n\t"
 51.1165 - 	"orb $1,%%al\n"
 51.1166 - 	"4:"
 51.1167 --		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
 51.1168 --		     :"1" (cs),"2" (ct),"3" (count));
 51.1169 -+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
 51.1170 -+	:"1" (cs),"2" (ct),"3" (count)
 51.1171 -+	:"memory");
 51.1172 - return __res;
 51.1173 - }
 51.1174 - 
 51.1175 -@@ -158,7 +160,9 @@ __asm__ __volatile__(
 51.1176 - 	"movl $1,%1\n"
 51.1177 - 	"2:\tmovl %1,%0\n\t"
 51.1178 - 	"decl %0"
 51.1179 --	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
 51.1180 -+	:"=a" (__res), "=&S" (d0)
 51.1181 -+	:"1" (s),"0" (c)
 51.1182 -+	:"memory");
 51.1183 - return __res;
 51.1184 - }
 51.1185 - 
 51.1186 -@@ -175,7 +179,9 @@ __asm__ __volatile__(
 51.1187 - 	"leal -1(%%esi),%0\n"
 51.1188 - 	"2:\ttestb %%al,%%al\n\t"
 51.1189 - 	"jne 1b"
 51.1190 --	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
 51.1191 -+	:"=g" (__res), "=&S" (d0), "=&a" (d1)
 51.1192 -+	:"0" (0),"1" (s),"2" (c)
 51.1193 -+	:"memory");
 51.1194 - return __res;
 51.1195 - }
 51.1196 - 
 51.1197 -@@ -189,7 +195,9 @@ __asm__ __volatile__(
 51.1198 - 	"scasb\n\t"
 51.1199 - 	"notl %0\n\t"
 51.1200 - 	"decl %0"
 51.1201 --	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
 51.1202 -+	:"=c" (__res), "=&D" (d0)
 51.1203 -+	:"1" (s),"a" (0), "0" (0xffffffffu)
 51.1204 -+	:"memory");
 51.1205 - return __res;
 51.1206 - }
 51.1207 - 
 51.1208 -@@ -333,7 +341,9 @@ __asm__ __volatile__(
 51.1209 - 	"je 1f\n\t"
 51.1210 - 	"movl $1,%0\n"
 51.1211 - 	"1:\tdecl %0"
 51.1212 --	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
 51.1213 -+	:"=D" (__res), "=&c" (d0)
 51.1214 -+	:"a" (c),"0" (cs),"1" (count)
 51.1215 -+	:"memory");
 51.1216 - return __res;
 51.1217 - }
 51.1218 - 
 51.1219 -@@ -369,7 +379,7 @@ __asm__ __volatile__(
 51.1220 - 	"je 2f\n\t"
 51.1221 - 	"stosb\n"
 51.1222 - 	"2:"
 51.1223 --	: "=&c" (d0), "=&D" (d1)
 51.1224 -+	:"=&c" (d0), "=&D" (d1)
 51.1225 - 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
 51.1226 - 	:"memory");
 51.1227 - return (s);	
 51.1228 -@@ -392,7 +402,8 @@ __asm__ __volatile__(
 51.1229 - 	"jne 1b\n"
 51.1230 - 	"3:\tsubl %2,%0"
 51.1231 - 	:"=a" (__res), "=&d" (d0)
 51.1232 --	:"c" (s),"1" (count));
 51.1233 -+	:"c" (s),"1" (count)
 51.1234 -+	:"memory");
 51.1235 - return __res;
 51.1236 - }
 51.1237 - /* end of additional stuff */
 51.1238 -@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
 51.1239 - 		"dec %%edi\n"
 51.1240 - 		"1:"
 51.1241 - 		: "=D" (addr), "=c" (size)
 51.1242 --		: "0" (addr), "1" (size), "a" (c));
 51.1243 -+		: "0" (addr), "1" (size), "a" (c)
 51.1244 -+		: "memory");
 51.1245 - 	return addr;
 51.1246 - }
 51.1247 - 
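
Every string.h hunk adds a "memory" clobber: these asm bodies read or write bytes that are not named in the constraint lists (only the pointers are), so without the clobber GCC was free to cache string contents in registers across the asm or reorder surrounding loads and stores. A standalone sketch modeled on the strlen() pattern above:

    /* Inline-asm strlen with the "memory" clobber the patch adds; the
     * clobber tells gcc the asm inspects memory it cannot see through
     * the constraints, so cached values must be assumed stale. */
    static inline int asm_strlen(const char *s)
    {
        int res;
        __asm__ __volatile__(
            "repne\n\t"
            "scasb\n\t"
            "notl %0\n\t"
            "decl %0"
            : "=c" (res), "+D" (s)
            : "a" (0), "0" (0xffffffffu)
            : "memory");
        return res;
    }
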
 51.1248 -diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
 51.1249 ---- a/include/asm-x86_64/smp.h
 51.1250 -+++ b/include/asm-x86_64/smp.h
 51.1251 -@@ -46,6 +46,8 @@ extern int pic_mode;
 51.1252 - extern int smp_num_siblings;
 51.1253 - extern void smp_flush_tlb(void);
 51.1254 - extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 51.1255 -+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
 51.1256 -+				     int retry, int wait);
 51.1257 - extern void smp_send_reschedule(int cpu);
 51.1258 - extern void smp_invalidate_rcv(void);		/* Process an NMI */
 51.1259 - extern void zap_low_mappings(void);
 51.1260 -diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
 51.1261 ---- a/include/linux/if_shaper.h
 51.1262 -+++ b/include/linux/if_shaper.h
 51.1263 -@@ -23,7 +23,7 @@ struct shaper
 51.1264 - 	__u32 shapeclock;
 51.1265 - 	unsigned long recovery;	/* Time we can next clock a packet out on
 51.1266 - 				   an empty queue */
 51.1267 --	struct semaphore sem;
 51.1268 -+	spinlock_t lock;
 51.1269 -         struct net_device_stats stats;
 51.1270 - 	struct net_device *dev;
 51.1271 - 	int  (*hard_start_xmit) (struct sk_buff *skb,
 51.1272 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
 51.1273 ---- a/include/linux/skbuff.h
 51.1274 -+++ b/include/linux/skbuff.h
 51.1275 -@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
 51.1276 - {
 51.1277 - 	int hlen = skb_headlen(skb);
 51.1278 - 
 51.1279 --	if (offset + len <= hlen)
 51.1280 -+	if (hlen - offset >= len)
 51.1281 - 		return skb->data + offset;
 51.1282 - 
 51.1283 - 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
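
The skb_header_pointer() change rewrites the bounds test so an attacker-supplied length cannot wrap it: with int arithmetic, "offset + len <= hlen" can overflow for a huge len and falsely pass, while "hlen - offset >= len" cannot. A standalone demonstration (note that signed overflow is formally undefined in C; on x86 it wraps, which is exactly what the old check could be made to do):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int hlen = 64, offset = 4, len = INT_MAX;

        printf("old check passes: %d\n", offset + len <= hlen); /* 1 (!) */
        printf("new check passes: %d\n", hlen - offset >= len); /* 0 */
        return 0;
    }
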
 51.1284 -diff --git a/include/linux/zlib.h b/include/linux/zlib.h
 51.1285 ---- a/include/linux/zlib.h
 51.1286 -+++ b/include/linux/zlib.h
 51.1287 -@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
 51.1288 -    stream state was inconsistent (such as zalloc or state being NULL).
 51.1289 - */
 51.1290 - 
 51.1291 -+static inline unsigned long deflateBound(unsigned long s)
 51.1292 -+{
 51.1293 -+	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
 51.1294 -+}
 51.1295 -+
 51.1296 - extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
 51.1297 - /*
 51.1298 -      Dynamically update the compression level and compression strategy.  The
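
The new inline deflateBound() gives a cheap worst-case size for deflate output on s input bytes: the payload itself plus stored-block overhead plus an 11-byte constant. The fs/isofs/compress.c hunk earlier uses it to reject a csize no valid stream for one block could reach, before zlib ever parses the data. A worked example (the 16-bit block shift is an assumption for illustration):

    #include <stdio.h>

    static unsigned long deflateBound(unsigned long s)
    {
        return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
    }

    int main(void)
    {
        unsigned long s = 1UL << 16;    /* a 64 KiB zisofs block */
        /* 65536 + 8192 + 1024 + 11 = 74763 */
        printf("deflateBound(%lu) = %lu\n", s, deflateBound(s));
        return 0;
    }
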
 51.1299 -diff --git a/kernel/module.c b/kernel/module.c
 51.1300 ---- a/kernel/module.c
 51.1301 -+++ b/kernel/module.c
 51.1302 -@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
 51.1303 - /* Created by linker magic */
 51.1304 - extern char __per_cpu_start[], __per_cpu_end[];
 51.1305 - 
 51.1306 --static void *percpu_modalloc(unsigned long size, unsigned long align)
 51.1307 -+static void *percpu_modalloc(unsigned long size, unsigned long align,
 51.1308 -+			     const char *name)
 51.1309 - {
 51.1310 - 	unsigned long extra;
 51.1311 - 	unsigned int i;
 51.1312 - 	void *ptr;
 51.1313 - 
 51.1314 --	BUG_ON(align > SMP_CACHE_BYTES);
 51.1315 -+	if (align > SMP_CACHE_BYTES) {
  51.1316 -+		printk(KERN_WARNING "%s: per-cpu alignment %lu > %i\n",
 51.1317 -+		       name, align, SMP_CACHE_BYTES);
 51.1318 -+		align = SMP_CACHE_BYTES;
 51.1319 -+	}
 51.1320 - 
 51.1321 - 	ptr = __per_cpu_start;
 51.1322 - 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
 51.1323 -@@ -347,7 +352,8 @@ static int percpu_modinit(void)
 51.1324 - }	
 51.1325 - __initcall(percpu_modinit);
 51.1326 - #else /* ... !CONFIG_SMP */
 51.1327 --static inline void *percpu_modalloc(unsigned long size, unsigned long align)
 51.1328 -+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
 51.1329 -+				    const char *name)
 51.1330 - {
 51.1331 - 	return NULL;
 51.1332 - }
 51.1333 -@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
 51.1334 - 	if (pcpuindex) {
 51.1335 - 		/* We have a special allocation for this section. */
 51.1336 - 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
 51.1337 --					 sechdrs[pcpuindex].sh_addralign);
 51.1338 -+					 sechdrs[pcpuindex].sh_addralign,
 51.1339 -+					 mod->name);
 51.1340 - 		if (!percpu) {
 51.1341 - 			err = -ENOMEM;
 51.1342 - 			goto free_mod;
 51.1343 -diff --git a/lib/inflate.c b/lib/inflate.c
 51.1344 ---- a/lib/inflate.c
 51.1345 -+++ b/lib/inflate.c
 51.1346 -@@ -326,7 +326,7 @@ DEBG("huft1 ");
 51.1347 -   {
 51.1348 -     *t = (struct huft *)NULL;
 51.1349 -     *m = 0;
 51.1350 --    return 0;
 51.1351 -+    return 2;
 51.1352 -   }
 51.1353 - 
 51.1354 - DEBG("huft2 ");
 51.1355 -@@ -374,6 +374,7 @@ DEBG("huft5 ");
 51.1356 -     if ((j = *p++) != 0)
 51.1357 -       v[x[j]++] = i;
 51.1358 -   } while (++i < n);
 51.1359 -+  n = x[g];                   /* set n to length of v */
 51.1360 - 
 51.1361 - DEBG("h6 ");
 51.1362 - 
 51.1363 -@@ -410,12 +411,13 @@ DEBG1("1 ");
 51.1364 - DEBG1("2 ");
 51.1365 -           f -= a + 1;           /* deduct codes from patterns left */
 51.1366 -           xp = c + k;
 51.1367 --          while (++j < z)       /* try smaller tables up to z bits */
 51.1368 --          {
 51.1369 --            if ((f <<= 1) <= *++xp)
 51.1370 --              break;            /* enough codes to use up j bits */
 51.1371 --            f -= *xp;           /* else deduct codes from patterns */
 51.1372 --          }
 51.1373 -+          if (j < z)
 51.1374 -+            while (++j < z)       /* try smaller tables up to z bits */
 51.1375 -+            {
 51.1376 -+              if ((f <<= 1) <= *++xp)
 51.1377 -+                break;            /* enough codes to use up j bits */
 51.1378 -+              f -= *xp;           /* else deduct codes from patterns */
 51.1379 -+            }
 51.1380 -         }
 51.1381 - DEBG1("3 ");
 51.1382 -         z = 1 << j;             /* table entries for j-bit table */
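
The "n = x[g]" line (applied both here and to the zisofs copy of huft_build earlier) is the core of the hardening: v[] holds the symbols sorted by code length, but only x[g] entries are initialized, g being the longest code length, while the old loop bound n counted all symbols including length-zero ones, so a crafted table could walk past the initialized region. A standalone sketch of the bookkeeping, simplified from huft_build():

    #include <stdio.h>

    #define BMAX 16

    int main(void)
    {
        unsigned b[] = { 2, 3, 3, 0, 2 };    /* code lengths, 0 = unused */
        unsigned n = 5, c[BMAX + 1] = { 0 }, x[BMAX + 2] = { 0 };
        unsigned v[16], i, j, g = 0;

        for (i = 0; i < n; i++)
            c[b[i]]++;                       /* count codes per length */
        for (j = 1; j <= BMAX; j++) {
            if (c[j])
                g = j;                       /* longest code length */
            x[j + 1] = x[j] + c[j];          /* start offset in v[] */
        }
        for (i = 0; i < n; i++)
            if ((j = b[i]) != 0)
                v[x[j]++] = i;               /* sort symbols by length */
        /* after the fill, x[g] is the count of initialized v[] slots:
         * 4 here, not n = 5, since one symbol has length 0 */
        printf("n = x[g] = %u (was %u)\n", x[g], n);
        return 0;
    }
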
 51.1383 -diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
 51.1384 ---- a/lib/zlib_inflate/inftrees.c
 51.1385 -+++ b/lib/zlib_inflate/inftrees.c
 51.1386 -@@ -141,7 +141,7 @@ static int huft_build(
 51.1387 -   {
 51.1388 -     *t = NULL;
 51.1389 -     *m = 0;
 51.1390 --    return Z_OK;
 51.1391 -+    return Z_DATA_ERROR;
 51.1392 -   }
 51.1393 - 
 51.1394 - 
 51.1395 -diff --git a/mm/memory.c b/mm/memory.c
 51.1396 ---- a/mm/memory.c
 51.1397 -+++ b/mm/memory.c
 51.1398 -@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
 51.1399 - {
 51.1400 - 	pgd_t *pgd;
 51.1401 - 	unsigned long next;
 51.1402 --	unsigned long end = addr + size;
 51.1403 -+	unsigned long end = addr + PAGE_ALIGN(size);
 51.1404 - 	struct mm_struct *mm = vma->vm_mm;
 51.1405 - 	int err;
 51.1406 - 
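
The remap_pfn_range() fix rounds the requested size up to a whole number of pages before computing the end address, so a sub-page tail is mapped rather than silently truncated. Worked arithmetic with the usual round-up macro:

    /* PAGE_ALIGN rounds up to the next page boundary so a request of,
     * say, 5000 bytes covers two full 4 KiB pages instead of ending
     * mid-page.  Standalone check: */
    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long size = 5000;
        printf("%lu -> %lu\n", size, PAGE_ALIGN(size));   /* 5000 -> 8192 */
        return 0;
    }
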
 51.1407 -diff --git a/mm/mempolicy.c b/mm/mempolicy.c
 51.1408 ---- a/mm/mempolicy.c
 51.1409 -+++ b/mm/mempolicy.c
 51.1410 -@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
 51.1411 - 	struct mempolicy *new;
 51.1412 - 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
 51.1413 - 
 51.1414 --	if (mode > MPOL_MAX)
 51.1415 -+	if (mode < 0 || mode > MPOL_MAX)
 51.1416 - 		return -EINVAL;
 51.1417 - 	err = get_nodes(nodes, nmask, maxnode, mode);
 51.1418 - 	if (err)
 51.1419 -diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
 51.1420 ---- a/net/8021q/vlan.c
 51.1421 -+++ b/net/8021q/vlan.c
 51.1422 -@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
 51.1423 - 			if (!vlandev)
 51.1424 - 				continue;
 51.1425 - 
 51.1426 -+			if (netif_carrier_ok(dev)) {
 51.1427 -+				if (!netif_carrier_ok(vlandev))
 51.1428 -+					netif_carrier_on(vlandev);
 51.1429 -+			} else {
 51.1430 -+				if (netif_carrier_ok(vlandev))
 51.1431 -+					netif_carrier_off(vlandev);
 51.1432 -+			}
 51.1433 -+
 51.1434 - 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
 51.1435 - 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
 51.1436 - 					| flgs;
 51.1437 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
 51.1438 ---- a/net/ipv4/ip_output.c
 51.1439 -+++ b/net/ipv4/ip_output.c
 51.1440 -@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
 51.1441 - #ifdef CONFIG_NETFILTER_DEBUG
 51.1442 - 	nf_debug_ip_loopback_xmit(newskb);
 51.1443 - #endif
 51.1444 --	nf_reset(newskb);
 51.1445 - 	netif_rx(newskb);
 51.1446 - 	return 0;
 51.1447 - }
 51.1448 -@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
 51.1449 - 	nf_debug_ip_finish_output2(skb);
 51.1450 - #endif /*CONFIG_NETFILTER_DEBUG*/
 51.1451 - 
 51.1452 --	nf_reset(skb);
 51.1453 --
 51.1454 - 	if (hh) {
 51.1455 - 		int hh_alen;
 51.1456 - 
 51.1457 -diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
 51.1458 ---- a/net/ipv4/netfilter/ip_conntrack_core.c
 51.1459 -+++ b/net/ipv4/netfilter/ip_conntrack_core.c
 51.1460 -@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
 51.1461 - 		schedule();
 51.1462 - 		goto i_see_dead_people;
 51.1463 - 	}
 51.1464 -+	/* wait until all references to ip_conntrack_untracked are dropped */
 51.1465 -+	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
 51.1466 -+		schedule();
 51.1467 - 
 51.1468 - 	kmem_cache_destroy(ip_conntrack_cachep);
 51.1469 - 	kmem_cache_destroy(ip_conntrack_expect_cachep);
 51.1470 -diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
 51.1471 ---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
 51.1472 -+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
 51.1473 -@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
 51.1474 - 				        const struct net_device *out,
 51.1475 - 				        int (*okfn)(struct sk_buff *))
 51.1476 - {
 51.1477 -+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
 51.1478 -+	/* Previously seen (loopback)?  Ignore.  Do this before
  51.1479 -+	   fragment check. */
 51.1480 -+	if ((*pskb)->nfct)
 51.1481 -+		return NF_ACCEPT;
 51.1482 -+#endif
 51.1483 -+
 51.1484 - 	/* Gather fragments. */
 51.1485 - 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
 51.1486 - 		*pskb = ip_ct_gather_frags(*pskb,
 51.1487 -diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
 51.1488 ---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
 51.1489 -+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
 51.1490 -@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
 51.1491 - 		 enum ip_nat_manip_type maniptype,
 51.1492 - 		 const struct ip_conntrack *conntrack)
 51.1493 - {
 51.1494 --	static u_int16_t port, *portptr;
 51.1495 -+	static u_int16_t port;
 51.1496 -+	u_int16_t *portptr;
 51.1497 - 	unsigned int range_size, min, i;
 51.1498 - 
 51.1499 - 	if (maniptype == IP_NAT_MANIP_SRC)
 51.1500 -diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
 51.1501 ---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
 51.1502 -+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
 51.1503 -@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
 51.1504 - 		 enum ip_nat_manip_type maniptype,
 51.1505 - 		 const struct ip_conntrack *conntrack)
 51.1506 - {
 51.1507 --	static u_int16_t port, *portptr;
 51.1508 -+	static u_int16_t port;
 51.1509 -+	u_int16_t *portptr;
 51.1510 - 	unsigned int range_size, min, i;
 51.1511 - 
 51.1512 - 	if (maniptype == IP_NAT_MANIP_SRC)
 51.1513 -diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
 51.1514 ---- a/net/ipv6/netfilter/ip6_queue.c
 51.1515 -+++ b/net/ipv6/netfilter/ip6_queue.c
 51.1516 -@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
 51.1517 - static void
 51.1518 - ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
 51.1519 - {
 51.1520 -+	local_bh_disable();
 51.1521 - 	nf_reinject(entry->skb, entry->info, verdict);
 51.1522 -+	local_bh_enable();
 51.1523 - 	kfree(entry);
 51.1524 - }
 51.1525 - 
 51.1526 -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
 51.1527 ---- a/net/netlink/af_netlink.c
 51.1528 -+++ b/net/netlink/af_netlink.c
 51.1529 -@@ -315,8 +315,8 @@ err:
 51.1530 - static void netlink_remove(struct sock *sk)
 51.1531 - {
 51.1532 - 	netlink_table_grab();
 51.1533 --	nl_table[sk->sk_protocol].hash.entries--;
 51.1534 --	sk_del_node_init(sk);
 51.1535 -+	if (sk_del_node_init(sk))
 51.1536 -+		nl_table[sk->sk_protocol].hash.entries--;
 51.1537 - 	if (nlk_sk(sk)->groups)
 51.1538 - 		__sk_del_bind_node(sk);
 51.1539 - 	netlink_table_ungrab();
 51.1540 -@@ -429,7 +429,12 @@ retry:
 51.1541 - 	err = netlink_insert(sk, pid);
 51.1542 - 	if (err == -EADDRINUSE)
 51.1543 - 		goto retry;
 51.1544 --	return 0;
 51.1545 -+
 51.1546 -+	/* If 2 threads race to autobind, that is fine.  */
 51.1547 -+	if (err == -EBUSY)
 51.1548 -+		err = 0;
 51.1549 -+
 51.1550 -+	return err;
 51.1551 - }
 51.1552 - 
 51.1553 - static inline int netlink_capable(struct socket *sock, unsigned int flag) 
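
The autobind fix treats -EBUSY from netlink_insert() as success: if two threads race to autobind the same socket, the loser finds the pid already bound, which serves it just as well. A sketch of that "lose the race gracefully" convention; pick_unused_pid() and try_insert() are hypothetical stand-ins for the pid-scan loop and netlink_insert() in the hunk above:

    int autobind(struct sock *sk)
    {
        int err;

        do {
            err = try_insert(sk, pick_unused_pid());
        } while (err == -EADDRINUSE);   /* pid collision: pick again */

        if (err == -EBUSY)              /* another thread bound us first */
            err = 0;                    /* that is fine too */
        return err;
    }
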
 51.1554 -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
 51.1555 ---- a/net/packet/af_packet.c
 51.1556 -+++ b/net/packet/af_packet.c
 51.1557 -@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
 51.1558 - 	dst_release(skb->dst);
 51.1559 - 	skb->dst = NULL;
 51.1560 - 
 51.1561 -+	/* drop conntrack reference */
 51.1562 -+	nf_reset(skb);
 51.1563 -+
 51.1564 - 	spkt = (struct sockaddr_pkt*)skb->cb;
 51.1565 - 
 51.1566 - 	skb_push(skb, skb->data-skb->mac.raw);
 51.1567 -@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
 51.1568 - 	dst_release(skb->dst);
 51.1569 - 	skb->dst = NULL;
 51.1570 - 
 51.1571 -+	/* drop conntrack reference */
 51.1572 -+	nf_reset(skb);
 51.1573 -+
 51.1574 - 	spin_lock(&sk->sk_receive_queue.lock);
 51.1575 - 	po->stats.tp_packets++;
 51.1576 - 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 51.1577 -diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
 51.1578 ---- a/net/xfrm/xfrm_user.c
 51.1579 -+++ b/net/xfrm/xfrm_user.c
 51.1580 -@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
 51.1581 - 	if (nr > XFRM_MAX_DEPTH)
 51.1582 - 		return NULL;
 51.1583 - 
 51.1584 -+	if (p->dir > XFRM_POLICY_OUT)
 51.1585 -+		return NULL;
 51.1586 -+
 51.1587 - 	xp = xfrm_policy_alloc(GFP_KERNEL);
 51.1588 - 	if (xp == NULL) {
 51.1589 - 		*dir = -ENOBUFS;
 51.1590 -diff --git a/security/keys/keyring.c b/security/keys/keyring.c
 51.1591 ---- a/security/keys/keyring.c
 51.1592 -+++ b/security/keys/keyring.c
 51.1593 -@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
 51.1594 - 
 51.1595 - 	if (keyring->description) {
 51.1596 - 		write_lock(&keyring_name_lock);
 51.1597 --		list_del(&keyring->type_data.link);
 51.1598 -+
 51.1599 -+		if (keyring->type_data.link.next != NULL &&
 51.1600 -+		    !list_empty(&keyring->type_data.link))
 51.1601 -+			list_del(&keyring->type_data.link);
 51.1602 -+
 51.1603 - 		write_unlock(&keyring_name_lock);
 51.1604 - 	}
 51.1605 - 
 51.1606 -diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
 51.1607 ---- a/security/keys/process_keys.c
 51.1608 -+++ b/security/keys/process_keys.c
 51.1609 -@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
 51.1610 - 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
 51.1611 - 		if (IS_ERR(keyring)) {
 51.1612 - 			ret = PTR_ERR(keyring);
 51.1613 --			goto error;
 51.1614 -+			goto error2;
 51.1615 - 		}
 51.1616 - 	}
 51.1617 - 	else if (IS_ERR(keyring)) {
    52.1 --- a/tools/blktap/parallax/block-async.c	Fri Oct 21 13:58:39 2005 -0600
    52.2 +++ b/tools/blktap/parallax/block-async.c	Mon Oct 24 09:08:13 2005 -0600
    52.3 @@ -31,11 +31,11 @@
    52.4   */
    52.5  
    52.6  struct read_args {
    52.7 -    u64 addr;
    52.8 +    uint64_t addr;
    52.9  };
   52.10  
   52.11  struct write_args {
   52.12 -    u64   addr;
   52.13 +    uint64_t   addr;
   52.14      char *block;
   52.15  };
   52.16  
   52.17 @@ -94,7 +94,7 @@ static void init_pending_io(void)
   52.18  		
   52.19  } 
   52.20  
   52.21 -void block_read(u64 addr, io_cb_t cb, void *param)
   52.22 +void block_read(uint64_t addr, io_cb_t cb, void *param)
   52.23  {
   52.24      struct pending_io_req *req;
   52.25      
   52.26 @@ -113,7 +113,7 @@ void block_read(u64 addr, io_cb_t cb, vo
   52.27  }
   52.28  
   52.29  
   52.30 -void block_write(u64 addr, char *block, io_cb_t cb, void *param)
   52.31 +void block_write(uint64_t addr, char *block, io_cb_t cb, void *param)
   52.32  {
   52.33      struct pending_io_req *req;
   52.34      
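
The tools/blktap/parallax changes in this and the following files are a mechanical rename of the kernel-private u64 typedef to C99's uint64_t, the portable fixed-width type for userspace code. Printing one portably takes the format macros from <inttypes.h>; a minimal sketch:

    /* uint64_t from <stdint.h> replaces the kernel-only u64 in userspace;
     * <inttypes.h> provides the matching printf format macros. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t luid = 0x1000ULL;      /* cf. luid_cnt in blockstore.c */
        printf("luid = 0x%" PRIx64 "\n", luid);
        return 0;
    }
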
    53.1 --- a/tools/blktap/parallax/block-async.h	Fri Oct 21 13:58:39 2005 -0600
    53.2 +++ b/tools/blktap/parallax/block-async.h	Mon Oct 24 09:08:13 2005 -0600
    53.3 @@ -14,7 +14,7 @@ struct io_ret
    53.4  {
    53.5      enum {IO_ADDR_T, IO_BLOCK_T, IO_INT_T} type;
    53.6      union {
    53.7 -        u64   a;
    53.8 +        uint64_t   a;
    53.9          char *b;
   53.10          int   i;
   53.11      } u;
   53.12 @@ -38,8 +38,8 @@ struct radix_lock {
   53.13  };
   53.14  void radix_lock_init(struct radix_lock *r);
   53.15  
   53.16 -void block_read(u64 addr, io_cb_t cb, void *param);
   53.17 -void block_write(u64 addr, char *block, io_cb_t cb, void *param);
   53.18 +void block_read(uint64_t addr, io_cb_t cb, void *param);
   53.19 +void block_write(uint64_t addr, char *block, io_cb_t cb, void *param);
   53.20  void block_alloc(char *block, io_cb_t cb, void *param);
   53.21  void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
   53.22  void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
   53.23 @@ -47,7 +47,7 @@ void block_runlock(struct radix_lock *r,
   53.24  void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param);
   53.25  void init_block_async(void);
   53.26  
   53.27 -static inline u64 IO_ADDR(struct io_ret r)
   53.28 +static inline uint64_t IO_ADDR(struct io_ret r)
   53.29  {
   53.30      assert(r.type == IO_ADDR_T);
   53.31      return r.u.a;
    54.1 --- a/tools/blktap/parallax/blockstore.c	Fri Oct 21 13:58:39 2005 -0600
    54.2 +++ b/tools/blktap/parallax/blockstore.c	Mon Oct 24 09:08:13 2005 -0600
    54.3 @@ -132,9 +132,9 @@ pthread_mutex_t ptmutex_luid;
    54.4  #define ENTER_LUID_CR pthread_mutex_lock(&ptmutex_luid)
    54.5  #define LEAVE_LUID_CR pthread_mutex_unlock(&ptmutex_luid)
    54.6  
    54.7 -static u64 luid_cnt = 0x1000ULL;
    54.8 -u64 new_luid(void) {
    54.9 -    u64 luid;
   54.10 +static uint64_t luid_cnt = 0x1000ULL;
   54.11 +uint64_t new_luid(void) {
   54.12 +    uint64_t luid;
   54.13      ENTER_LUID_CR;
   54.14      luid = luid_cnt++;
   54.15      LEAVE_LUID_CR;
   54.16 @@ -539,7 +539,7 @@ pthread_t pthread_recv;
   54.17   * Reading                                                                   *
   54.18   *****************************************************************************/
   54.19  
   54.20 -void *readblock_indiv(int server, u64 id) {
   54.21 +void *readblock_indiv(int server, uint64_t id) {
   54.22      void *block;
   54.23      bsq_t *qe;
   54.24      int len, rc;
   54.25 @@ -616,9 +616,9 @@ void *readblock_indiv(int server, u64 id
   54.26   *
   54.27   *   @return: pointer to block, NULL on error
   54.28   */
   54.29 -void *readblock(u64 id) {
   54.30 +void *readblock(uint64_t id) {
   54.31      int map = (int)BSID_MAP(id);
   54.32 -    u64 xid;
   54.33 +    uint64_t xid;
   54.34      static int i = CLUSTER_MAX_REPLICAS - 1;
   54.35      void *block = NULL;
   54.36  
   54.37 @@ -670,7 +670,7 @@ void *readblock(u64 id) {
   54.38   * Writing                                                                   *
   54.39   *****************************************************************************/
   54.40  
   54.41 -bsq_t *writeblock_indiv(int server, u64 id, void *block) {
   54.42 +bsq_t *writeblock_indiv(int server, uint64_t id, void *block) {
   54.43  
   54.44      bsq_t *qe;
   54.45      int len;
   54.46 @@ -709,7 +709,7 @@ bsq_t *writeblock_indiv(int server, u64 
   54.47   *
   54.48   *   @return: zero on success, -1 on failure
   54.49   */
   54.50 -int writeblock(u64 id, void *block) {
   54.51 +int writeblock(uint64_t id, void *block) {
   54.52      
   54.53      int map = (int)BSID_MAP(id);
   54.54      int rep0 = bsclusters[map].servers[0];
   54.55 @@ -805,11 +805,11 @@ int writeblock(u64 id, void *block) {
   54.56   *
   54.57   *   @return: new id of block on disk
   54.58   */
   54.59 -u64 allocblock(void *block) {
   54.60 +uint64_t allocblock(void *block) {
   54.61      return allocblock_hint(block, 0);
   54.62  }
   54.63  
   54.64 -bsq_t *allocblock_hint_indiv(int server, void *block, u64 hint) {
   54.65 +bsq_t *allocblock_hint_indiv(int server, void *block, uint64_t hint) {
   54.66      bsq_t *qe;
   54.67      int len;
   54.68  
   54.69 @@ -846,14 +846,14 @@ bsq_t *allocblock_hint_indiv(int server,
   54.70   *
   54.71   *   @return: new id of block on disk
   54.72   */
   54.73 -u64 allocblock_hint(void *block, u64 hint) {
   54.74 +uint64_t allocblock_hint(void *block, uint64_t hint) {
   54.75      int map = (int)hint;
   54.76      int rep0 = bsclusters[map].servers[0];
   54.77      int rep1 = bsclusters[map].servers[1];
   54.78      int rep2 = bsclusters[map].servers[2];
   54.79      bsq_t *reqs[3];
   54.80      int rc;
   54.81 -    u64 id0, id1, id2;
   54.82 +    uint64_t id0, id1, id2;
   54.83  
   54.84      reqs[0] = reqs[1] = reqs[2] = NULL;
   54.85  
   54.86 @@ -938,7 +938,7 @@ u64 allocblock_hint(void *block, u64 hin
   54.87   *   @return: pointer to block, NULL on error
   54.88   */
   54.89  
   54.90 -void *readblock(u64 id) {
   54.91 +void *readblock(uint64_t id) {
   54.92      void *block;
   54.93      int block_fp;
   54.94     
   54.95 @@ -980,7 +980,7 @@ err:
   54.96   *
   54.97   *   @return: zero on success, -1 on failure
   54.98   */
   54.99 -int writeblock(u64 id, void *block) {
  54.100 +int writeblock(uint64_t id, void *block) {
  54.101      
  54.102      int block_fp;
  54.103      
  54.104 @@ -1014,8 +1014,8 @@ err:
  54.105   *   @return: new id of block on disk
  54.106   */
  54.107  
  54.108 -u64 allocblock(void *block) {
  54.109 -    u64 lb;
  54.110 +uint64_t allocblock(void *block) {
  54.111 +    uint64_t lb;
  54.112      off64_t pos;
  54.113      int block_fp;
  54.114      
  54.115 @@ -1057,7 +1057,7 @@ err:
  54.116   *
  54.117   *   @return: new id of block on disk
  54.118   */
  54.119 -u64 allocblock_hint(void *block, u64 hint) {
  54.120 +uint64_t allocblock_hint(void *block, uint64_t hint) {
  54.121      return allocblock(block);
  54.122  }
  54.123  
  54.124 @@ -1109,7 +1109,7 @@ static freeblock_t *new_freeblock(void)
  54.125      return fb;
  54.126  }
  54.127  
  54.128 -void releaseblock(u64 id)
  54.129 +void releaseblock(uint64_t id)
  54.130  {
  54.131      blockstore_super_t *bs_super;
  54.132      freeblock_t *fl_current;
  54.133 @@ -1154,7 +1154,7 @@ void freelist_count(int print_each)
  54.134  {
  54.135      blockstore_super_t *bs_super;
  54.136      freeblock_t *fb;
  54.137 -    u64 total = 0, next;
  54.138 +    uint64_t total = 0, next;
  54.139      
  54.140      bs_super = (blockstore_super_t *) readblock(BLOCKSTORE_SUPER);
  54.141      
  54.142 @@ -1205,7 +1205,7 @@ int __init_blockstore(void)
  54.143  {
  54.144      int i;
  54.145      blockstore_super_t *bs_super;
  54.146 -    u64 ret;
  54.147 +    uint64_t ret;
  54.148      int block_fp;
  54.149      
  54.150  #ifdef BLOCKSTORE_REMOTE
    55.1 --- a/tools/blktap/parallax/blockstore.h	Fri Oct 21 13:58:39 2005 -0600
    55.2 +++ b/tools/blktap/parallax/blockstore.h	Mon Oct 24 09:08:13 2005 -0600
    55.3 @@ -21,33 +21,33 @@
    55.4  #define SECTOR_SHIFT   9 
    55.5  #endif
    55.6  
    55.7 -#define FREEBLOCK_SIZE  (BLOCK_SIZE / sizeof(u64)) - (3 * sizeof(u64))
    55.8 +#define FREEBLOCK_SIZE  (BLOCK_SIZE / sizeof(uint64_t)) - (3 * sizeof(uint64_t))
    55.9  #define FREEBLOCK_MAGIC 0x0fee0fee0fee0feeULL
   55.10  
   55.11  typedef struct {
   55.12 -    u64 magic;
   55.13 -    u64 next;
   55.14 -    u64 count;
   55.15 -    u64 list[FREEBLOCK_SIZE];
   55.16 +    uint64_t magic;
   55.17 +    uint64_t next;
   55.18 +    uint64_t count;
   55.19 +    uint64_t list[FREEBLOCK_SIZE];
   55.20  } freeblock_t; 
   55.21  
   55.22  #define BLOCKSTORE_MAGIC 0xaaaaaaa00aaaaaaaULL
   55.23  #define BLOCKSTORE_SUPER 1ULL
   55.24  
   55.25  typedef struct {
   55.26 -    u64 magic;
   55.27 -    u64 freelist_full;
   55.28 -    u64 freelist_current;
   55.29 +    uint64_t magic;
   55.30 +    uint64_t freelist_full;
   55.31 +    uint64_t freelist_current;
   55.32  } blockstore_super_t;
   55.33  
   55.34  extern void *newblock();
   55.35 -extern void *readblock(u64 id);
   55.36 -extern u64 allocblock(void *block);
   55.37 -extern u64 allocblock_hint(void *block, u64 hint);
   55.38 -extern int writeblock(u64 id, void *block);
   55.39 +extern void *readblock(uint64_t id);
   55.40 +extern uint64_t allocblock(void *block);
   55.41 +extern uint64_t allocblock_hint(void *block, uint64_t hint);
   55.42 +extern int writeblock(uint64_t id, void *block);
   55.43  
   55.44  /* Add this blockid to a freelist, to be recycled by the allocator. */
   55.45 -extern void releaseblock(u64 id);
   55.46 +extern void releaseblock(uint64_t id);
   55.47  
   55.48  /* this is a memory free() operation for block-sized allocations */
   55.49  extern void freeblock(void *block);
   55.50 @@ -55,17 +55,17 @@ extern int __init_blockstore(void);
   55.51  
   55.52  /* debug for freelist. */
   55.53  void freelist_count(int print_each);
   55.54 -#define ALLOCFAIL (((u64)(-1)))
   55.55 +#define ALLOCFAIL (((uint64_t)(-1)))
   55.56  
   55.57  /* Distribution
   55.58   */
   55.59  #define BLOCKSTORED_PORT 9346
   55.60  
   55.61  struct bshdr_t_struct {
   55.62 -    u32            operation;
   55.63 -    u32            flags;
   55.64 -    u64            id;
   55.65 -    u64            luid;
   55.66 +    uint32_t            operation;
   55.67 +    uint32_t            flags;
   55.68 +    uint64_t            id;
   55.69 +    uint64_t            luid;
   55.70  } __attribute__ ((packed));
   55.71  typedef struct bshdr_t_struct bshdr_t;
   55.72  
   55.73 @@ -76,9 +76,9 @@ struct bsmsg_t_struct {
   55.74  
   55.75  typedef struct bsmsg_t_struct bsmsg_t;
   55.76  
   55.77 -#define MSGBUFSIZE_OP    sizeof(u32)
   55.78 -#define MSGBUFSIZE_FLAGS (sizeof(u32) + sizeof(u32))
   55.79 -#define MSGBUFSIZE_ID    (sizeof(u32) + sizeof(u32) + sizeof(u64) + sizeof(u64))
   55.80 +#define MSGBUFSIZE_OP    sizeof(uint32_t)
   55.81 +#define MSGBUFSIZE_FLAGS (sizeof(uint32_t) + sizeof(uint32_t))
   55.82 +#define MSGBUFSIZE_ID    (sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint64_t))
   55.83  #define MSGBUFSIZE_BLOCK sizeof(bsmsg_t)
   55.84  
   55.85  #define BSOP_READBLOCK  0x01
   55.86 @@ -113,9 +113,9 @@ typedef struct bsmsg_t_struct bsmsg_t;
   55.87  #define BSID_REPLICA2(_id) (((_id)>>40)&0xfffffULL)
   55.88  #define BSID_MAP(_id)      (((_id)>>60)&0xfULL)
   55.89  
   55.90 -#define BSID(_map, _rep0, _rep1, _rep2) ((((u64)(_map))<<60) | \
   55.91 -                                         (((u64)(_rep2))<<40) | \
   55.92 -                                         (((u64)(_rep1))<<20) | ((u64)(_rep0)))
   55.93 +#define BSID(_map, _rep0, _rep1, _rep2) ((((uint64_t)(_map))<<60) | \
   55.94 +                                         (((uint64_t)(_rep2))<<40) | \
   55.95 +                                         (((uint64_t)(_rep1))<<20) | ((uint64_t)(_rep0)))
   55.96  
   55.97  typedef struct bsserver_t_struct {
   55.98      char              *hostname;
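
[The uint64_t casts in the BSID macro above are not cosmetic: each field is widened before its shift, since shifting a plain int by 20, 40 or 60 bits is undefined behaviour when int is 32 bits wide. A round-trip sketch using the macros exactly as shown:

    #include <assert.h>
    #include <stdint.h>

    #define BSID(_map, _rep0, _rep1, _rep2) ((((uint64_t)(_map))<<60) | \
                                             (((uint64_t)(_rep2))<<40) | \
                                             (((uint64_t)(_rep1))<<20) | ((uint64_t)(_rep0)))
    #define BSID_MAP(_id) (((_id)>>60)&0xfULL)

    int main(void)
    {
        uint64_t id = BSID(3, 7, 8, 9);
        assert(BSID_MAP(id) == 3);   /* the 4-bit map field round-trips */
        return 0;
    }
]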
    56.1 --- a/tools/blktap/parallax/blockstored.c	Fri Oct 21 13:58:39 2005 -0600
    56.2 +++ b/tools/blktap/parallax/blockstored.c	Mon Oct 24 09:08:13 2005 -0600
    56.3 @@ -21,9 +21,9 @@
    56.4  
    56.5  //#define BSDEBUG
    56.6  
    56.7 -int readblock_into(u64 id, void *block);
    56.8 +int readblock_into(uint64_t id, void *block);
    56.9  
   56.10 -int open_socket(u16 port) {
   56.11 +int open_socket(uint16_t port) {
   56.12      
   56.13      struct sockaddr_in sn;
   56.14      int sock;
   56.15 @@ -75,7 +75,7 @@ void service_loop(void) {
   56.16          int rc, len;
   56.17          struct sockaddr_in from;
   56.18          size_t slen = sizeof(from);
   56.19 -        u64 bid;
   56.20 +        uint64_t bid;
   56.21  
   56.22          len = recvfrom(bssock, (void *)&msgbuf, sizeof(msgbuf), 0,
   56.23                         (struct sockaddr *)&from, &slen);
   56.24 @@ -155,7 +155,7 @@ void service_loop(void) {
   56.25   *   @return: 0 if OK, other on error
   56.26   */
   56.27  
   56.28 -int readblock_into(u64 id, void *block) {
   56.29 +int readblock_into(uint64_t id, void *block) {
   56.30      if (lseek64(block_fp, ((off64_t) id - 1LL) * BLOCK_SIZE, SEEK_SET) < 0) {
   56.31          printf ("%Ld\n", (id - 1) * BLOCK_SIZE);
   56.32          perror("readblock lseek");
   56.33 @@ -175,7 +175,7 @@ int readblock_into(u64 id, void *block) 
   56.34   *
   56.35   *   @return: zero on success, -1 on failure
   56.36   */
   56.37 -int writeblock(u64 id, void *block) {
   56.38 +int writeblock(uint64_t id, void *block) {
   56.39      if (lseek64(block_fp, ((off64_t) id - 1LL) * BLOCK_SIZE, SEEK_SET) < 0) {
   56.40          perror("writeblock lseek");
   56.41          return -1;
   56.42 @@ -193,10 +193,10 @@ int writeblock(u64 id, void *block) {
   56.43   *
   56.44   *   @return: new id of block on disk
   56.45   */
   56.46 -static u64 lastblock = 0;
   56.47 +static uint64_t lastblock = 0;
   56.48  
   56.49 -u64 allocblock(void *block) {
   56.50 -    u64 lb;
   56.51 +uint64_t allocblock(void *block) {
   56.52 +    uint64_t lb;
   56.53      off64_t pos;
   56.54  
   56.55      retry:
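
[readblock_into() and writeblock() both cast the block id to off64_t before multiplying by BLOCK_SIZE, which keeps the byte offset from overflowing a 32-bit intermediate on large stores. A sketch of that arithmetic; the BLOCK_SIZE value is assumed for illustration, and ids appear to be 1-based with block 1 at offset 0:

    #define _LARGEFILE64_SOURCE
    #include <sys/types.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096   /* assumed for illustration */

    off64_t block_offset(uint64_t id)
    {
        return ((off64_t)id - 1LL) * BLOCK_SIZE;  /* cast before multiply */
    }
]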
    57.1 --- a/tools/blktap/parallax/bstest.c	Fri Oct 21 13:58:39 2005 -0600
    57.2 +++ b/tools/blktap/parallax/bstest.c	Mon Oct 24 09:08:13 2005 -0600
    57.3 @@ -22,7 +22,7 @@
    57.4  #include <errno.h>
    57.5  #include "blockstore.h"
    57.6  
    57.7 -int direct(char *host, u32 op, u64 id, int len) {
    57.8 +int direct(char *host, uint32_t op, uint64_t id, int len) {
    57.9      struct sockaddr_in sn, peer;
   57.10      int sock;
   57.11      bsmsg_t msgbuf;
   57.12 @@ -99,8 +99,8 @@ int direct(char *host, u32 op, u64 id, i
   57.13  
   57.14  int main (int argc, char **argv) {
   57.15  
   57.16 -    u32 op = 0;
   57.17 -    u64 id = 0;
   57.18 +    uint32_t op = 0;
   57.19 +    uint64_t id = 0;
   57.20      int len = 0, rc;
   57.21      void *block;
   57.22  
    58.1 --- a/tools/blktap/parallax/parallax.c	Fri Oct 21 13:58:39 2005 -0600
    58.2 +++ b/tools/blktap/parallax/parallax.c	Mon Oct 24 09:08:13 2005 -0600
    58.3 @@ -330,8 +330,8 @@ pending_t pending_list[MAX_REQUESTS];
    58.4  struct cb_param {
    58.5      pending_t *pent;
    58.6      int       segment;
    58.7 -    u64       sector; 
    58.8 -    u64       vblock; /* for debug printing -- can be removed. */
    58.9 +    uint64_t       sector; 
   58.10 +    uint64_t       vblock; /* for debug printing -- can be removed. */
   58.11  };
   58.12  
   58.13  static void read_cb(struct io_ret r, void *in_param)
   58.14 @@ -393,9 +393,9 @@ static void read_cb(struct io_ret r, voi
   58.15  int parallax_read(blkif_request_t *req, blkif_t *blkif)
   58.16  {
   58.17      blkif_response_t *rsp;
   58.18 -    u64 vblock, gblock;
   58.19 +    uint64_t vblock, gblock;
   58.20      vdi_t *vdi;
   58.21 -    u64 sector;
   58.22 +    uint64_t sector;
   58.23      int i;
   58.24      char *dpage, *spage;
   58.25      pending_t *pent;
   58.26 @@ -475,9 +475,9 @@ static void write_cb(struct io_ret r, vo
   58.27  int parallax_write(blkif_request_t *req, blkif_t *blkif)
   58.28  {
   58.29      blkif_response_t *rsp;
   58.30 -    u64 sector;
   58.31 +    uint64_t sector;
   58.32      int i, writable = 0;
   58.33 -    u64 vblock, gblock;
   58.34 +    uint64_t vblock, gblock;
   58.35      char *spage;
   58.36      unsigned long size, offset, start;
   58.37      vdi_t *vdi;
    59.1 --- a/tools/blktap/parallax/radix.c	Fri Oct 21 13:58:39 2005 -0600
    59.2 +++ b/tools/blktap/parallax/radix.c	Mon Oct 24 09:08:13 2005 -0600
    59.3 @@ -33,7 +33,7 @@ static  int rcache_count = 0;
    59.4  
    59.5  typedef struct rcache_st {
    59.6      radix_tree_node  *node;
    59.7 -    u64               id;
    59.8 +    uint64_t               id;
    59.9      struct rcache_st *hash_next;
   59.10      struct rcache_st *cache_next;
   59.11      struct rcache_st *cache_prev;
   59.12 @@ -55,7 +55,7 @@ void __rcache_init(void)
   59.13  }
   59.14      
   59.15  
   59.16 -void rcache_write(u64 id, radix_tree_node *node)
   59.17 +void rcache_write(uint64_t id, radix_tree_node *node)
   59.18  {
   59.19      rcache_t *r, *tmp, **curs;
   59.20      
   59.21 @@ -135,7 +135,7 @@ done:
   59.22      pthread_mutex_unlock(&rcache_mutex);
   59.23  }
   59.24  
   59.25 -radix_tree_node *rcache_read(u64 id)
   59.26 +radix_tree_node *rcache_read(uint64_t id)
   59.27  {
   59.28      rcache_t *r, *tmp;
   59.29      radix_tree_node *node = NULL;
   59.30 @@ -181,7 +181,7 @@ done:
   59.31  }
   59.32  
   59.33  
   59.34 -void *rc_readblock(u64 id)
   59.35 +void *rc_readblock(uint64_t id)
   59.36  {
   59.37      void *ret;
   59.38      
   59.39 @@ -197,9 +197,9 @@ void *rc_readblock(u64 id)
   59.40      return(ret);
   59.41  }
   59.42  
   59.43 -u64 rc_allocblock(void *block)
   59.44 +uint64_t rc_allocblock(void *block)
   59.45  {
   59.46 -    u64 ret;
   59.47 +    uint64_t ret;
   59.48      
   59.49      ret = allocblock(block);
   59.50      
   59.51 @@ -209,7 +209,7 @@ u64 rc_allocblock(void *block)
   59.52      return(ret);
   59.53  }
   59.54  
   59.55 -int rc_writeblock(u64 id, void *block)
   59.56 +int rc_writeblock(uint64_t id, void *block)
   59.57  {
   59.58      int ret;
   59.59      
   59.60 @@ -233,9 +233,9 @@ radix_tree_node cloneblock(radix_tree_no
   59.61   * whether or not the block is writable, including the return
   59.62   * values of update and snapshot
   59.63   */
   59.64 -u64 lookup(int height, u64 root, u64 key);
   59.65 -u64 update(int height, u64 root, u64 key, u64 val);
   59.66 -u64 snapshot(u64 root);
   59.67 +uint64_t lookup(int height, uint64_t root, uint64_t key);
   59.68 +uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val);
   59.69 +uint64_t snapshot(uint64_t root);
   59.70  
   59.71  /**
   59.72   * cloneblock: clone an existing block in memory
   59.73 @@ -264,9 +264,9 @@ radix_tree_node cloneblock(radix_tree_no
   59.74   *   @return: value on success, zero on error
   59.75   */
   59.76  
   59.77 -u64 lookup(int height, u64 root, u64 key) {
   59.78 +uint64_t lookup(int height, uint64_t root, uint64_t key) {
   59.79      radix_tree_node node;
   59.80 -    u64 mask = ONE;
   59.81 +    uint64_t mask = ONE;
   59.82      
   59.83      assert(key >> height == 0);
   59.84  
   59.85 @@ -275,7 +275,7 @@ u64 lookup(int height, u64 root, u64 key
   59.86  
   59.87      /* now carve off equal sized chunks at each step */
   59.88      for (;;) {
   59.89 -        u64 oldroot;
   59.90 +        uint64_t oldroot;
   59.91  
   59.92  #ifdef DEBUG
   59.93          printf("lookup: height=%3d root=%3Ld offset=%3d%s\n", height, root,
   59.94 @@ -314,9 +314,9 @@ u64 lookup(int height, u64 root, u64 key
   59.95   *   @returns: (possibly new) root id on success (with LSB=1), 0 on failure
   59.96   */
   59.97  
   59.98 -u64 update(int height, u64 root, u64 key, u64 val) {
   59.99 +uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val) {
  59.100      int offset;
  59.101 -    u64 child;
  59.102 +    uint64_t child;
  59.103      radix_tree_node node;
  59.104      
  59.105      /* base case--return val */
  59.106 @@ -390,7 +390,7 @@ u64 update(int height, u64 root, u64 key
  59.107   *
  59.108   *   @return: new root node, 0 on error
  59.109   */
  59.110 -u64 snapshot(u64 root) {
  59.111 +uint64_t snapshot(uint64_t root) {
  59.112      radix_tree_node node, newnode;
  59.113  
  59.114      if ((node = rc_readblock(getid(root))) == NULL)
  59.115 @@ -418,7 +418,7 @@ u64 snapshot(u64 root) {
  59.116   * child are okay...)
  59.117   */
  59.118  
  59.119 -int collapse(int height, u64 proot, u64 croot)
  59.120 +int collapse(int height, uint64_t proot, uint64_t croot)
  59.121  {
  59.122      int i, numlinks, ret, total = 0;
  59.123      radix_tree_node pnode, cnode;
  59.124 @@ -480,7 +480,7 @@ int collapse(int height, u64 proot, u64 
  59.125  }
  59.126  
  59.127  
  59.128 -void print_root(u64 root, int height, FILE *dot_f)
  59.129 +void print_root(uint64_t root, int height, FILE *dot_f)
  59.130  {
  59.131      FILE *f;
  59.132      int i;
  59.133 @@ -558,9 +558,9 @@ void print_root(u64 root, int height, FI
  59.134  #ifdef RADIX_STANDALONE
  59.135  
  59.136  int main(int argc, char **argv) {
  59.137 -    u64 key = ZERO, val = ZERO;
  59.138 -    u64 root = writable(2ULL);
  59.139 -    u64 p = ZERO, c = ZERO;
  59.140 +    uint64_t key = ZERO, val = ZERO;
  59.141 +    uint64_t root = writable(2ULL);
  59.142 +    uint64_t p = ZERO, c = ZERO;
  59.143      int v;
  59.144      char buff[4096];
  59.145  
    60.1 --- a/tools/blktap/parallax/radix.h	Fri Oct 21 13:58:39 2005 -0600
    60.2 +++ b/tools/blktap/parallax/radix.h	Mon Oct 24 09:08:13 2005 -0600
    60.3 @@ -24,7 +24,7 @@
    60.4  #define RADIX_TREE_MAP_MASK 0x1ff
    60.5  #define RADIX_TREE_MAP_ENTRIES 512
    60.6  
    60.7 -typedef u64 *radix_tree_node;
    60.8 +typedef uint64_t *radix_tree_node;
    60.9  
   60.10  
   60.11  /*
   60.12 @@ -33,11 +33,11 @@ typedef u64 *radix_tree_node;
   60.13   * whether or not the block is writable, including the return
   60.14   * values of update and snapshot
   60.15   */
   60.16 -u64 lookup(int height, u64 root, u64 key);
   60.17 -u64 update(int height, u64 root, u64 key, u64 val);
   60.18 -u64 snapshot(u64 root);
   60.19 -int collapse(int height, u64 proot, u64 croot);
   60.20 -int isprivate(int height, u64 root, u64 key);
   60.21 +uint64_t lookup(int height, uint64_t root, uint64_t key);
   60.22 +uint64_t update(int height, uint64_t root, uint64_t key, uint64_t val);
   60.23 +uint64_t snapshot(uint64_t root);
   60.24 +int collapse(int height, uint64_t proot, uint64_t croot);
   60.25 +int isprivate(int height, uint64_t root, uint64_t key);
   60.26  
   60.27  
   60.28  void __rcache_init(void);
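
[With RADIX_TREE_MAP_MASK == 0x1ff and 512 entries per node, each radix level consumes nine bits of the key; lookup() above carves these off one level at a time. A sketch of the per-level index extraction, where the traversal order is an assumption based on the masks shown:

    #include <stdint.h>

    #define RADIX_TREE_MAP_MASK 0x1ff   /* 512 slots per node */

    static unsigned int radix_index(uint64_t key, int level)
    {
        /* level 0 is the least-significant 9-bit chunk */
        return (unsigned int)((key >> (9 * level)) & RADIX_TREE_MAP_MASK);
    }
]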
    61.1 --- a/tools/blktap/parallax/requests-async.c	Fri Oct 21 13:58:39 2005 -0600
    61.2 +++ b/tools/blktap/parallax/requests-async.c	Mon Oct 24 09:08:13 2005 -0600
    61.3 @@ -27,14 +27,14 @@
    61.4  #endif
    61.5  
    61.6  struct block_info {
    61.7 -    u32        crc;
    61.8 -    u32        unused;
    61.9 +    uint32_t        crc;
   61.10 +    uint32_t        unused;
   61.11  };
   61.12  
   61.13  struct io_req {
   61.14      enum { IO_OP_READ, IO_OP_WRITE } op;
   61.15 -    u64        root;
   61.16 -    u64        vaddr;
   61.17 +    uint64_t        root;
   61.18 +    uint64_t        vaddr;
   61.19      int        state;
   61.20      io_cb_t    cb;
   61.21      void      *param;
   61.22 @@ -44,7 +44,7 @@ struct io_req {
   61.23      struct io_ret     retval;/* holds the return while we unlock. */
   61.24      char             *block; /* the block to write */
   61.25      radix_tree_node   radix[3];
   61.26 -    u64               radix_addr[3];
   61.27 +    uint64_t               radix_addr[3];
   61.28      struct block_info bi;
   61.29  };
   61.30  
   61.31 @@ -129,7 +129,7 @@ enum radix_offsets {
   61.32  static void read_cb(struct io_ret ret, void *param);
   61.33  static void write_cb(struct io_ret ret, void *param);
   61.34  
   61.35 -int vdi_read(vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param)
   61.36 +int vdi_read(vdi_t *vdi, uint64_t vaddr, io_cb_t cb, void *param)
   61.37  {
   61.38      struct io_req *req;
   61.39  
   61.40 @@ -156,7 +156,7 @@ int vdi_read(vdi_t *vdi, u64 vaddr, io_c
   61.41  }
   61.42  
   61.43  
   61.44 -int   vdi_write(vdi_t *vdi, u64 vaddr, char *block, 
   61.45 +int   vdi_write(vdi_t *vdi, uint64_t vaddr, char *block, 
   61.46                  io_cb_t cb, void *param)
   61.47  {
   61.48      struct io_req *req;
   61.49 @@ -177,8 +177,8 @@ int   vdi_write(vdi_t *vdi, u64 vaddr, c
   61.50      req->block  = block;
   61.51      /* Todo: add a pseudoheader to the block to include some location   */
   61.52      /* information in the CRC as well.                                  */
   61.53 -    req->bi.crc = (u32) crc32(0L, Z_NULL, 0); 
   61.54 -    req->bi.crc = (u32) crc32(req->bi.crc, block, BLOCK_SIZE); 
   61.55 +    req->bi.crc = (uint32_t) crc32(0L, Z_NULL, 0); 
   61.56 +    req->bi.crc = (uint32_t) crc32(req->bi.crc, block, BLOCK_SIZE); 
   61.57      req->bi.unused = 0xdeadbeef;
   61.58  
   61.59      req->cb     = cb;
   61.60 @@ -196,7 +196,7 @@ static void read_cb(struct io_ret ret, v
   61.61  {
   61.62      struct io_req *req = (struct io_req *)param;
   61.63      radix_tree_node node;
   61.64 -    u64 idx;
   61.65 +    uint64_t idx;
   61.66      char *block;
   61.67      void *req_param;
   61.68  
   61.69 @@ -268,15 +268,15 @@ static void read_cb(struct io_ret ret, v
   61.70      }
   61.71      case READ_DATA:
   61.72      {
   61.73 -        u32 crc;
   61.74 +        uint32_t crc;
   61.75  
   61.76          DPRINTF("READ_DATA\n");
   61.77          block = IO_BLOCK(ret);
   61.78          if (block == NULL) goto fail;
   61.79  
   61.80          /* crc check */
   61.81 -        crc = (u32) crc32(0L, Z_NULL, 0); 
   61.82 -        crc = (u32) crc32(crc, block, BLOCK_SIZE); 
   61.83 +        crc = (uint32_t) crc32(0L, Z_NULL, 0); 
   61.84 +        crc = (uint32_t) crc32(crc, block, BLOCK_SIZE); 
   61.85          if (crc != req->bi.crc) {
   61.86              /* TODO: add a retry loop here.                          */
   61.87              /* Do this after the cache is added -- make sure to      */
   61.88 @@ -359,7 +359,7 @@ static void write_cb(struct io_ret r, vo
   61.89  {
   61.90      struct io_req *req = (struct io_req *)param;
   61.91      radix_tree_node node;
   61.92 -    u64 a, addr;
   61.93 +    uint64_t a, addr;
   61.94      void *req_param;
   61.95      struct block_info *bi;
   61.96  
   61.97 @@ -721,7 +721,7 @@ static void write_cb(struct io_ret r, vo
   61.98      }
   61.99  }
  61.100  
  61.101 -char *vdi_read_s(vdi_t *vdi, u64 vaddr)
  61.102 +char *vdi_read_s(vdi_t *vdi, uint64_t vaddr)
  61.103  {
  61.104      pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  61.105      char *block = NULL;
  61.106 @@ -742,7 +742,7 @@ char *vdi_read_s(vdi_t *vdi, u64 vaddr)
  61.107  }
  61.108  
  61.109  
  61.110 -int vdi_write_s(vdi_t *vdi, u64 vaddr, char *block)
  61.111 +int vdi_write_s(vdi_t *vdi, uint64_t vaddr, char *block)
  61.112  {
  61.113      pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  61.114      int ret, result;
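
[The CRC fields above use the standard two-step zlib idiom: crc32(0L, Z_NULL, 0) yields the initial value, and a second call folds the block in. A sketch, with BLOCK_SIZE assumed:

    #include <stdint.h>
    #include <zlib.h>

    #define BLOCK_SIZE 4096   /* assumed for illustration */

    uint32_t block_crc(const unsigned char *block)
    {
        uint32_t crc = (uint32_t) crc32(0L, Z_NULL, 0);
        return (uint32_t) crc32(crc, block, BLOCK_SIZE);
    }
]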
    62.1 --- a/tools/blktap/parallax/requests-async.h	Fri Oct 21 13:58:39 2005 -0600
    62.2 +++ b/tools/blktap/parallax/requests-async.h	Mon Oct 24 09:08:13 2005 -0600
    62.3 @@ -10,18 +10,18 @@
    62.4  #define getid(x) (((x)>>1)&0x7fffffffffffffffLLU)
    62.5  #define iswritable(x) (((x) & 1LLU) != 0)
    62.6  #define writable(x) (((x) << 1) | 1LLU)
    62.7 -#define readonly(x) ((u64)((x) << 1))
    62.8 +#define readonly(x) ((uint64_t)((x) << 1))
    62.9  */
   62.10  
   62.11  #define VADDR_MASK 0x0000000003ffffffLLU /* 26-bits = 256Gig */
   62.12  #define VALID_VADDR(x) (((x) & VADDR_MASK) == (x))
   62.13  
   62.14 -int vdi_read (vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param);
   62.15 -int vdi_write(vdi_t *vdi, u64 vaddr, char *block, io_cb_t cb, void *param);
   62.16 +int vdi_read (vdi_t *vdi, uint64_t vaddr, io_cb_t cb, void *param);
   62.17 +int vdi_write(vdi_t *vdi, uint64_t vaddr, char *block, io_cb_t cb, void *param);
   62.18               
   62.19  /* synchronous versions: */
   62.20 -char *vdi_read_s (vdi_t *vdi, u64 vaddr);
   62.21 -int   vdi_write_s(vdi_t *vdi, u64 vaddr, char *block);
   62.22 +char *vdi_read_s (vdi_t *vdi, uint64_t vaddr);
   62.23 +int   vdi_write_s(vdi_t *vdi, uint64_t vaddr, char *block);
   62.24  
   62.25  #define ERR_BAD_VADDR  -1
   62.26  #define ERR_NOMEM      -2
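
[The getid/iswritable/writable/readonly macros quoted above sit in a commented-out block in this header, so treat the following as illustrative: they encode a writable flag in bit 0 and the block id in bits 1..63. A round-trip check:

    #include <assert.h>
    #include <stdint.h>

    #define getid(x)      (((x)>>1)&0x7fffffffffffffffLLU)
    #define iswritable(x) (((x) & 1LLU) != 0)
    #define writable(x)   (((x) << 1) | 1LLU)
    #define readonly(x)   ((uint64_t)((x) << 1))

    int main(void)
    {
        uint64_t id = 42ULL;
        assert(getid(writable(id)) == id &&  iswritable(writable(id)));
        assert(getid(readonly(id)) == id && !iswritable(readonly(id)));
        return 0;
    }
]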
    63.1 --- a/tools/blktap/parallax/snaplog.c	Fri Oct 21 13:58:39 2005 -0600
    63.2 +++ b/tools/blktap/parallax/snaplog.c	Mon Oct 24 09:08:13 2005 -0600
    63.3 @@ -24,7 +24,7 @@
    63.4  
    63.5  
    63.6  
    63.7 -snap_block_t *snap_get_block(u64 block)
    63.8 +snap_block_t *snap_get_block(uint64_t block)
    63.9  {
   63.10      snap_block_t *blk = (snap_block_t *)readblock(block);
   63.11      
    64.1 --- a/tools/blktap/parallax/snaplog.h	Fri Oct 21 13:58:39 2005 -0600
    64.2 +++ b/tools/blktap/parallax/snaplog.h	Mon Oct 24 09:08:13 2005 -0600
    64.3 @@ -13,12 +13,12 @@
    64.4  #define __SNAPLOG_H__
    64.5  
    64.6  typedef struct snap_id {
    64.7 -    u64            block;
    64.8 +    uint64_t            block;
    64.9      unsigned int   index;
   64.10  } snap_id_t;
   64.11  
   64.12  typedef struct snap_rec {
   64.13 -    u64            radix_root;
   64.14 +    uint64_t            radix_root;
   64.15      struct timeval timestamp;
   64.16      /* flags: */
   64.17      unsigned       deleted:1;
   64.18 @@ -38,7 +38,7 @@ int  snap_get_id(snap_id_t *id, snap_rec
   64.19  static const snap_id_t null_snap_id = { 0, 0 }; 
   64.20  
   64.21  typedef struct snap_block_hdr {
   64.22 -    u64            magic;
   64.23 +    uint64_t            magic;
   64.24      snap_id_t      parent_block; /* parent block within this chain */
   64.25      snap_id_t      fork_block;   /* where this log was forked */
   64.26      unsigned       log_entries;  /* total entries since forking */
   64.27 @@ -56,6 +56,6 @@ typedef struct snap_block {
   64.28  } snap_block_t;
   64.29      
   64.30  
   64.31 -snap_block_t *snap_get_block(u64 block);
   64.32 +snap_block_t *snap_get_block(uint64_t block);
   64.33  
   64.34  #endif /* __SNAPLOG_H__ */
    65.1 --- a/tools/blktap/parallax/vdi.c	Fri Oct 21 13:58:39 2005 -0600
    65.2 +++ b/tools/blktap/parallax/vdi.c	Mon Oct 24 09:08:13 2005 -0600
    65.3 @@ -132,9 +132,9 @@ vdi_t *vdi_create(snap_id_t *parent_snap
    65.4  /* vdi_get and vdi_put currently act more like alloc/free -- they don't 
    65.5   * do refcount-based allocation.  
    65.6   */
    65.7 -vdi_t *vdi_get(u64 vdi_id)
    65.8 +vdi_t *vdi_get(uint64_t vdi_id)
    65.9  {
   65.10 -    u64 vdi_blk;
   65.11 +    uint64_t vdi_blk;
   65.12      vdi_t *vdi;
   65.13      
   65.14      vdi_blk = lookup(VDI_REG_HEIGHT, VDI_RADIX_ROOT, vdi_id);
    66.1 --- a/tools/blktap/parallax/vdi.h	Fri Oct 21 13:58:39 2005 -0600
    66.2 +++ b/tools/blktap/parallax/vdi.h	Mon Oct 24 09:08:13 2005 -0600
    66.3 @@ -21,9 +21,9 @@
    66.4  
    66.5  
    66.6  typedef struct vdi {
    66.7 -    u64         id;               /* unique vdi id -- used by the registry   */
    66.8 -    u64         block;            /* block where this vdi lives (also unique)*/
    66.9 -    u64         radix_root;       /* radix root node for block mappings      */
   66.10 +    uint64_t         id;               /* unique vdi id -- used by the registry   */
   66.11 +    uint64_t         block;            /* block where this vdi lives (also unique)*/
   66.12 +    uint64_t         radix_root;       /* radix root node for block mappings      */
   66.13      snap_id_t   snap;             /* next snapshot slot for this VDI         */
   66.14      struct vdi *next;             /* used to hash-chain in blkif.            */
   66.15      blkif_vdev_t vdevice;         /* currently mounted as...                 */
   66.16 @@ -34,19 +34,19 @@ typedef struct vdi {
   66.17  #define VDI_REG_MAGIC   0xff00ff0bb0ff00ffLL
   66.18  
   66.19  typedef struct vdi_registry {
   66.20 -    u64     magic;
   66.21 -    u64     nr_vdis;
   66.22 +    uint64_t     magic;
   66.23 +    uint64_t     nr_vdis;
   66.24  } vdi_registry_t;
   66.25  
   66.26  
   66.27  int __init_vdi(void);
   66.28  
   66.29 -vdi_t *vdi_get(u64 vdi_id);
   66.30 +vdi_t *vdi_get(uint64_t vdi_id);
   66.31  void vdi_put(vdi_t *vdi);
   66.32  vdi_registry_t *get_vdi_registry(void);
   66.33  vdi_t *vdi_create(snap_id_t *parent_snap, char *name);
   66.34 -u64 vdi_lookup_block(vdi_t *vdi, u64 vdi_block, int *writable);
   66.35 -void vdi_update_block(vdi_t *vdi, u64 vdi_block, u64 g_block);
   66.36 +uint64_t vdi_lookup_block(vdi_t *vdi, uint64_t vdi_block, int *writable);
   66.37 +void vdi_update_block(vdi_t *vdi, uint64_t vdi_block, uint64_t g_block);
   66.38  void vdi_snapshot(vdi_t *vdi);
   66.39  
   66.40  
    67.1 --- a/tools/blktap/parallax/vdi_create.c	Fri Oct 21 13:58:39 2005 -0600
    67.2 +++ b/tools/blktap/parallax/vdi_create.c	Mon Oct 24 09:08:13 2005 -0600
    67.3 @@ -33,7 +33,7 @@ int main(int argc, char *argv[])
    67.4      name[VDI_NAME_SZ] = '\0';    
    67.5      
    67.6      if ( argc > 3 ) {
    67.7 -        id.block   = (u64)          atoll(argv[2]);
    67.8 +        id.block   = (uint64_t)          atoll(argv[2]);
    67.9          id.index   = (unsigned int) atol (argv[3]);
   67.10          from_snap  = 1;
   67.11      }
    68.1 --- a/tools/blktap/parallax/vdi_fill.c	Fri Oct 21 13:58:39 2005 -0600
    68.2 +++ b/tools/blktap/parallax/vdi_fill.c	Mon Oct 24 09:08:13 2005 -0600
    68.3 @@ -22,13 +22,13 @@
    68.4  int main(int argc, char *argv[])
    68.5  {
    68.6      vdi_t       *vdi;
    68.7 -    u64          id;
    68.8 +    uint64_t          id;
    68.9      int          fd;
   68.10      struct stat  st;
   68.11 -    u64          tot_size;
   68.12 +    uint64_t          tot_size;
   68.13      char         spage[BLOCK_SIZE];
   68.14      char        *dpage;
   68.15 -    u64          vblock = 0, count=0;
   68.16 +    uint64_t          vblock = 0, count=0;
   68.17      
   68.18      __init_blockstore();
   68.19      init_block_async();
   68.20 @@ -39,7 +39,7 @@ int main(int argc, char *argv[])
   68.21          exit(-1);
   68.22      }
   68.23          
   68.24 -    id = (u64) atoll(argv[1]);
   68.25 +    id = (uint64_t) atoll(argv[1]);
   68.26      
   68.27      vdi = vdi_get( id );
   68.28      
   68.29 @@ -60,7 +60,7 @@ int main(int argc, char *argv[])
   68.30          exit(-1);
   68.31      }
   68.32      
   68.33 -    tot_size = (u64) st.st_size;
   68.34 +    tot_size = (uint64_t) st.st_size;
   68.35      printf("Filling VDI %Ld with %Ld bytes.\n", id, tot_size);
   68.36      
   68.37      printf("%011Ld blocks total\n", tot_size / BLOCK_SIZE);    
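
[One loose end the type sweep leaves in place here: %Ld is a glibc extension, and now that the arguments are uint64_t the portable conversions are the <inttypes.h> macros. A sketch of the equivalent printf:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t id = 1, tot_size = 4096;
        printf("Filling VDI %" PRIu64 " with %" PRIu64 " bytes.\n",
               id, tot_size);
        return 0;
    }
]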
    69.1 --- a/tools/blktap/parallax/vdi_snap.c	Fri Oct 21 13:58:39 2005 -0600
    69.2 +++ b/tools/blktap/parallax/vdi_snap.c	Mon Oct 24 09:08:13 2005 -0600
    69.3 @@ -17,7 +17,7 @@
    69.4  int main(int argc, char *argv[])
    69.5  {
    69.6      vdi_t  *vdi;
    69.7 -    u64     id;
    69.8 +    uint64_t     id;
    69.9      
   69.10      __init_blockstore();
   69.11      __init_vdi();
   69.12 @@ -27,7 +27,7 @@ int main(int argc, char *argv[])
   69.13          exit(-1);
   69.14      }
   69.15      
   69.16 -    id = (u64) atoll(argv[1]);
   69.17 +    id = (uint64_t) atoll(argv[1]);
   69.18      
   69.19      vdi = vdi_get(id);
   69.20      
    70.1 --- a/tools/blktap/parallax/vdi_snap_delete.c	Fri Oct 21 13:58:39 2005 -0600
    70.2 +++ b/tools/blktap/parallax/vdi_snap_delete.c	Mon Oct 24 09:08:13 2005 -0600
    70.3 @@ -34,7 +34,7 @@ int main(int argc, char *argv[])
    70.4          exit(-1);
    70.5      }
    70.6      
    70.7 -    id.block   = (u64)          atoll(argv[1]);
    70.8 +    id.block   = (uint64_t)          atoll(argv[1]);
    70.9      id.index   = (unsigned int) atol (argv[2]);
   70.10      
   70.11      c_id = id;
    71.1 --- a/tools/blktap/parallax/vdi_snap_list.c	Fri Oct 21 13:58:39 2005 -0600
    71.2 +++ b/tools/blktap/parallax/vdi_snap_list.c	Mon Oct 24 09:08:13 2005 -0600
    71.3 @@ -18,7 +18,7 @@
    71.4  int main(int argc, char *argv[])
    71.5  {
    71.6      vdi_t        *vdi;
    71.7 -    u64           id;
    71.8 +    uint64_t           id;
    71.9      int           i, max_snaps = -1;
   71.10      snap_block_t *blk;
   71.11      snap_id_t     sid;
   71.12 @@ -32,7 +32,7 @@ int main(int argc, char *argv[])
   71.13          exit(-1);
   71.14      }
   71.15      
   71.16 -    id = (u64) atoll(argv[1]);
   71.17 +    id = (uint64_t) atoll(argv[1]);
   71.18      
   71.19      if ( argc > 2 ) {
   71.20          max_snaps = atoi(argv[2]);
    72.1 --- a/tools/blktap/parallax/vdi_unittest.c	Fri Oct 21 13:58:39 2005 -0600
    72.2 +++ b/tools/blktap/parallax/vdi_unittest.c	Mon Oct 24 09:08:13 2005 -0600
    72.3 @@ -39,9 +39,9 @@ void fill_test_pages(void)
    72.4      zero_page = newblock();
    72.5  }
    72.6  
    72.7 -inline u64 make_vaddr(u64 L1, u64 L2, u64 L3)
    72.8 +inline uint64_t make_vaddr(uint64_t L1, uint64_t L2, uint64_t L3)
    72.9  {
   72.10 -    u64 ret = L1;
   72.11 +    uint64_t ret = L1;
   72.12  
   72.13      ret = (ret << 9) | L2;
   72.14      ret = (ret << 9) | L3;
   72.15 @@ -49,9 +49,9 @@ inline u64 make_vaddr(u64 L1, u64 L2, u6
   72.16      return ret;
   72.17  }
   72.18  
   72.19 -void touch_block(vdi_t *vdi, u64 L1, u64 L2, u64 L3)
   72.20 +void touch_block(vdi_t *vdi, uint64_t L1, uint64_t L2, uint64_t L3)
   72.21  {
   72.22 -    u64 vaddr;
   72.23 +    uint64_t vaddr;
   72.24      char *page = pages[next_page++];
   72.25      char *rpage = NULL;
   72.26  
   72.27 @@ -76,9 +76,9 @@ void touch_block(vdi_t *vdi, u64 L1, u64
   72.28      freeblock(rpage);
   72.29  }
   72.30  
   72.31 -void test_block(vdi_t *vdi, u64 L1, u64 L2, u64 L3, char *page)
   72.32 +void test_block(vdi_t *vdi, uint64_t L1, uint64_t L2, uint64_t L3, char *page)
   72.33  {
   72.34 -    u64 vaddr;
   72.35 +    uint64_t vaddr;
   72.36      char *rpage = NULL;
   72.37  
   72.38      printf("TEST  (%3Lu, %3Lu, %3Lu)\n", L1, L2, L3);
   72.39 @@ -103,7 +103,7 @@ void test_block(vdi_t *vdi, u64 L1, u64 
   72.40  
   72.41  void coverage_test(vdi_t *vdi)
   72.42  {
   72.43 -    u64 vaddr;
   72.44 +    uint64_t vaddr;
   72.45      int i, j, k;
   72.46  
   72.47      /* Do a series of writes and reads to test all paths through the 
   72.48 @@ -155,13 +155,13 @@ void coverage_test(vdi_t *vdi)
   72.49  int main(int argc, char *argv[])
   72.50  {
   72.51      vdi_t       *vdi;
   72.52 -    u64          id;
   72.53 +    uint64_t          id;
   72.54      int          fd;
   72.55      struct stat  st;
   72.56 -    u64          tot_size;
   72.57 +    uint64_t          tot_size;
   72.58      char         spage[BLOCK_SIZE];
   72.59      char        *dpage;
   72.60 -    u64          vblock = 0, count=0;
   72.61 +    uint64_t          vblock = 0, count=0;
   72.62      
   72.63      __init_blockstore();
   72.64      init_block_async();
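
[make_vaddr() above folds three 9-bit radix indices into one virtual block address; its shift-and-or loop is equivalent to this flat form:

    #include <stdint.h>

    static uint64_t make_vaddr_flat(uint64_t L1, uint64_t L2, uint64_t L3)
    {
        return (L1 << 18) | (L2 << 9) | L3;   /* e.g. (1,2,3) -> 263171 */
    }
]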
    73.1 --- a/tools/blktap/parallax/vdi_validate.c	Fri Oct 21 13:58:39 2005 -0600
    73.2 +++ b/tools/blktap/parallax/vdi_validate.c	Mon Oct 24 09:08:13 2005 -0600
    73.3 @@ -23,13 +23,13 @@
    73.4  int main(int argc, char *argv[])
    73.5  {
    73.6      vdi_t       *vdi;
    73.7 -    u64          id;
    73.8 +    uint64_t          id;
    73.9      int          fd;
   73.10      struct stat  st;
   73.11 -    u64          tot_size;
   73.12 +    uint64_t          tot_size;
   73.13      char         spage[BLOCK_SIZE], *dpage;
   73.14      char        *vpage;
   73.15 -    u64          vblock = 0, count=0;
   73.16 +    uint64_t          vblock = 0, count=0;
   73.17      
   73.18      __init_blockstore();
   73.19      init_block_async();
   73.20 @@ -40,7 +40,7 @@ int main(int argc, char *argv[])
   73.21          exit(-1);
   73.22      }
   73.23          
   73.24 -    id = (u64) atoll(argv[1]);
   73.25 +    id = (uint64_t) atoll(argv[1]);
   73.26      
   73.27      vdi = vdi_get( id );
   73.28      
   73.29 @@ -61,7 +61,7 @@ int main(int argc, char *argv[])
   73.30          exit(-1);
   73.31      }
   73.32      
   73.33 -    tot_size = (u64) st.st_size;
   73.34 +    tot_size = (uint64_t) st.st_size;
   73.35      printf("Testing VDI %Ld (%Ld bytes).\n", id, tot_size);
   73.36      
   73.37      printf("           ");
    74.1 --- a/tools/blktap/ublkback/ublkbacklib.c	Fri Oct 21 13:58:39 2005 -0600
    74.2 +++ b/tools/blktap/ublkback/ublkbacklib.c	Mon Oct 24 09:08:13 2005 -0600
    74.3 @@ -112,7 +112,7 @@ int ublkback_response(blkif_t *blkif, bl
    74.4  typedef struct image {
    74.5      /* These need to turn into an array/rbtree for multi-disk support. */
    74.6      int  fd;
    74.7 -    u64  fsid;
    74.8 +    uint64_t  fsid;
    74.9      blkif_vdev_t   vdevice;
   74.10      long int size;
   74.11      long int secsize;
   74.12 @@ -190,7 +190,7 @@ int ublkback_new_blkif(blkif_t *blkif)
   74.13    }
   74.14  */
   74.15      if (image->size == 0)
   74.16 -        image->size =((u64) 16836057);
   74.17 +        image->size =((uint64_t) 16836057);
   74.18      image->secsize = 512;
   74.19      image->info = 0;
   74.20  
   74.21 @@ -215,7 +215,7 @@ static int batch_count = 0;
   74.22  int ublkback_request(blkif_t *blkif, blkif_request_t *req, int batch_done)
   74.23  {
   74.24      int fd;
   74.25 -    u64 sector;
   74.26 +    uint64_t sector;
   74.27      char *spage, *dpage;
   74.28      int ret, i, idx;
   74.29      blkif_response_t *rsp;
    75.1 --- a/tools/console/daemon/io.c	Fri Oct 21 13:58:39 2005 -0600
    75.2 +++ b/tools/console/daemon/io.c	Mon Oct 24 09:08:13 2005 -0600
    75.3 @@ -79,44 +79,43 @@ static void evtchn_notify(struct domain 
    75.4  static void buffer_append(struct domain *dom)
    75.5  {
    75.6  	struct buffer *buffer = &dom->buffer;
    75.7 -	size_t size;
    75.8 -	XENCONS_RING_IDX oldcons;
    75.9 -	int notify = 0;
   75.10 +	XENCONS_RING_IDX cons, prod, size;
   75.11  	struct xencons_interface *intf = dom->interface;
   75.12  
   75.13 -	while ((size = (intf->out_prod - intf->out_cons)) != 0) {
   75.14 -		notify = 1;
   75.15 -
   75.16 -		if ((buffer->capacity - buffer->size) < size) {
   75.17 -			buffer->capacity += (size + 1024);
   75.18 -			buffer->data = realloc(buffer->data, buffer->capacity);
   75.19 -			if (buffer->data == NULL) {
   75.20 -				dolog(LOG_ERR, "Memory allocation failed");
   75.21 -				exit(ENOMEM);
   75.22 -			}
   75.23 -		}
   75.24 +	cons = intf->out_cons;
   75.25 +	prod = intf->out_prod;
   75.26 +	mb();
   75.27  
   75.28 -		oldcons = intf->out_cons;
   75.29 -		while ((intf->out_cons - oldcons) < size) {
   75.30 -			buffer->data[buffer->size] = intf->out[
   75.31 -				MASK_XENCONS_IDX(intf->out_cons, intf->out)];
   75.32 -			buffer->size++;
   75.33 -			intf->out_cons++;
   75.34 -		}
   75.35 +	size = prod - cons;
   75.36 +	if ((size == 0) || (size > sizeof(intf->out)))
   75.37 +		return;
   75.38  
   75.39 -		if (buffer->max_capacity &&
   75.40 -		    buffer->size > buffer->max_capacity) {
   75.41 -			memmove(buffer->data + (buffer->size -
   75.42 -						buffer->max_capacity),
   75.43 -				buffer->data, buffer->max_capacity);
   75.44 -			buffer->data = realloc(buffer->data,
   75.45 -					       buffer->max_capacity);
   75.46 -			buffer->capacity = buffer->max_capacity;
   75.47 +	if ((buffer->capacity - buffer->size) < size) {
   75.48 +		buffer->capacity += (size + 1024);
   75.49 +		buffer->data = realloc(buffer->data, buffer->capacity);
   75.50 +		if (buffer->data == NULL) {
   75.51 +			dolog(LOG_ERR, "Memory allocation failed");
   75.52 +			exit(ENOMEM);
   75.53  		}
   75.54  	}
   75.55  
   75.56 -	if (notify)
   75.57 -		evtchn_notify(dom);
   75.58 +	while (cons != prod)
   75.59 +		buffer->data[buffer->size++] = intf->out[
   75.60 +			MASK_XENCONS_IDX(cons++, intf->out)];
   75.61 +
   75.62 +	mb();
   75.63 +	intf->out_cons = cons;
   75.64 +	evtchn_notify(dom);
   75.65 +
   75.66 +	if (buffer->max_capacity &&
   75.67 +	    buffer->size > buffer->max_capacity) {
   75.68 +		memmove(buffer->data + (buffer->size -
   75.69 +					buffer->max_capacity),
   75.70 +			buffer->data, buffer->max_capacity);
   75.71 +		buffer->data = realloc(buffer->data,
   75.72 +				       buffer->max_capacity);
   75.73 +		buffer->capacity = buffer->max_capacity;
   75.74 +	}
   75.75  }
   75.76  
   75.77  static bool buffer_empty(struct buffer *buffer)
   75.78 @@ -164,14 +163,6 @@ static int domain_create_tty(struct doma
   75.79  			tcsetattr(master, TCSAFLUSH, &term);
   75.80  		}
   75.81  
   75.82 -		success = asprintf(&path, "%s/tty", dom->conspath) != -1;
   75.83 -		if (!success)
   75.84 -			goto out;
   75.85 -		success = xs_write(xs, NULL, path, slave, strlen(slave));
   75.86 -		free(path);
   75.87 -		if (!success)
   75.88 -			goto out;
   75.89 -
   75.90  		success = asprintf(&path, "%s/limit", dom->conspath) != -1;
   75.91  		if (!success)
   75.92  			goto out;
   75.93 @@ -181,6 +172,14 @@ static int domain_create_tty(struct doma
   75.94  			free(data);
   75.95  		}
   75.96  		free(path);
   75.97 +
   75.98 +		success = asprintf(&path, "%s/tty", dom->conspath) != -1;
   75.99 +		if (!success)
  75.100 +			goto out;
  75.101 +		success = xs_write(xs, NULL, path, slave, strlen(slave));
  75.102 +		free(path);
  75.103 +		if (!success)
  75.104 +			goto out;
  75.105  	}
  75.106  
  75.107  	return master;
  75.108 @@ -270,6 +269,18 @@ static int domain_create_ring(struct dom
  75.109  	}
  75.110  	dom->local_port = rc;
  75.111  
  75.112 +	if (dom->tty_fd == -1) {
  75.113 +		dom->tty_fd = domain_create_tty(dom);
  75.114 +
  75.115 +		if (dom->tty_fd == -1) {
  75.116 +			err = errno;
  75.117 +			close(dom->evtchn_fd);
  75.118 +			dom->evtchn_fd = -1;
  75.119 +			dom->local_port = -1;
  75.120 +			goto out;
  75.121 +		}
  75.122 +	}
  75.123 +
  75.124   out:
  75.125  	return err;
  75.126  }
  75.127 @@ -302,10 +313,7 @@ static struct domain *create_domain(int 
  75.128  	}
  75.129  
  75.130  	dom->domid = domid;
  75.131 -
  75.132  	dom->conspath = xs_get_domain_path(xs, dom->domid);
  75.133 -	if (dom->conspath == NULL)
  75.134 -		goto out;
  75.135  	s = realloc(dom->conspath, strlen(dom->conspath) +
  75.136  		    strlen("/console") + 1);
  75.137  	if (s == NULL)
  75.138 @@ -313,7 +321,7 @@ static struct domain *create_domain(int 
  75.139  	dom->conspath = s;
  75.140  	strcat(dom->conspath, "/console");
  75.141  
  75.142 -	dom->tty_fd = domain_create_tty(dom);
  75.143 +	dom->tty_fd = -1;
  75.144  	dom->is_dead = false;
  75.145  	dom->buffer.data = 0;
  75.146  	dom->buffer.size = 0;
  75.147 @@ -419,10 +427,14 @@ static void handle_tty_read(struct domai
  75.148  	char msg[80];
  75.149  	int i;
  75.150  	struct xencons_interface *intf = dom->interface;
  75.151 -	XENCONS_RING_IDX filled = intf->in_prod - intf->in_cons;
  75.152 +	XENCONS_RING_IDX cons, prod;
  75.153  
  75.154 -	if (sizeof(intf->in) > filled)
  75.155 -		len = sizeof(intf->in) - filled;
  75.156 +	cons = intf->in_cons;
  75.157 +	prod = intf->in_prod;
  75.158 +	mb();
  75.159 +
  75.160 +	if (sizeof(intf->in) > (prod - cons))
  75.161 +		len = sizeof(intf->in) - (prod - cons);
  75.162  	if (len > sizeof(msg))
  75.163  		len = sizeof(msg);
  75.164  
  75.165 @@ -441,10 +453,11 @@ static void handle_tty_read(struct domai
  75.166  		}
  75.167  	} else if (domain_is_valid(dom->domid)) {
  75.168  		for (i = 0; i < len; i++) {
  75.169 -			intf->in[MASK_XENCONS_IDX(intf->in_prod, intf->in)] =
  75.170 +			intf->in[MASK_XENCONS_IDX(prod++, intf->in)] =
  75.171  				msg[i];
  75.172 -			intf->in_prod++;
  75.173  		}
  75.174 +		wmb();
  75.175 +		intf->in_prod = prod;
  75.176  		evtchn_notify(dom);
  75.177  	} else {
  75.178  		close(dom->tty_fd);
  75.179 @@ -474,7 +487,7 @@ static void handle_tty_write(struct doma
  75.180  
  75.181  static void handle_ring_read(struct domain *dom)
  75.182  {
  75.183 -	u16 v;
  75.184 +	uint16_t v;
  75.185  
  75.186  	if (!read_sync(dom->evtchn_fd, &v, sizeof(v)))
  75.187  		return;
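
[The rewritten buffer_append() and handle_tty_read() follow the classic shared-ring discipline: snapshot both indices, fence, sanity-check the distance (the producer is untrusted), copy, fence again, then publish the new consumer index and notify. A condensed sketch of the consumer side; mb() is stood in for by a GCC builtin and the ring layout is assumed:

    #include <stdint.h>

    #define mb() __sync_synchronize()   /* full barrier, as a stand-in */

    struct ring {
        char     out[1024];             /* power-of-two ring           */
        uint32_t out_cons, out_prod;    /* free-running indices        */
    };

    static void consume(struct ring *r, void (*sink)(char))
    {
        uint32_t cons = r->out_cons;
        uint32_t prod = r->out_prod;
        mb();                                /* indices before data    */
        if (prod - cons > sizeof(r->out))    /* corrupt ring: bail     */
            return;
        while (cons != prod)
            sink(r->out[cons++ & (sizeof(r->out) - 1)]);
        mb();                                /* drain before publish   */
        r->out_cons = cons;                  /* then notify the peer   */
    }
]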
    76.1 --- a/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c	Fri Oct 21 13:58:39 2005 -0600
    76.2 +++ b/tools/debugger/gdb/gdb-6.2.1-xen-sparse/gdb/gdbserver/linux-xen-low.c	Mon Oct 24 09:08:13 2005 -0600
    76.3 @@ -38,7 +38,7 @@
    76.4  #include <xenctrl.h>
    76.5  #define TRACE_ENTER /* printf("enter %s\n", __FUNCTION__) */
    76.6  
    76.7 -long (*myptrace)(int xc_handle, enum __ptrace_request, u32, long, long);
    76.8 +long (*myptrace)(int xc_handle, enum __ptrace_request, uint32_t, long, long);
    76.9  int (*myxcwait)(int xc_handle, int domain, int *status, int options) ;
   76.10  static int xc_handle;
   76.11  
    77.1 --- a/tools/debugger/libxendebug/xendebug.c	Fri Oct 21 13:58:39 2005 -0600
    77.2 +++ b/tools/debugger/libxendebug/xendebug.c	Mon Oct 24 09:08:13 2005 -0600
    77.3 @@ -41,8 +41,8 @@ typedef struct bwcpoint                 
    77.4  {
    77.5      struct list_head list;
    77.6      unsigned long address;
    77.7 -    u32 domain;
    77.8 -    u8 old_value;                             /* old value for software bkpt */
    77.9 +    uint32_t domain;
   77.10 +    uint8_t old_value;                             /* old value for software bkpt */
   77.11  } bwcpoint_t, *bwcpoint_p;
   77.12  
   77.13  static bwcpoint_t bwcpoint_list;
   77.14 @@ -52,7 +52,7 @@ static bwcpoint_t bwcpoint_list;
   77.15  typedef struct domain_context                 /* local cache of domain state */
   77.16  {
   77.17      struct list_head     list;
   77.18 -    u32                  domid;
   77.19 +    uint32_t                  domid;
   77.20      boolean              valid[MAX_VIRT_CPUS];
   77.21      vcpu_guest_context_t context[MAX_VIRT_CPUS];
   77.22  
   77.23 @@ -92,7 +92,7 @@ xendebug_initialize()
   77.24  /**************/
   77.25  
   77.26  static domain_context_p
   77.27 -xendebug_domain_context_search (u32 domid)
   77.28 +xendebug_domain_context_search (uint32_t domid)
   77.29  {
   77.30      struct list_head *entry;
   77.31      domain_context_p  ctxt;
   77.32 @@ -107,7 +107,7 @@ xendebug_domain_context_search (u32 domi
   77.33  }
   77.34  
   77.35  static __inline__ domain_context_p
   77.36 -xendebug_get_context (int xc_handle, u32 domid, u32 vcpu)
   77.37 +xendebug_get_context (int xc_handle, uint32_t domid, uint32_t vcpu)
   77.38  {
   77.39      int rc;
   77.40      domain_context_p ctxt;
   77.41 @@ -130,7 +130,7 @@ xendebug_get_context (int xc_handle, u32
   77.42  }
   77.43  
   77.44  static __inline__ int
   77.45 -xendebug_set_context (int xc_handle, domain_context_p ctxt, u32 vcpu)
   77.46 +xendebug_set_context (int xc_handle, domain_context_p ctxt, uint32_t vcpu)
   77.47  {
   77.48      dom0_op_t op;
   77.49      int rc;
   77.50 @@ -157,8 +157,8 @@ xendebug_set_context (int xc_handle, dom
   77.51  
   77.52  int
   77.53  xendebug_attach(int xc_handle,
   77.54 -                u32 domid,
   77.55 -                u32 vcpu)
   77.56 +                uint32_t domid,
   77.57 +                uint32_t vcpu)
   77.58  {
   77.59      domain_context_p ctxt;
   77.60  
   77.61 @@ -176,8 +176,8 @@ xendebug_attach(int xc_handle,
   77.62  
   77.63  int
   77.64  xendebug_detach(int xc_handle,
   77.65 -                u32 domid,
   77.66 -                u32 vcpu)
   77.67 +                uint32_t domid,
   77.68 +                uint32_t vcpu)
   77.69  {
   77.70      domain_context_p ctxt;
   77.71      
   77.72 @@ -197,8 +197,8 @@ xendebug_detach(int xc_handle,
   77.73  
   77.74  int
   77.75  xendebug_read_registers(int xc_handle,
   77.76 -                        u32 domid,
   77.77 -                        u32 vcpu,
   77.78 +                        uint32_t domid,
   77.79 +                        uint32_t vcpu,
   77.80                          cpu_user_regs_t **regs)
   77.81  {
   77.82      domain_context_p ctxt;
   77.83 @@ -218,8 +218,8 @@ xendebug_read_registers(int xc_handle,
   77.84  
   77.85  int
   77.86  xendebug_read_fpregisters (int xc_handle,
   77.87 -                           u32 domid,
   77.88 -                           u32 vcpu,
   77.89 +                           uint32_t domid,
   77.90 +                           uint32_t vcpu,
   77.91                             char **regs)
   77.92  {
   77.93      domain_context_p ctxt;
   77.94 @@ -239,8 +239,8 @@ xendebug_read_fpregisters (int xc_handle
   77.95  
   77.96  int
   77.97  xendebug_write_registers(int xc_handle,
   77.98 -                         u32 domid,
   77.99 -                         u32 vcpu,
  77.100 +                         uint32_t domid,
  77.101 +                         uint32_t vcpu,
  77.102                           cpu_user_regs_t *regs)
  77.103  {
  77.104      domain_context_p ctxt;
  77.105 @@ -260,8 +260,8 @@ xendebug_write_registers(int xc_handle,
  77.106  
  77.107  int
  77.108  xendebug_step(int xc_handle,
  77.109 -              u32 domid,
  77.110 -              u32 vcpu)
  77.111 +              uint32_t domid,
  77.112 +              uint32_t vcpu)
  77.113  {
  77.114      domain_context_p ctxt;
  77.115      int rc;
  77.116 @@ -282,8 +282,8 @@ xendebug_step(int xc_handle,
  77.117  
  77.118  int
  77.119  xendebug_continue(int xc_handle,
  77.120 -                  u32 domid,
  77.121 -                  u32 vcpu)
  77.122 +                  uint32_t domid,
  77.123 +                  uint32_t vcpu)
  77.124  {
  77.125      domain_context_p ctxt;
  77.126      int rc;
  77.127 @@ -310,8 +310,8 @@ xendebug_continue(int xc_handle,
  77.128  
  77.129  /* access to one page */
  77.130  static int
  77.131 -xendebug_memory_page (domain_context_p ctxt, int xc_handle, u32 vcpu,
  77.132 -                      int protection, unsigned long address, int length, u8 *buffer)
  77.133 +xendebug_memory_page (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
  77.134 +                      int protection, unsigned long address, int length, uint8_t *buffer)
  77.135  {
  77.136      vcpu_guest_context_t *vcpu_ctxt = &ctxt->context[vcpu];
  77.137      unsigned long pde, page;
  77.138 @@ -406,8 +406,8 @@ xendebug_memory_page (domain_context_p c
  77.139  
  77.140  /* divide a memory operation into accesses to individual pages */
  77.141  static int
  77.142 -xendebug_memory_op (domain_context_p ctxt, int xc_handle, u32 vcpu,
  77.143 -                    int protection, unsigned long address, int length, u8 *buffer)
  77.144 +xendebug_memory_op (domain_context_p ctxt, int xc_handle, uint32_t vcpu,
  77.145 +                    int protection, unsigned long address, int length, uint8_t *buffer)
  77.146  {
  77.147      int      remain;              /* number of bytes to touch past this page */
  77.148      int      bytes   = 0;
  77.149 @@ -429,11 +429,11 @@ xendebug_memory_op (domain_context_p ctx
  77.150  
  77.151  int
  77.152  xendebug_read_memory(int xc_handle,
  77.153 -                     u32 domid,
  77.154 -                     u32 vcpu,
  77.155 +                     uint32_t domid,
  77.156 +                     uint32_t vcpu,
  77.157                       unsigned long address,
  77.158 -                     u32 length,
  77.159 -                     u8 *data)
  77.160 +                     uint32_t length,
  77.161 +                     uint8_t *data)
  77.162  {
  77.163      domain_context_p ctxt;
  77.164  
  77.165 @@ -449,11 +449,11 @@ xendebug_read_memory(int xc_handle,
  77.166  
  77.167  int
  77.168  xendebug_write_memory(int xc_handle,
  77.169 -                      u32 domid,
  77.170 -                      u32 vcpu,
  77.171 +                      uint32_t domid,
  77.172 +                      uint32_t vcpu,
  77.173                        unsigned long address,
  77.174 -                      u32 length,
  77.175 -                      u8 *data)
  77.176 +                      uint32_t length,
  77.177 +                      uint8_t *data)
  77.178  {
  77.179      domain_context_p ctxt;
  77.180  
  77.181 @@ -469,13 +469,13 @@ xendebug_write_memory(int xc_handle,
  77.182  
  77.183  int
  77.184  xendebug_insert_memory_breakpoint(int xc_handle,
  77.185 -                                  u32 domid,
  77.186 -                                  u32 vcpu,
  77.187 +                                  uint32_t domid,
  77.188 +                                  uint32_t vcpu,
  77.189                                    unsigned long address,
  77.190 -                                  u32 length)
  77.191 +                                  uint32_t length)
  77.192  {
  77.193      bwcpoint_p bkpt;
  77.194 -    u8 breakpoint_opcode = 0xcc;
  77.195 +    uint8_t breakpoint_opcode = 0xcc;
  77.196  
  77.197      printf("insert breakpoint %d:%lx %d\n",
  77.198              domid, address, length);
  77.199 @@ -515,10 +515,10 @@ xendebug_insert_memory_breakpoint(int xc
  77.200  
  77.201  int
  77.202  xendebug_remove_memory_breakpoint(int xc_handle,
  77.203 -                                  u32 domid,
  77.204 -                                  u32 vcpu,
  77.205