ia64/xen-unstable

changeset 7461:899f7b4b19fc

Upgrade tree to 2.6.12.6.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Oct 21 10:46:30 2005 +0100 (2005-10-21)
parents 349b302f29e2
children c3a0f492644c
files patches/linux-2.6.12/2.6.12.5.patch patches/linux-2.6.12/2.6.12.6.patch
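
For context: patches/linux-2.6.12/ carries the upstream stable patch that is applied to a pristine linux-2.6.12 tree before the Xen sparse tree is overlaid (the usual arrangement in this tree; treat the exact build mechanics as an assumption here). This upgrade therefore swaps 2.6.12.5.patch for 2.6.12.6.patch, and the diff below records the removal of the old patch. A minimal sketch of the intended end state of the kernel Makefile's version block — assuming the new patch mirrors the hunk at the top of the removed one; the ".6" value is inferred from the changeset description and is not shown in this diff:

    VERSION = 2
    PATCHLEVEL = 6
    SUBLEVEL = 12
    EXTRAVERSION = .6    # was ".5" under the removed 2.6.12.5.patch (inferred)
    NAME=Woozy Numbat
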
line diff
     1.1 --- a/patches/linux-2.6.12/2.6.12.5.patch	Fri Oct 21 10:24:35 2005 +0100
     1.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.3 @@ -1,1614 +0,0 @@
     1.4 -diff --git a/Makefile b/Makefile
     1.5 ---- a/Makefile
     1.6 -+++ b/Makefile
     1.7 -@@ -1,7 +1,7 @@
     1.8 - VERSION = 2
     1.9 - PATCHLEVEL = 6
    1.10 - SUBLEVEL = 12
    1.11 --EXTRAVERSION =
    1.12 -+EXTRAVERSION = .5
    1.13 - NAME=Woozy Numbat
    1.14 - 
    1.15 - # *DOCUMENTATION*
    1.16 -@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
    1.17 - #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
    1.18 - #Adding $(srctree) adds about 20M on i386 to the size of the output file!
    1.19 - 
    1.20 --ifeq ($(KBUILD_OUTPUT),)
    1.21 -+ifeq ($(src),$(obj))
    1.22 - __srctree =
    1.23 - else
    1.24 - __srctree = $(srctree)/
    1.25 -diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    1.26 ---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    1.27 -+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    1.28 -@@ -44,7 +44,7 @@
    1.29 - 
    1.30 - #define PFX "powernow-k8: "
    1.31 - #define BFX PFX "BIOS error: "
    1.32 --#define VERSION "version 1.40.2"
    1.33 -+#define VERSION "version 1.40.4"
    1.34 - #include "powernow-k8.h"
    1.35 - 
    1.36 - /* serialize freq changes  */
    1.37 -@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
    1.38 - {
    1.39 - 	struct powernow_k8_data *data;
    1.40 - 	cpumask_t oldmask = CPU_MASK_ALL;
    1.41 --	int rc;
    1.42 -+	int rc, i;
    1.43 - 
    1.44 - 	if (!check_supported_cpu(pol->cpu))
    1.45 - 		return -ENODEV;
    1.46 -@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
    1.47 - 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
    1.48 - 	       data->currfid, data->currvid);
    1.49 - 
    1.50 --	powernow_data[pol->cpu] = data;
    1.51 -+	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
    1.52 -+		powernow_data[i] = data;
    1.53 -+	}
    1.54 - 
    1.55 - 	return 0;
    1.56 - 
    1.57 -diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
    1.58 ---- a/arch/i386/kernel/process.c
    1.59 -+++ b/arch/i386/kernel/process.c
    1.60 -@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
    1.61 - 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
    1.62 - 		return -EINVAL;
    1.63 - 
    1.64 -+	memset(&info, 0, sizeof(info));
    1.65 -+
    1.66 - 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
    1.67 - 
    1.68 - 	info.entry_number = idx;
    1.69 -diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
    1.70 ---- a/arch/ia64/kernel/ptrace.c
    1.71 -+++ b/arch/ia64/kernel/ptrace.c
    1.72 -@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
    1.73 - 				*data = (pt->cr_ipsr & IPSR_MASK);
    1.74 - 			return 0;
    1.75 - 
    1.76 -+		      case PT_AR_RSC:
    1.77 -+			if (write_access)
    1.78 -+				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
    1.79 -+			else
    1.80 -+				*data = pt->ar_rsc;
    1.81 -+			return 0;
    1.82 -+
    1.83 - 		      case PT_AR_RNAT:
    1.84 - 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
    1.85 - 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
    1.86 -@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
    1.87 - 		      case PT_AR_BSPSTORE:
    1.88 - 			ptr = pt_reg_addr(pt, ar_bspstore);
    1.89 - 			break;
    1.90 --		      case PT_AR_RSC:
    1.91 --			ptr = pt_reg_addr(pt, ar_rsc);
    1.92 --			break;
    1.93 - 		      case PT_AR_UNAT:
    1.94 - 			ptr = pt_reg_addr(pt, ar_unat);
    1.95 - 			break;
    1.96 -@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
    1.97 - static long
    1.98 - ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
    1.99 - {
   1.100 --	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   1.101 -+	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   1.102 - 	struct unw_frame_info info;
   1.103 - 	struct switch_stack *sw;
   1.104 - 	struct ia64_fpreg fpval;
   1.105 -@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
   1.106 - 	/* app regs */
   1.107 - 
   1.108 - 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
   1.109 --	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
   1.110 -+	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
   1.111 - 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
   1.112 - 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
   1.113 - 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
   1.114 -@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
   1.115 - 	retval |= __get_user(nat_bits, &ppr->nat);
   1.116 - 
   1.117 - 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
   1.118 -+	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
   1.119 - 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
   1.120 - 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
   1.121 - 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
   1.122 -diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
   1.123 ---- a/arch/ia64/kernel/signal.c
   1.124 -+++ b/arch/ia64/kernel/signal.c
   1.125 -@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
   1.126 - static long
   1.127 - restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
   1.128 - {
   1.129 --	unsigned long ip, flags, nat, um, cfm;
   1.130 -+	unsigned long ip, flags, nat, um, cfm, rsc;
   1.131 - 	long err;
   1.132 - 
   1.133 - 	/* Always make any pending restarted system calls return -EINTR */
   1.134 -@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
   1.135 - 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
   1.136 - 	err |= __get_user(cfm, &sc->sc_cfm);
   1.137 - 	err |= __get_user(um, &sc->sc_um);			/* user mask */
   1.138 --	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
   1.139 -+	err |= __get_user(rsc, &sc->sc_ar_rsc);
   1.140 - 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
   1.141 - 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
   1.142 - 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
   1.143 -@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
   1.144 - 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
   1.145 - 
   1.146 - 	scr->pt.cr_ifs = cfm | (1UL << 63);
   1.147 -+	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
   1.148 - 
   1.149 - 	/* establish new instruction pointer: */
   1.150 - 	scr->pt.cr_iip = ip & ~0x3UL;
   1.151 -diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
   1.152 ---- a/arch/ppc/kernel/time.c
   1.153 -+++ b/arch/ppc/kernel/time.c
   1.154 -@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
   1.155 - 
   1.156 - extern unsigned long wall_jiffies;
   1.157 - 
   1.158 -+/* used for timezone offset */
   1.159 -+static long timezone_offset;
   1.160 -+
   1.161 - DEFINE_SPINLOCK(rtc_lock);
   1.162 - 
   1.163 - EXPORT_SYMBOL(rtc_lock);
   1.164 -@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
   1.165 - 		     xtime.tv_sec - last_rtc_update >= 659 &&
   1.166 - 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
   1.167 - 		     jiffies - wall_jiffies == 1) {
   1.168 --		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
   1.169 -+		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
   1.170 - 				last_rtc_update = xtime.tv_sec+1;
   1.171 - 			else
   1.172 - 				/* Try again one minute later */
   1.173 -@@ -286,7 +289,7 @@ void __init time_init(void)
   1.174 - 	unsigned old_stamp, stamp, elapsed;
   1.175 - 
   1.176 -         if (ppc_md.time_init != NULL)
   1.177 --                time_offset = ppc_md.time_init();
   1.178 -+                timezone_offset = ppc_md.time_init();
   1.179 - 
   1.180 - 	if (__USE_RTC()) {
   1.181 - 		/* 601 processor: dec counts down by 128 every 128ns */
   1.182 -@@ -331,10 +334,10 @@ void __init time_init(void)
   1.183 - 	set_dec(tb_ticks_per_jiffy);
   1.184 - 
   1.185 - 	/* If platform provided a timezone (pmac), we correct the time */
   1.186 --        if (time_offset) {
   1.187 --		sys_tz.tz_minuteswest = -time_offset / 60;
   1.188 -+        if (timezone_offset) {
   1.189 -+		sys_tz.tz_minuteswest = -timezone_offset / 60;
   1.190 - 		sys_tz.tz_dsttime = 0;
   1.191 --		xtime.tv_sec -= time_offset;
   1.192 -+		xtime.tv_sec -= timezone_offset;
   1.193 -         }
   1.194 -         set_normalized_timespec(&wall_to_monotonic,
   1.195 -                                 -xtime.tv_sec, -xtime.tv_nsec);
   1.196 -diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
   1.197 ---- a/arch/ppc64/boot/zlib.c
   1.198 -+++ b/arch/ppc64/boot/zlib.c
   1.199 -@@ -1307,7 +1307,7 @@ local int huft_build(
   1.200 -   {
   1.201 -     *t = (inflate_huft *)Z_NULL;
   1.202 -     *m = 0;
   1.203 --    return Z_OK;
   1.204 -+    return Z_DATA_ERROR;
   1.205 -   }
   1.206 - 
   1.207 - 
   1.208 -@@ -1351,6 +1351,7 @@ local int huft_build(
   1.209 -     if ((j = *p++) != 0)
   1.210 -       v[x[j]++] = i;
   1.211 -   } while (++i < n);
   1.212 -+  n = x[g];			/* set n to length of v */
   1.213 - 
   1.214 - 
   1.215 -   /* Generate the Huffman codes and for each, make the table entries */
   1.216 -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
   1.217 ---- a/arch/um/kernel/process.c
   1.218 -+++ b/arch/um/kernel/process.c
   1.219 -@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
   1.220 - 	return(arg.pid);
   1.221 - }
   1.222 - 
   1.223 --static int ptrace_child(void)
   1.224 -+static int ptrace_child(void *arg)
   1.225 - {
   1.226 - 	int ret;
   1.227 - 	int pid = os_getpid(), ppid = getppid();
   1.228 -@@ -159,16 +159,20 @@ static int ptrace_child(void)
   1.229 - 	_exit(ret);
   1.230 - }
   1.231 - 
   1.232 --static int start_ptraced_child(void)
   1.233 -+static int start_ptraced_child(void **stack_out)
   1.234 - {
   1.235 -+	void *stack;
   1.236 -+	unsigned long sp;
   1.237 - 	int pid, n, status;
   1.238 - 	
   1.239 --	pid = fork();
   1.240 --	if(pid == 0)
   1.241 --		ptrace_child();
   1.242 --
   1.243 -+	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
   1.244 -+		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   1.245 -+	if(stack == MAP_FAILED)
   1.246 -+		panic("check_ptrace : mmap failed, errno = %d", errno);
   1.247 -+	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
   1.248 -+	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
   1.249 - 	if(pid < 0)
   1.250 --		panic("check_ptrace : fork failed, errno = %d", errno);
   1.251 -+		panic("check_ptrace : clone failed, errno = %d", errno);
   1.252 - 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
   1.253 - 	if(n < 0)
   1.254 - 		panic("check_ptrace : wait failed, errno = %d", errno);
   1.255 -@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
   1.256 - 		panic("check_ptrace : expected SIGSTOP, got status = %d",
   1.257 - 		      status);
   1.258 - 
   1.259 -+	*stack_out = stack;
   1.260 - 	return(pid);
   1.261 - }
   1.262 - 
   1.263 -@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
   1.264 -  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
   1.265 -  * So only for SYSEMU features we test mustpanic, while normal host features
   1.266 -  * must work anyway!*/
   1.267 --static int stop_ptraced_child(int pid, int exitcode, int mustexit)
   1.268 -+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
   1.269 - {
   1.270 - 	int status, n, ret = 0;
   1.271 - 
   1.272 - 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
   1.273 --		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
   1.274 -+		panic("check_ptrace : ptrace failed, errno = %d", errno);
   1.275 - 	CATCH_EINTR(n = waitpid(pid, &status, 0));
   1.276 - 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
   1.277 - 		int exit_with = WEXITSTATUS(status);
   1.278 -@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
   1.279 - 		printk("check_ptrace : child exited with exitcode %d, while "
   1.280 - 		      "expecting %d; status 0x%x", exit_with,
   1.281 - 		      exitcode, status);
   1.282 --		if (mustexit)
   1.283 -+		if (mustpanic)
   1.284 - 			panic("\n");
   1.285 - 		else
   1.286 - 			printk("\n");
   1.287 - 		ret = -1;
   1.288 - 	}
   1.289 - 
   1.290 -+	if(munmap(stack, PAGE_SIZE) < 0)
   1.291 -+		panic("check_ptrace : munmap failed, errno = %d", errno);
   1.292 - 	return ret;
   1.293 - }
   1.294 - 
   1.295 -@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
   1.296 - 
   1.297 - static void __init check_sysemu(void)
   1.298 - {
   1.299 -+	void *stack;
   1.300 - 	int pid, syscall, n, status, count=0;
   1.301 - 
   1.302 - 	printk("Checking syscall emulation patch for ptrace...");
   1.303 - 	sysemu_supported = 0;
   1.304 --	pid = start_ptraced_child();
   1.305 -+	pid = start_ptraced_child(&stack);
   1.306 - 
   1.307 - 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
   1.308 - 		goto fail;
   1.309 -@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
   1.310 - 		panic("check_sysemu : failed to modify system "
   1.311 - 		      "call return, errno = %d", errno);
   1.312 - 
   1.313 --	if (stop_ptraced_child(pid, 0, 0) < 0)
   1.314 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   1.315 - 		goto fail_stopped;
   1.316 - 
   1.317 - 	sysemu_supported = 1;
   1.318 -@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
   1.319 - 	set_using_sysemu(!force_sysemu_disabled);
   1.320 - 
   1.321 - 	printk("Checking advanced syscall emulation patch for ptrace...");
   1.322 --	pid = start_ptraced_child();
   1.323 -+	pid = start_ptraced_child(&stack);
   1.324 - 	while(1){
   1.325 - 		count++;
   1.326 - 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
   1.327 -@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
   1.328 - 			break;
   1.329 - 		}
   1.330 - 	}
   1.331 --	if (stop_ptraced_child(pid, 0, 0) < 0)
   1.332 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   1.333 - 		goto fail_stopped;
   1.334 - 
   1.335 - 	sysemu_supported = 2;
   1.336 -@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
   1.337 - 	return;
   1.338 - 
   1.339 - fail:
   1.340 --	stop_ptraced_child(pid, 1, 0);
   1.341 -+	stop_ptraced_child(pid, stack, 1, 0);
   1.342 - fail_stopped:
   1.343 - 	printk("missing\n");
   1.344 - }
   1.345 - 
   1.346 - void __init check_ptrace(void)
   1.347 - {
   1.348 -+	void *stack;
   1.349 - 	int pid, syscall, n, status;
   1.350 - 
   1.351 - 	printk("Checking that ptrace can change system call numbers...");
   1.352 --	pid = start_ptraced_child();
   1.353 -+	pid = start_ptraced_child(&stack);
   1.354 - 
   1.355 - 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
   1.356 - 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
   1.357 -@@ -330,7 +339,7 @@ void __init check_ptrace(void)
   1.358 - 			break;
   1.359 - 		}
   1.360 - 	}
   1.361 --	stop_ptraced_child(pid, 0, 1);
   1.362 -+	stop_ptraced_child(pid, stack, 0, 1);
   1.363 - 	printk("OK\n");
   1.364 - 	check_sysemu();
   1.365 - }
   1.366 -@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
   1.367 - static inline int check_skas3_ptrace_support(void)
   1.368 - {
   1.369 - 	struct ptrace_faultinfo fi;
   1.370 -+	void *stack;
   1.371 - 	int pid, n, ret = 1;
   1.372 - 
   1.373 - 	printf("Checking for the skas3 patch in the host...");
   1.374 --	pid = start_ptraced_child();
   1.375 -+	pid = start_ptraced_child(&stack);
   1.376 - 
   1.377 - 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
   1.378 - 	if (n < 0) {
   1.379 -@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
   1.380 - 	}
   1.381 - 
   1.382 - 	init_registers(pid);
   1.383 --	stop_ptraced_child(pid, 1, 1);
   1.384 -+	stop_ptraced_child(pid, stack, 1, 1);
   1.385 - 
   1.386 - 	return(ret);
   1.387 - }
   1.388 -diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
   1.389 ---- a/arch/x86_64/ia32/syscall32.c
   1.390 -+++ b/arch/x86_64/ia32/syscall32.c
   1.391 -@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
   1.392 - 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
   1.393 - 	struct vm_area_struct *vma;
   1.394 - 	struct mm_struct *mm = current->mm;
   1.395 -+	int ret;
   1.396 - 
   1.397 - 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
   1.398 - 	if (!vma)
   1.399 -@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
   1.400 - 	vma->vm_mm = mm;
   1.401 - 
   1.402 - 	down_write(&mm->mmap_sem);
   1.403 --	insert_vm_struct(mm, vma);
   1.404 -+	if ((ret = insert_vm_struct(mm, vma))) {
   1.405 -+		up_write(&mm->mmap_sem);
   1.406 -+		kmem_cache_free(vm_area_cachep, vma);
   1.407 -+		return ret;
   1.408 -+	}
   1.409 - 	mm->total_vm += npages;
   1.410 - 	up_write(&mm->mmap_sem);
   1.411 - 	return 0;
   1.412 -diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
   1.413 ---- a/arch/x86_64/kernel/setup.c
   1.414 -+++ b/arch/x86_64/kernel/setup.c
   1.415 -@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
   1.416 - 	int cpu = smp_processor_id();
   1.417 - 	int node = 0;
   1.418 - 	unsigned bits;
   1.419 --	if (c->x86_num_cores == 1)
   1.420 --		return;
   1.421 - 
   1.422 - 	bits = 0;
   1.423 - 	while ((1 << bits) < c->x86_num_cores)
   1.424 -diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
   1.425 ---- a/arch/x86_64/kernel/smp.c
   1.426 -+++ b/arch/x86_64/kernel/smp.c
   1.427 -@@ -284,6 +284,71 @@ struct call_data_struct {
   1.428 - static struct call_data_struct * call_data;
   1.429 - 
   1.430 - /*
   1.431 -+ * this function sends a 'generic call function' IPI to one other CPU
   1.432 -+ * in the system.
   1.433 -+ */
   1.434 -+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
   1.435 -+				int nonatomic, int wait)
   1.436 -+{
   1.437 -+	struct call_data_struct data;
   1.438 -+	int cpus = 1;
   1.439 -+
   1.440 -+	data.func = func;
   1.441 -+	data.info = info;
   1.442 -+	atomic_set(&data.started, 0);
   1.443 -+	data.wait = wait;
   1.444 -+	if (wait)
   1.445 -+		atomic_set(&data.finished, 0);
   1.446 -+
   1.447 -+	call_data = &data;
   1.448 -+	wmb();
   1.449 -+	/* Send a message to all other CPUs and wait for them to respond */
   1.450 -+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
   1.451 -+
   1.452 -+	/* Wait for response */
   1.453 -+	while (atomic_read(&data.started) != cpus)
   1.454 -+		cpu_relax();
   1.455 -+
   1.456 -+	if (!wait)
   1.457 -+		return;
   1.458 -+
   1.459 -+	while (atomic_read(&data.finished) != cpus)
   1.460 -+		cpu_relax();
   1.461 -+}
   1.462 -+
   1.463 -+/*
   1.464 -+ * Run a function on another CPU
   1.465 -+ *  <func>	The function to run. This must be fast and non-blocking.
   1.466 -+ *  <info>	An arbitrary pointer to pass to the function.
   1.467 -+ *  <nonatomic>	Currently unused.
   1.468 -+ *  <wait>	If true, wait until function has completed on other CPUs.
   1.469 -+ *  [RETURNS]   0 on success, else a negative status code.
   1.470 -+ *
   1.471 -+ * Does not return until the remote CPU is nearly ready to execute <func>
   1.472 -+ * or is or has executed.
   1.473 -+ */
   1.474 -+
   1.475 -+int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
   1.476 -+	int nonatomic, int wait)
   1.477 -+{
   1.478 -+	
   1.479 -+	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
   1.480 -+
   1.481 -+	if (cpu == me) {
   1.482 -+		printk("%s: trying to call self\n", __func__);
   1.483 -+		put_cpu();
   1.484 -+		return -EBUSY;
   1.485 -+	}
   1.486 -+	spin_lock_bh(&call_lock);
   1.487 -+
   1.488 -+	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
   1.489 -+
   1.490 -+	spin_unlock_bh(&call_lock);
   1.491 -+	put_cpu();
   1.492 -+	return 0;
   1.493 -+}
   1.494 -+
   1.495 -+/*
   1.496 -  * this function sends a 'generic call function' IPI to all other CPUs
   1.497 -  * in the system.
   1.498 -  */
   1.499 -diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
   1.500 ---- a/arch/x86_64/kernel/smpboot.c
   1.501 -+++ b/arch/x86_64/kernel/smpboot.c
   1.502 -@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
   1.503 - {
   1.504 - 	unsigned long flags, i;
   1.505 - 
   1.506 --	if (smp_processor_id() != boot_cpu_id)
   1.507 --		return;
   1.508 --
   1.509 - 	go[MASTER] = 0;
   1.510 - 
   1.511 - 	local_irq_save(flags);
   1.512 -@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
   1.513 - 	return tcenter - best_tm;
   1.514 - }
   1.515 - 
   1.516 --static __cpuinit void sync_tsc(void)
   1.517 -+static __cpuinit void sync_tsc(unsigned int master)
   1.518 - {
   1.519 - 	int i, done = 0;
   1.520 - 	long delta, adj, adjust_latency = 0;
   1.521 -@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
   1.522 - 	} t[NUM_ROUNDS] __cpuinitdata;
   1.523 - #endif
   1.524 - 
   1.525 -+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
   1.526 -+		smp_processor_id(), master);
   1.527 -+
   1.528 - 	go[MASTER] = 1;
   1.529 - 
   1.530 --	smp_call_function(sync_master, NULL, 1, 0);
   1.531 -+	/* It is dangerous to broadcast IPI as cpus are coming up,
   1.532 -+	 * as they may not be ready to accept them.  So since
   1.533 -+	 * we only need to send the ipi to the boot cpu direct
   1.534 -+	 * the message, and avoid the race.
   1.535 -+	 */
   1.536 -+	smp_call_function_single(master, sync_master, NULL, 1, 0);
   1.537 - 
   1.538 - 	while (go[MASTER])	/* wait for master to be ready */
   1.539 - 		no_cpu_relax();
   1.540 -@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
   1.541 - 	printk(KERN_INFO
   1.542 - 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
   1.543 - 	       "maxerr %lu cycles)\n",
   1.544 --	       smp_processor_id(), boot_cpu_id, delta, rt);
   1.545 -+	       smp_processor_id(), master, delta, rt);
   1.546 - }
   1.547 - 
   1.548 - static void __cpuinit tsc_sync_wait(void)
   1.549 - {
   1.550 - 	if (notscsync || !cpu_has_tsc)
   1.551 - 		return;
   1.552 --	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
   1.553 --			boot_cpu_id);
   1.554 --	sync_tsc();
   1.555 -+	sync_tsc(0);
   1.556 - }
   1.557 - 
   1.558 - static __init int notscsync_setup(char *s)
   1.559 -diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
   1.560 ---- a/drivers/acpi/pci_irq.c
   1.561 -+++ b/drivers/acpi/pci_irq.c
   1.562 -@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
   1.563 - 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
   1.564 - 			pci_name(dev), ('A' + pin));
   1.565 - 		/* Interrupt Line values above 0xF are forbidden */
   1.566 --		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
   1.567 -+		if (dev->irq > 0 && (dev->irq <= 0xF)) {
   1.568 - 			printk(" - using IRQ %d\n", dev->irq);
   1.569 -+			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   1.570 - 			return_VALUE(0);
   1.571 - 		}
   1.572 - 		else {
   1.573 -diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
   1.574 ---- a/drivers/char/rocket.c
   1.575 -+++ b/drivers/char/rocket.c
   1.576 -@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
   1.577 - 		ToRecv = space;
   1.578 - 
   1.579 - 	if (ToRecv <= 0)
   1.580 --		return;
   1.581 -+		goto done;
   1.582 - 
   1.583 - 	/*
   1.584 - 	 * if status indicates there are errored characters in the
   1.585 -@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
   1.586 - 	}
   1.587 - 	/*  Push the data up to the tty layer */
   1.588 - 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
   1.589 -+done:
   1.590 - 	tty_ldisc_deref(ld);
   1.591 - }
   1.592 - 
   1.593 -diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
   1.594 ---- a/drivers/char/tpm/tpm.c
   1.595 -+++ b/drivers/char/tpm/tpm.c
   1.596 -@@ -32,12 +32,6 @@
   1.597 - 
   1.598 - #define	TPM_BUFSIZE			2048
   1.599 - 
   1.600 --/* PCI configuration addresses */
   1.601 --#define	PCI_GEN_PMCON_1			0xA0
   1.602 --#define	PCI_GEN1_DEC			0xE4
   1.603 --#define	PCI_LPC_EN			0xE6
   1.604 --#define	PCI_GEN2_DEC			0xEC
   1.605 --
   1.606 - static LIST_HEAD(tpm_chip_list);
   1.607 - static DEFINE_SPINLOCK(driver_lock);
   1.608 - static int dev_mask[32];
   1.609 -@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
   1.610 - EXPORT_SYMBOL_GPL(tpm_time_expired);
   1.611 - 
   1.612 - /*
   1.613 -- * Initialize the LPC bus and enable the TPM ports
   1.614 -- */
   1.615 --int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
   1.616 --{
   1.617 --	u32 lpcenable, tmp;
   1.618 --	int is_lpcm = 0;
   1.619 --
   1.620 --	switch (pci_dev->vendor) {
   1.621 --	case PCI_VENDOR_ID_INTEL:
   1.622 --		switch (pci_dev->device) {
   1.623 --		case PCI_DEVICE_ID_INTEL_82801CA_12:
   1.624 --		case PCI_DEVICE_ID_INTEL_82801DB_12:
   1.625 --			is_lpcm = 1;
   1.626 --			break;
   1.627 --		}
   1.628 --		/* init ICH (enable LPC) */
   1.629 --		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
   1.630 --		lpcenable |= 0x20000000;
   1.631 --		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
   1.632 --
   1.633 --		if (is_lpcm) {
   1.634 --			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
   1.635 --					      &lpcenable);
   1.636 --			if ((lpcenable & 0x20000000) == 0) {
   1.637 --				dev_err(&pci_dev->dev,
   1.638 --					"cannot enable LPC\n");
   1.639 --				return -ENODEV;
   1.640 --			}
   1.641 --		}
   1.642 --
   1.643 --		/* initialize TPM registers */
   1.644 --		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
   1.645 --
   1.646 --		if (!is_lpcm)
   1.647 --			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
   1.648 --		else
   1.649 --			tmp =
   1.650 --			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
   1.651 --			    0x00000001;
   1.652 --
   1.653 --		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
   1.654 --
   1.655 --		if (is_lpcm) {
   1.656 --			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
   1.657 --					      &tmp);
   1.658 --			tmp |= 0x00000004;	/* enable CLKRUN */
   1.659 --			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
   1.660 --					       tmp);
   1.661 --		}
   1.662 --		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
   1.663 --		tpm_write_index(0x0A, 0x00);	/* int disable */
   1.664 --		tpm_write_index(0x08, base);	/* base addr lo */
   1.665 --		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
   1.666 --		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
   1.667 --		break;
   1.668 --	case PCI_VENDOR_ID_AMD:
   1.669 --		/* nothing yet */
   1.670 --		break;
   1.671 --	}
   1.672 --
   1.673 --	return 0;
   1.674 --}
   1.675 --
   1.676 --EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
   1.677 --
   1.678 --/*
   1.679 -  * Internal kernel interface to transmit TPM commands
   1.680 -  */
   1.681 - static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   1.682 -@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
   1.683 - 	if (chip == NULL)
   1.684 - 		return -ENODEV;
   1.685 - 
   1.686 --	spin_lock(&driver_lock);
   1.687 --	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
   1.688 --	spin_unlock(&driver_lock);
   1.689 --
   1.690 - 	return 0;
   1.691 - }
   1.692 - 
   1.693 -diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
   1.694 ---- a/drivers/char/tpm/tpm.h
   1.695 -+++ b/drivers/char/tpm/tpm.h
   1.696 -@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
   1.697 - }
   1.698 - 
   1.699 - extern void tpm_time_expired(unsigned long);
   1.700 --extern int tpm_lpc_bus_init(struct pci_dev *, u16);
   1.701 --
   1.702 - extern int tpm_register_hardware(struct pci_dev *,
   1.703 - 				 struct tpm_vendor_specific *);
   1.704 - extern int tpm_open(struct inode *, struct file *);
   1.705 -diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
   1.706 ---- a/drivers/char/tpm/tpm_atmel.c
   1.707 -+++ b/drivers/char/tpm/tpm_atmel.c
   1.708 -@@ -22,7 +22,10 @@
   1.709 - #include "tpm.h"
   1.710 - 
   1.711 - /* Atmel definitions */
   1.712 --#define	TPM_ATML_BASE			0x400
   1.713 -+enum tpm_atmel_addr {
   1.714 -+	TPM_ATMEL_BASE_ADDR_LO = 0x08,
   1.715 -+	TPM_ATMEL_BASE_ADDR_HI = 0x09
   1.716 -+};
   1.717 - 
   1.718 - /* write status bits */
   1.719 - #define	ATML_STATUS_ABORT		0x01
   1.720 -@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
   1.721 - 	.cancel = tpm_atml_cancel,
   1.722 - 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
   1.723 - 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
   1.724 --	.base = TPM_ATML_BASE,
   1.725 - 	.miscdev = { .fops = &atmel_ops, },
   1.726 - };
   1.727 - 
   1.728 -@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
   1.729 - {
   1.730 - 	u8 version[4];
   1.731 - 	int rc = 0;
   1.732 -+	int lo, hi;
   1.733 - 
   1.734 - 	if (pci_enable_device(pci_dev))
   1.735 - 		return -EIO;
   1.736 - 
   1.737 --	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
   1.738 --		rc = -ENODEV;
   1.739 --		goto out_err;
   1.740 --	}
   1.741 -+	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
   1.742 -+	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
   1.743 -+
   1.744 -+	tpm_atmel.base = (hi<<8)|lo;
   1.745 -+	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
   1.746 - 
   1.747 - 	/* verify that it is an Atmel part */
   1.748 - 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
   1.749 -diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
   1.750 ---- a/drivers/char/tpm/tpm_nsc.c
   1.751 -+++ b/drivers/char/tpm/tpm_nsc.c
   1.752 -@@ -24,6 +24,10 @@
   1.753 - /* National definitions */
   1.754 - #define	TPM_NSC_BASE			0x360
   1.755 - #define	TPM_NSC_IRQ			0x07
   1.756 -+#define	TPM_NSC_BASE0_HI		0x60
   1.757 -+#define	TPM_NSC_BASE0_LO		0x61
   1.758 -+#define	TPM_NSC_BASE1_HI		0x62
   1.759 -+#define	TPM_NSC_BASE1_LO		0x63
   1.760 - 
   1.761 - #define	NSC_LDN_INDEX			0x07
   1.762 - #define	NSC_SID_INDEX			0x20
   1.763 -@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
   1.764 - 	.cancel = tpm_nsc_cancel,
   1.765 - 	.req_complete_mask = NSC_STATUS_OBF,
   1.766 - 	.req_complete_val = NSC_STATUS_OBF,
   1.767 --	.base = TPM_NSC_BASE,
   1.768 - 	.miscdev = { .fops = &nsc_ops, },
   1.769 - 	
   1.770 - };
   1.771 -@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
   1.772 - 				  const struct pci_device_id *pci_id)
   1.773 - {
   1.774 - 	int rc = 0;
   1.775 -+	int lo, hi;
   1.776 -+
   1.777 -+	hi = tpm_read_index(TPM_NSC_BASE0_HI);
   1.778 -+	lo = tpm_read_index(TPM_NSC_BASE0_LO);
   1.779 -+
   1.780 -+	tpm_nsc.base = (hi<<8) | lo;
   1.781 - 
   1.782 - 	if (pci_enable_device(pci_dev))
   1.783 - 		return -EIO;
   1.784 - 
   1.785 --	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
   1.786 --		rc = -ENODEV;
   1.787 --		goto out_err;
   1.788 --	}
   1.789 --
   1.790 - 	/* verify that it is a National part (SID) */
   1.791 - 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
   1.792 - 		rc = -ENODEV;
   1.793 -diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
   1.794 ---- a/drivers/char/tty_ioctl.c
   1.795 -+++ b/drivers/char/tty_ioctl.c
   1.796 -@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
   1.797 - 			ld = tty_ldisc_ref(tty);
   1.798 - 			switch (arg) {
   1.799 - 			case TCIFLUSH:
   1.800 --				if (ld->flush_buffer)
   1.801 -+				if (ld && ld->flush_buffer)
   1.802 - 					ld->flush_buffer(tty);
   1.803 - 				break;
   1.804 - 			case TCIOFLUSH:
   1.805 --				if (ld->flush_buffer)
   1.806 -+				if (ld && ld->flush_buffer)
   1.807 - 					ld->flush_buffer(tty);
   1.808 - 				/* fall through */
   1.809 - 			case TCOFLUSH:
   1.810 -diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
   1.811 ---- a/drivers/media/video/cx88/cx88-video.c
   1.812 -+++ b/drivers/media/video/cx88/cx88-video.c
   1.813 -@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
   1.814 - 			.default_value = 0,
   1.815 - 			.type          = V4L2_CTRL_TYPE_INTEGER,
   1.816 - 		},
   1.817 --		.off                   = 0,
   1.818 -+		.off                   = 128,
   1.819 - 		.reg                   = MO_HUE,
   1.820 - 		.mask                  = 0x00ff,
   1.821 - 		.shift                 = 0,
   1.822 -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   1.823 ---- a/drivers/net/e1000/e1000_main.c
   1.824 -+++ b/drivers/net/e1000/e1000_main.c
   1.825 -@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   1.826 - 	tso = e1000_tso(adapter, skb);
   1.827 - 	if (tso < 0) {
   1.828 - 		dev_kfree_skb_any(skb);
   1.829 -+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
   1.830 - 		return NETDEV_TX_OK;
   1.831 - 	}
   1.832 - 
   1.833 -diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
   1.834 ---- a/drivers/net/hamradio/Kconfig
   1.835 -+++ b/drivers/net/hamradio/Kconfig
   1.836 -@@ -17,7 +17,7 @@ config MKISS
   1.837 - 
   1.838 - config 6PACK
   1.839 - 	tristate "Serial port 6PACK driver"
   1.840 --	depends on AX25 && BROKEN_ON_SMP
   1.841 -+	depends on AX25
   1.842 - 	---help---
   1.843 - 	  6pack is a transmission protocol for the data exchange between your
   1.844 - 	  PC and your TNC (the Terminal Node Controller acts as a kind of
   1.845 -diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
   1.846 ---- a/drivers/net/shaper.c
   1.847 -+++ b/drivers/net/shaper.c
   1.848 -@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
   1.849 - {
   1.850 - 	struct shaper *shaper = dev->priv;
   1.851 -  	struct sk_buff *ptr;
   1.852 --   
   1.853 --	if (down_trylock(&shaper->sem))
   1.854 --		return -1;
   1.855 - 
   1.856 -+	spin_lock(&shaper->lock);
   1.857 -  	ptr=shaper->sendq.prev;
   1.858 -  	
   1.859 -  	/*
   1.860 -@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
   1.861 -                 shaper->stats.collisions++;
   1.862 -  	}
   1.863 - 	shaper_kick(shaper);
   1.864 --	up(&shaper->sem);
   1.865 -+	spin_unlock(&shaper->lock);
   1.866 -  	return 0;
   1.867 - }
   1.868 - 
   1.869 -@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
   1.870 - {
   1.871 - 	struct shaper *shaper = (struct shaper *)data;
   1.872 - 
   1.873 --	if (!down_trylock(&shaper->sem)) {
   1.874 --		shaper_kick(shaper);
   1.875 --		up(&shaper->sem);
   1.876 --	} else
   1.877 --		mod_timer(&shaper->timer, jiffies);
   1.878 -+	spin_lock(&shaper->lock);
   1.879 -+	shaper_kick(shaper);
   1.880 -+	spin_unlock(&shaper->lock);
   1.881 - }
   1.882 - 
   1.883 - /*
   1.884 -@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
   1.885 - 
   1.886 - 
   1.887 - /*
   1.888 -- *	Flush the shaper queues on a closedown
   1.889 -- */
   1.890 -- 
   1.891 --static void shaper_flush(struct shaper *shaper)
   1.892 --{
   1.893 --	struct sk_buff *skb;
   1.894 --
   1.895 --	down(&shaper->sem);
   1.896 --	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
   1.897 --		dev_kfree_skb(skb);
   1.898 --	shaper_kick(shaper);
   1.899 --	up(&shaper->sem);
   1.900 --}
   1.901 --
   1.902 --/*
   1.903 -  *	Bring the interface up. We just disallow this until a 
   1.904 -  *	bind.
   1.905 -  */
   1.906 -@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
   1.907 - static int shaper_close(struct net_device *dev)
   1.908 - {
   1.909 - 	struct shaper *shaper=dev->priv;
   1.910 --	shaper_flush(shaper);
   1.911 -+	struct sk_buff *skb;
   1.912 -+
   1.913 -+	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
   1.914 -+		dev_kfree_skb(skb);
   1.915 -+
   1.916 -+	spin_lock_bh(&shaper->lock);
   1.917 -+	shaper_kick(shaper);
   1.918 -+	spin_unlock_bh(&shaper->lock);
   1.919 -+
   1.920 - 	del_timer_sync(&shaper->timer);
   1.921 - 	return 0;
   1.922 - }
   1.923 -@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
   1.924 - 	init_timer(&sh->timer);
   1.925 - 	sh->timer.function=shaper_timer;
   1.926 - 	sh->timer.data=(unsigned long)sh;
   1.927 -+	spin_lock_init(&sh->lock);
   1.928 - }
   1.929 - 
   1.930 - /*
   1.931 -diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
   1.932 ---- a/drivers/pci/pci-driver.c
   1.933 -+++ b/drivers/pci/pci-driver.c
   1.934 -@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
   1.935 - 	/* FIXME, once all of the existing PCI drivers have been fixed to set
   1.936 - 	 * the pci shutdown function, this test can go away. */
   1.937 - 	if (!drv->driver.shutdown)
   1.938 --		drv->driver.shutdown = pci_device_shutdown,
   1.939 -+		drv->driver.shutdown = pci_device_shutdown;
   1.940 - 	drv->driver.owner = drv->owner;
   1.941 - 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
   1.942 - 	pci_init_dynids(&drv->dynids);
   1.943 -diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
   1.944 ---- a/drivers/scsi/qla2xxx/qla_init.c
   1.945 -+++ b/drivers/scsi/qla2xxx/qla_init.c
   1.946 -@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
   1.947 - 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
   1.948 - 
   1.949 - 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
   1.950 --	if (!rport)
   1.951 -+	if (!rport) {
   1.952 - 		qla_printk(KERN_WARNING, ha,
   1.953 - 		    "Unable to allocate fc remote port!\n");
   1.954 -+		return;
   1.955 -+	}
   1.956 - 
   1.957 - 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
   1.958 - 		fcport->os_target_id = rport->scsi_target_id;
   1.959 -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
   1.960 ---- a/drivers/scsi/qla2xxx/qla_os.c
   1.961 -+++ b/drivers/scsi/qla2xxx/qla_os.c
   1.962 -@@ -1150,7 +1150,7 @@ iospace_error_exit:
   1.963 -  */
   1.964 - int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
   1.965 - {
   1.966 --	int	ret;
   1.967 -+	int	ret = -ENODEV;
   1.968 - 	device_reg_t __iomem *reg;
   1.969 - 	struct Scsi_Host *host;
   1.970 - 	scsi_qla_host_t *ha;
   1.971 -@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
   1.972 - 	fc_port_t *fcport;
   1.973 - 
   1.974 - 	if (pci_enable_device(pdev))
   1.975 --		return -1;
   1.976 -+		goto probe_out;
   1.977 - 
   1.978 - 	host = scsi_host_alloc(&qla2x00_driver_template,
   1.979 - 	    sizeof(scsi_qla_host_t));
   1.980 -@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
   1.981 - 
   1.982 - 	/* Configure PCI I/O space */
   1.983 - 	ret = qla2x00_iospace_config(ha);
   1.984 --	if (ret != 0) {
   1.985 --		goto probe_alloc_failed;
   1.986 --	}
   1.987 -+	if (ret)
   1.988 -+		goto probe_failed;
   1.989 - 
   1.990 - 	/* Sanitize the information from PCI BIOS. */
   1.991 - 	host->irq = pdev->irq;
   1.992 -@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
   1.993 - 		qla_printk(KERN_WARNING, ha,
   1.994 - 		    "[ERROR] Failed to allocate memory for adapter\n");
   1.995 - 
   1.996 --		goto probe_alloc_failed;
   1.997 -+		ret = -ENOMEM;
   1.998 -+		goto probe_failed;
   1.999 - 	}
  1.1000 - 
  1.1001 --	pci_set_drvdata(pdev, ha);
  1.1002 --	host->this_id = 255;
  1.1003 --	host->cmd_per_lun = 3;
  1.1004 --	host->unique_id = ha->instance;
  1.1005 --	host->max_cmd_len = MAX_CMDSZ;
  1.1006 --	host->max_channel = ha->ports - 1;
  1.1007 --	host->max_id = ha->max_targets;
  1.1008 --	host->max_lun = ha->max_luns;
  1.1009 --	host->transportt = qla2xxx_transport_template;
  1.1010 --	if (scsi_add_host(host, &pdev->dev))
  1.1011 --		goto probe_alloc_failed;
  1.1012 --
  1.1013 --	qla2x00_alloc_sysfs_attr(ha);
  1.1014 --
  1.1015 - 	if (qla2x00_initialize_adapter(ha) &&
  1.1016 - 	    !(ha->device_flags & DFLG_NO_CABLE)) {
  1.1017 - 
  1.1018 -@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  1.1019 - 		    "Adapter flags %x.\n",
  1.1020 - 		    ha->host_no, ha->device_flags));
  1.1021 - 
  1.1022 -+		ret = -ENODEV;
  1.1023 - 		goto probe_failed;
  1.1024 - 	}
  1.1025 - 
  1.1026 --	qla2x00_init_host_attr(ha);
  1.1027 --
  1.1028 - 	/*
  1.1029 - 	 * Startup the kernel thread for this host adapter
  1.1030 - 	 */
  1.1031 -@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
  1.1032 - 		qla_printk(KERN_WARNING, ha,
  1.1033 - 		    "Unable to start DPC thread!\n");
  1.1034 - 
  1.1035 -+		ret = -ENODEV;
  1.1036 - 		goto probe_failed;
  1.1037 - 	}
  1.1038 - 	wait_for_completion(&ha->dpc_inited);
  1.1039 - 
  1.1040 -+	host->this_id = 255;
  1.1041 -+	host->cmd_per_lun = 3;
  1.1042 -+	host->unique_id = ha->instance;
  1.1043 -+	host->max_cmd_len = MAX_CMDSZ;
  1.1044 -+	host->max_channel = ha->ports - 1;
  1.1045 -+	host->max_lun = MAX_LUNS;
  1.1046 -+	host->transportt = qla2xxx_transport_template;
  1.1047 -+
  1.1048 - 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
  1.1049 - 		ret = request_irq(host->irq, qla2100_intr_handler,
  1.1050 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  1.1051 - 	else
  1.1052 - 		ret = request_irq(host->irq, qla2300_intr_handler,
  1.1053 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  1.1054 --	if (ret != 0) {
  1.1055 -+	if (ret) {
  1.1056 - 		qla_printk(KERN_WARNING, ha,
  1.1057 - 		    "Failed to reserve interrupt %d already in use.\n",
  1.1058 - 		    host->irq);
  1.1059 -@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
  1.1060 - 		msleep(10);
  1.1061 - 	}
  1.1062 - 
  1.1063 -+	pci_set_drvdata(pdev, ha);
  1.1064 - 	ha->flags.init_done = 1;
  1.1065 - 	num_hosts++;
  1.1066 - 
  1.1067 -+	ret = scsi_add_host(host, &pdev->dev);
  1.1068 -+	if (ret)
  1.1069 -+		goto probe_failed;
  1.1070 -+
  1.1071 -+	qla2x00_alloc_sysfs_attr(ha);
  1.1072 -+
  1.1073 -+	qla2x00_init_host_attr(ha);
  1.1074 -+
  1.1075 - 	qla_printk(KERN_INFO, ha, "\n"
  1.1076 - 	    " QLogic Fibre Channel HBA Driver: %s\n"
  1.1077 - 	    "  QLogic %s - %s\n"
  1.1078 -@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
  1.1079 - probe_failed:
  1.1080 - 	fc_remove_host(ha->host);
  1.1081 - 
  1.1082 --	scsi_remove_host(host);
  1.1083 --
  1.1084 --probe_alloc_failed:
  1.1085 - 	qla2x00_free_device(ha);
  1.1086 - 
  1.1087 - 	scsi_host_put(host);
  1.1088 -@@ -1394,7 +1394,8 @@ probe_alloc_failed:
  1.1089 - probe_disable_device:
  1.1090 - 	pci_disable_device(pdev);
  1.1091 - 
  1.1092 --	return -1;
  1.1093 -+probe_out:
  1.1094 -+	return ret;
  1.1095 - }
  1.1096 - EXPORT_SYMBOL_GPL(qla2x00_probe_one);
  1.1097 - 
  1.1098 -diff --git a/fs/bio.c b/fs/bio.c
  1.1099 ---- a/fs/bio.c
  1.1100 -+++ b/fs/bio.c
  1.1101 -@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
  1.1102 - 	 */
  1.1103 - 	bio->bi_vcnt = bio_src->bi_vcnt;
  1.1104 - 	bio->bi_size = bio_src->bi_size;
  1.1105 -+	bio->bi_idx = bio_src->bi_idx;
  1.1106 - 	bio_phys_segments(q, bio);
  1.1107 - 	bio_hw_segments(q, bio);
  1.1108 - }
  1.1109 -diff --git a/fs/char_dev.c b/fs/char_dev.c
  1.1110 ---- a/fs/char_dev.c
  1.1111 -+++ b/fs/char_dev.c
  1.1112 -@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
  1.1113 - 	struct char_device_struct *cd = NULL, **cp;
  1.1114 - 	int i = major_to_index(major);
  1.1115 - 
  1.1116 --	up(&chrdevs_lock);
  1.1117 -+	down(&chrdevs_lock);
  1.1118 - 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
  1.1119 - 		if ((*cp)->major == major &&
  1.1120 - 		    (*cp)->baseminor == baseminor &&
  1.1121 -diff --git a/fs/exec.c b/fs/exec.c
  1.1122 ---- a/fs/exec.c
  1.1123 -+++ b/fs/exec.c
  1.1124 -@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
  1.1125 - 	}
  1.1126 - 	sig->group_exit_task = NULL;
  1.1127 - 	sig->notify_count = 0;
  1.1128 -+	sig->real_timer.data = (unsigned long)current;
  1.1129 - 	spin_unlock_irq(lock);
  1.1130 - 
  1.1131 - 	/*
  1.1132 -diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
  1.1133 ---- a/fs/isofs/compress.c
  1.1134 -+++ b/fs/isofs/compress.c
  1.1135 -@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
  1.1136 - 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
  1.1137 - 	brelse(bh);
  1.1138 - 
  1.1139 -+	if (cstart > cend)
  1.1140 -+		goto eio;
  1.1141 -+		
  1.1142 - 	csize = cend-cstart;
  1.1143 - 
  1.1144 -+	if (csize > deflateBound(1UL << zisofs_block_shift))
  1.1145 -+		goto eio;
  1.1146 -+
  1.1147 - 	/* Now page[] contains an array of pages, any of which can be NULL,
  1.1148 - 	   and the locks on which we hold.  We should now read the data and
  1.1149 - 	   release the pages.  If the pages are NULL the decompressed data
  1.1150 -diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
  1.1151 ---- a/include/asm-i386/string.h
  1.1152 -+++ b/include/asm-i386/string.h
  1.1153 -@@ -116,7 +116,8 @@ __asm__ __volatile__(
  1.1154 - 	"orb $1,%%al\n"
  1.1155 - 	"3:"
  1.1156 - 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
  1.1157 --		     :"1" (cs),"2" (ct));
  1.1158 -+	:"1" (cs),"2" (ct)
  1.1159 -+	:"memory");
  1.1160 - return __res;
  1.1161 - }
  1.1162 - 
  1.1163 -@@ -138,8 +139,9 @@ __asm__ __volatile__(
  1.1164 - 	"3:\tsbbl %%eax,%%eax\n\t"
  1.1165 - 	"orb $1,%%al\n"
  1.1166 - 	"4:"
  1.1167 --		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  1.1168 --		     :"1" (cs),"2" (ct),"3" (count));
  1.1169 -+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  1.1170 -+	:"1" (cs),"2" (ct),"3" (count)
  1.1171 -+	:"memory");
  1.1172 - return __res;
  1.1173 - }
  1.1174 - 
  1.1175 -@@ -158,7 +160,9 @@ __asm__ __volatile__(
  1.1176 - 	"movl $1,%1\n"
  1.1177 - 	"2:\tmovl %1,%0\n\t"
  1.1178 - 	"decl %0"
  1.1179 --	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
  1.1180 -+	:"=a" (__res), "=&S" (d0)
  1.1181 -+	:"1" (s),"0" (c)
  1.1182 -+	:"memory");
  1.1183 - return __res;
  1.1184 - }
  1.1185 - 
  1.1186 -@@ -175,7 +179,9 @@ __asm__ __volatile__(
  1.1187 - 	"leal -1(%%esi),%0\n"
  1.1188 - 	"2:\ttestb %%al,%%al\n\t"
  1.1189 - 	"jne 1b"
  1.1190 --	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
  1.1191 -+	:"=g" (__res), "=&S" (d0), "=&a" (d1)
  1.1192 -+	:"0" (0),"1" (s),"2" (c)
  1.1193 -+	:"memory");
  1.1194 - return __res;
  1.1195 - }
  1.1196 - 
  1.1197 -@@ -189,7 +195,9 @@ __asm__ __volatile__(
  1.1198 - 	"scasb\n\t"
  1.1199 - 	"notl %0\n\t"
  1.1200 - 	"decl %0"
  1.1201 --	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
  1.1202 -+	:"=c" (__res), "=&D" (d0)
  1.1203 -+	:"1" (s),"a" (0), "0" (0xffffffffu)
  1.1204 -+	:"memory");
  1.1205 - return __res;
  1.1206 - }
  1.1207 - 
  1.1208 -@@ -333,7 +341,9 @@ __asm__ __volatile__(
  1.1209 - 	"je 1f\n\t"
  1.1210 - 	"movl $1,%0\n"
  1.1211 - 	"1:\tdecl %0"
  1.1212 --	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
  1.1213 -+	:"=D" (__res), "=&c" (d0)
  1.1214 -+	:"a" (c),"0" (cs),"1" (count)
  1.1215 -+	:"memory");
  1.1216 - return __res;
  1.1217 - }
  1.1218 - 
  1.1219 -@@ -369,7 +379,7 @@ __asm__ __volatile__(
  1.1220 - 	"je 2f\n\t"
  1.1221 - 	"stosb\n"
  1.1222 - 	"2:"
  1.1223 --	: "=&c" (d0), "=&D" (d1)
  1.1224 -+	:"=&c" (d0), "=&D" (d1)
  1.1225 - 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
  1.1226 - 	:"memory");
  1.1227 - return (s);	
  1.1228 -@@ -392,7 +402,8 @@ __asm__ __volatile__(
  1.1229 - 	"jne 1b\n"
  1.1230 - 	"3:\tsubl %2,%0"
  1.1231 - 	:"=a" (__res), "=&d" (d0)
  1.1232 --	:"c" (s),"1" (count));
  1.1233 -+	:"c" (s),"1" (count)
  1.1234 -+	:"memory");
  1.1235 - return __res;
  1.1236 - }
  1.1237 - /* end of additional stuff */
  1.1238 -@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
  1.1239 - 		"dec %%edi\n"
  1.1240 - 		"1:"
  1.1241 - 		: "=D" (addr), "=c" (size)
  1.1242 --		: "0" (addr), "1" (size), "a" (c));
  1.1243 -+		: "0" (addr), "1" (size), "a" (c)
  1.1244 -+		: "memory");
  1.1245 - 	return addr;
  1.1246 - }
  1.1247 - 
  1.1248 -diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
  1.1249 ---- a/include/asm-x86_64/smp.h
  1.1250 -+++ b/include/asm-x86_64/smp.h
  1.1251 -@@ -46,6 +46,8 @@ extern int pic_mode;
  1.1252 - extern int smp_num_siblings;
  1.1253 - extern void smp_flush_tlb(void);
  1.1254 - extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
  1.1255 -+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  1.1256 -+				     int retry, int wait);
  1.1257 - extern void smp_send_reschedule(int cpu);
  1.1258 - extern void smp_invalidate_rcv(void);		/* Process an NMI */
  1.1259 - extern void zap_low_mappings(void);
  1.1260 -diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
  1.1261 ---- a/include/linux/if_shaper.h
  1.1262 -+++ b/include/linux/if_shaper.h
  1.1263 -@@ -23,7 +23,7 @@ struct shaper
  1.1264 - 	__u32 shapeclock;
  1.1265 - 	unsigned long recovery;	/* Time we can next clock a packet out on
  1.1266 - 				   an empty queue */
  1.1267 --	struct semaphore sem;
  1.1268 -+	spinlock_t lock;
  1.1269 -         struct net_device_stats stats;
  1.1270 - 	struct net_device *dev;
  1.1271 - 	int  (*hard_start_xmit) (struct sk_buff *skb,
  1.1272 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
  1.1273 ---- a/include/linux/skbuff.h
  1.1274 -+++ b/include/linux/skbuff.h
  1.1275 -@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
  1.1276 - {
  1.1277 - 	int hlen = skb_headlen(skb);
  1.1278 - 
  1.1279 --	if (offset + len <= hlen)
  1.1280 -+	if (hlen - offset >= len)
  1.1281 - 		return skb->data + offset;
  1.1282 - 
  1.1283 - 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
  1.1284 -diff --git a/include/linux/zlib.h b/include/linux/zlib.h
  1.1285 ---- a/include/linux/zlib.h
  1.1286 -+++ b/include/linux/zlib.h
  1.1287 -@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
  1.1288 -    stream state was inconsistent (such as zalloc or state being NULL).
  1.1289 - */
  1.1290 - 
  1.1291 -+static inline unsigned long deflateBound(unsigned long s)
  1.1292 -+{
  1.1293 -+	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
  1.1294 -+}
  1.1295 -+
  1.1296 - extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
  1.1297 - /*
  1.1298 -      Dynamically update the compression level and compression strategy.  The
  1.1299 -diff --git a/kernel/module.c b/kernel/module.c
  1.1300 ---- a/kernel/module.c
  1.1301 -+++ b/kernel/module.c
  1.1302 -@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
  1.1303 - /* Created by linker magic */
  1.1304 - extern char __per_cpu_start[], __per_cpu_end[];
  1.1305 - 
  1.1306 --static void *percpu_modalloc(unsigned long size, unsigned long align)
  1.1307 -+static void *percpu_modalloc(unsigned long size, unsigned long align,
  1.1308 -+			     const char *name)
  1.1309 - {
  1.1310 - 	unsigned long extra;
  1.1311 - 	unsigned int i;
  1.1312 - 	void *ptr;
  1.1313 - 
  1.1314 --	BUG_ON(align > SMP_CACHE_BYTES);
  1.1315 -+	if (align > SMP_CACHE_BYTES) {
  1.1316 -+		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
  1.1317 -+		       name, align, SMP_CACHE_BYTES);
  1.1318 -+		align = SMP_CACHE_BYTES;
  1.1319 -+	}
  1.1320 - 
  1.1321 - 	ptr = __per_cpu_start;
  1.1322 - 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  1.1323 -@@ -347,7 +352,8 @@ static int percpu_modinit(void)
  1.1324 - }	
  1.1325 - __initcall(percpu_modinit);
  1.1326 - #else /* ... !CONFIG_SMP */
  1.1327 --static inline void *percpu_modalloc(unsigned long size, unsigned long align)
  1.1328 -+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
  1.1329 -+				    const char *name)
  1.1330 - {
  1.1331 - 	return NULL;
  1.1332 - }
  1.1333 -@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
  1.1334 - 	if (pcpuindex) {
  1.1335 - 		/* We have a special allocation for this section. */
  1.1336 - 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
  1.1337 --					 sechdrs[pcpuindex].sh_addralign);
  1.1338 -+					 sechdrs[pcpuindex].sh_addralign,
  1.1339 -+					 mod->name);
  1.1340 - 		if (!percpu) {
  1.1341 - 			err = -ENOMEM;
  1.1342 - 			goto free_mod;
  1.1343 -diff --git a/lib/inflate.c b/lib/inflate.c
  1.1344 ---- a/lib/inflate.c
  1.1345 -+++ b/lib/inflate.c
  1.1346 -@@ -326,7 +326,7 @@ DEBG("huft1 ");
  1.1347 -   {
  1.1348 -     *t = (struct huft *)NULL;
  1.1349 -     *m = 0;
  1.1350 --    return 0;
  1.1351 -+    return 2;
  1.1352 -   }
  1.1353 - 
  1.1354 - DEBG("huft2 ");
  1.1355 -@@ -374,6 +374,7 @@ DEBG("huft5 ");
  1.1356 -     if ((j = *p++) != 0)
  1.1357 -       v[x[j]++] = i;
  1.1358 -   } while (++i < n);
  1.1359 -+  n = x[g];                   /* set n to length of v */
  1.1360 - 
  1.1361 - DEBG("h6 ");
  1.1362 - 
  1.1363 -@@ -410,12 +411,13 @@ DEBG1("1 ");
  1.1364 - DEBG1("2 ");
  1.1365 -           f -= a + 1;           /* deduct codes from patterns left */
  1.1366 -           xp = c + k;
  1.1367 --          while (++j < z)       /* try smaller tables up to z bits */
  1.1368 --          {
  1.1369 --            if ((f <<= 1) <= *++xp)
  1.1370 --              break;            /* enough codes to use up j bits */
  1.1371 --            f -= *xp;           /* else deduct codes from patterns */
  1.1372 --          }
  1.1373 -+          if (j < z)
  1.1374 -+            while (++j < z)       /* try smaller tables up to z bits */
  1.1375 -+            {
  1.1376 -+              if ((f <<= 1) <= *++xp)
  1.1377 -+                break;            /* enough codes to use up j bits */
  1.1378 -+              f -= *xp;           /* else deduct codes from patterns */
  1.1379 -+            }
  1.1380 -         }
  1.1381 - DEBG1("3 ");
  1.1382 -         z = 1 << j;             /* table entries for j-bit table */
  1.1383 -diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
  1.1384 ---- a/lib/zlib_inflate/inftrees.c
  1.1385 -+++ b/lib/zlib_inflate/inftrees.c
  1.1386 -@@ -141,7 +141,7 @@ static int huft_build(
  1.1387 -   {
  1.1388 -     *t = NULL;
  1.1389 -     *m = 0;
  1.1390 --    return Z_OK;
  1.1391 -+    return Z_DATA_ERROR;
  1.1392 -   }
  1.1393 - 
  1.1394 - 
  1.1395 -diff --git a/mm/memory.c b/mm/memory.c
  1.1396 ---- a/mm/memory.c
  1.1397 -+++ b/mm/memory.c
  1.1398 -@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
  1.1399 - {
  1.1400 - 	pgd_t *pgd;
  1.1401 - 	unsigned long next;
  1.1402 --	unsigned long end = addr + size;
  1.1403 -+	unsigned long end = addr + PAGE_ALIGN(size);
  1.1404 - 	struct mm_struct *mm = vma->vm_mm;
  1.1405 - 	int err;
  1.1406 - 
  1.1407 -diff --git a/mm/mempolicy.c b/mm/mempolicy.c
  1.1408 ---- a/mm/mempolicy.c
  1.1409 -+++ b/mm/mempolicy.c
  1.1410 -@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
  1.1411 - 	struct mempolicy *new;
  1.1412 - 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
  1.1413 - 
  1.1414 --	if (mode > MPOL_MAX)
  1.1415 -+	if (mode < 0 || mode > MPOL_MAX)
  1.1416 - 		return -EINVAL;
  1.1417 - 	err = get_nodes(nodes, nmask, maxnode, mode);
  1.1418 - 	if (err)
  1.1419 -diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
  1.1420 ---- a/net/8021q/vlan.c
  1.1421 -+++ b/net/8021q/vlan.c
  1.1422 -@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
  1.1423 - 			if (!vlandev)
  1.1424 - 				continue;
  1.1425 - 
  1.1426 -+			if (netif_carrier_ok(dev)) {
  1.1427 -+				if (!netif_carrier_ok(vlandev))
  1.1428 -+					netif_carrier_on(vlandev);
  1.1429 -+			} else {
  1.1430 -+				if (netif_carrier_ok(vlandev))
  1.1431 -+					netif_carrier_off(vlandev);
  1.1432 -+			}
  1.1433 -+
  1.1434 - 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
  1.1435 - 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
  1.1436 - 					| flgs;
  1.1437 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  1.1438 ---- a/net/ipv4/ip_output.c
  1.1439 -+++ b/net/ipv4/ip_output.c
  1.1440 -@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
  1.1441 - #ifdef CONFIG_NETFILTER_DEBUG
  1.1442 - 	nf_debug_ip_loopback_xmit(newskb);
  1.1443 - #endif
  1.1444 --	nf_reset(newskb);
  1.1445 - 	netif_rx(newskb);
  1.1446 - 	return 0;
  1.1447 - }
  1.1448 -@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
  1.1449 - 	nf_debug_ip_finish_output2(skb);
  1.1450 - #endif /*CONFIG_NETFILTER_DEBUG*/
  1.1451 - 
  1.1452 --	nf_reset(skb);
  1.1453 --
  1.1454 - 	if (hh) {
  1.1455 - 		int hh_alen;
  1.1456 - 
  1.1457 -diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
  1.1458 ---- a/net/ipv4/netfilter/ip_conntrack_core.c
  1.1459 -+++ b/net/ipv4/netfilter/ip_conntrack_core.c
  1.1460 -@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
  1.1461 - 		schedule();
  1.1462 - 		goto i_see_dead_people;
  1.1463 - 	}
  1.1464 -+	/* wait until all references to ip_conntrack_untracked are dropped */
  1.1465 -+	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
  1.1466 -+		schedule();
  1.1467 - 
  1.1468 - 	kmem_cache_destroy(ip_conntrack_cachep);
  1.1469 - 	kmem_cache_destroy(ip_conntrack_expect_cachep);
  1.1470 -diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
  1.1471 ---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
  1.1472 -+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
  1.1473 -@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
  1.1474 - 				        const struct net_device *out,
  1.1475 - 				        int (*okfn)(struct sk_buff *))
  1.1476 - {
  1.1477 -+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
  1.1478 -+	/* Previously seen (loopback)?  Ignore.  Do this before
  1.1479 -+           fragment check. */
  1.1480 -+	if ((*pskb)->nfct)
  1.1481 -+		return NF_ACCEPT;
  1.1482 -+#endif
  1.1483 -+
  1.1484 - 	/* Gather fragments. */
  1.1485 - 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
  1.1486 - 		*pskb = ip_ct_gather_frags(*pskb,
  1.1487 -diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  1.1488 ---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
  1.1489 -+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  1.1490 -@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
  1.1491 - 		 enum ip_nat_manip_type maniptype,
  1.1492 - 		 const struct ip_conntrack *conntrack)
  1.1493 - {
  1.1494 --	static u_int16_t port, *portptr;
  1.1495 -+	static u_int16_t port;
  1.1496 -+	u_int16_t *portptr;
  1.1497 - 	unsigned int range_size, min, i;
  1.1498 - 
  1.1499 - 	if (maniptype == IP_NAT_MANIP_SRC)
  1.1500 -diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
  1.1501 ---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
  1.1502 -+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
  1.1503 -@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
  1.1504 - 		 enum ip_nat_manip_type maniptype,
  1.1505 - 		 const struct ip_conntrack *conntrack)
  1.1506 - {
  1.1507 --	static u_int16_t port, *portptr;
  1.1508 -+	static u_int16_t port;
  1.1509 -+	u_int16_t *portptr;
  1.1510 - 	unsigned int range_size, min, i;
  1.1511 - 
  1.1512 - 	if (maniptype == IP_NAT_MANIP_SRC)
  1.1513 -diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
  1.1514 ---- a/net/ipv6/netfilter/ip6_queue.c
  1.1515 -+++ b/net/ipv6/netfilter/ip6_queue.c
  1.1516 -@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
  1.1517 - static void
  1.1518 - ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
  1.1519 - {
  1.1520 -+	local_bh_disable();
  1.1521 - 	nf_reinject(entry->skb, entry->info, verdict);
  1.1522 -+	local_bh_enable();
  1.1523 - 	kfree(entry);
  1.1524 - }
  1.1525 - 
  1.1526 -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
  1.1527 ---- a/net/netlink/af_netlink.c
  1.1528 -+++ b/net/netlink/af_netlink.c
  1.1529 -@@ -315,8 +315,8 @@ err:
  1.1530 - static void netlink_remove(struct sock *sk)
  1.1531 - {
  1.1532 - 	netlink_table_grab();
  1.1533 --	nl_table[sk->sk_protocol].hash.entries--;
  1.1534 --	sk_del_node_init(sk);
  1.1535 -+	if (sk_del_node_init(sk))
  1.1536 -+		nl_table[sk->sk_protocol].hash.entries--;
  1.1537 - 	if (nlk_sk(sk)->groups)
  1.1538 - 		__sk_del_bind_node(sk);
  1.1539 - 	netlink_table_ungrab();
  1.1540 -@@ -429,7 +429,12 @@ retry:
  1.1541 - 	err = netlink_insert(sk, pid);
  1.1542 - 	if (err == -EADDRINUSE)
  1.1543 - 		goto retry;
  1.1544 --	return 0;
  1.1545 -+
  1.1546 -+	/* If 2 threads race to autobind, that is fine.  */
  1.1547 -+	if (err == -EBUSY)
  1.1548 -+		err = 0;
  1.1549 -+
  1.1550 -+	return err;
  1.1551 - }
  1.1552 - 
  1.1553 - static inline int netlink_capable(struct socket *sock, unsigned int flag) 
  1.1554 -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
  1.1555 ---- a/net/packet/af_packet.c
  1.1556 -+++ b/net/packet/af_packet.c
  1.1557 -@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
  1.1558 - 	dst_release(skb->dst);
  1.1559 - 	skb->dst = NULL;
  1.1560 - 
  1.1561 -+	/* drop conntrack reference */
  1.1562 -+	nf_reset(skb);
  1.1563 -+
  1.1564 - 	spkt = (struct sockaddr_pkt*)skb->cb;
  1.1565 - 
  1.1566 - 	skb_push(skb, skb->data-skb->mac.raw);
  1.1567 -@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
  1.1568 - 	dst_release(skb->dst);
  1.1569 - 	skb->dst = NULL;
  1.1570 - 
  1.1571 -+	/* drop conntrack reference */
  1.1572 -+	nf_reset(skb);
  1.1573 -+
  1.1574 - 	spin_lock(&sk->sk_receive_queue.lock);
  1.1575 - 	po->stats.tp_packets++;
  1.1576 - 	__skb_queue_tail(&sk->sk_receive_queue, skb);
  1.1577 -diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
  1.1578 ---- a/net/xfrm/xfrm_user.c
  1.1579 -+++ b/net/xfrm/xfrm_user.c
  1.1580 -@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
  1.1581 - 	if (nr > XFRM_MAX_DEPTH)
  1.1582 - 		return NULL;
  1.1583 - 
  1.1584 -+	if (p->dir > XFRM_POLICY_OUT)
  1.1585 -+		return NULL;
  1.1586 -+
  1.1587 - 	xp = xfrm_policy_alloc(GFP_KERNEL);
  1.1588 - 	if (xp == NULL) {
  1.1589 - 		*dir = -ENOBUFS;
  1.1590 -diff --git a/security/keys/keyring.c b/security/keys/keyring.c
  1.1591 ---- a/security/keys/keyring.c
  1.1592 -+++ b/security/keys/keyring.c
  1.1593 -@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
  1.1594 - 
  1.1595 - 	if (keyring->description) {
  1.1596 - 		write_lock(&keyring_name_lock);
  1.1597 --		list_del(&keyring->type_data.link);
  1.1598 -+
  1.1599 -+		if (keyring->type_data.link.next != NULL &&
  1.1600 -+		    !list_empty(&keyring->type_data.link))
  1.1601 -+			list_del(&keyring->type_data.link);
  1.1602 -+
  1.1603 - 		write_unlock(&keyring_name_lock);
  1.1604 - 	}
  1.1605 - 
  1.1606 -diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
  1.1607 ---- a/security/keys/process_keys.c
  1.1608 -+++ b/security/keys/process_keys.c
  1.1609 -@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
  1.1610 - 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
  1.1611 - 		if (IS_ERR(keyring)) {
  1.1612 - 			ret = PTR_ERR(keyring);
  1.1613 --			goto error;
  1.1614 -+			goto error2;
  1.1615 - 		}
  1.1616 - 	}
  1.1617 - 	else if (IS_ERR(keyring)) {
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/patches/linux-2.6.12/2.6.12.6.patch	Fri Oct 21 10:46:30 2005 +0100
     2.3 @@ -0,0 +1,1738 @@
     2.4 +diff --git a/Makefile b/Makefile
     2.5 +--- a/Makefile
     2.6 ++++ b/Makefile
     2.7 +@@ -1,7 +1,7 @@
     2.8 + VERSION = 2
     2.9 + PATCHLEVEL = 6
    2.10 + SUBLEVEL = 12
    2.11 +-EXTRAVERSION =
    2.12 ++EXTRAVERSION = .6
    2.13 + NAME=Woozy Numbat
    2.14 + 
    2.15 + # *DOCUMENTATION*
    2.16 +@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
    2.17 + #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
    2.18 + #Adding $(srctree) adds about 20M on i386 to the size of the output file!
    2.19 + 
    2.20 +-ifeq ($(KBUILD_OUTPUT),)
    2.21 ++ifeq ($(src),$(obj))
    2.22 + __srctree =
    2.23 + else
    2.24 + __srctree = $(srctree)/
    2.25 +diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.26 +--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.27 ++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.28 +@@ -44,7 +44,7 @@
    2.29 + 
    2.30 + #define PFX "powernow-k8: "
    2.31 + #define BFX PFX "BIOS error: "
    2.32 +-#define VERSION "version 1.40.2"
    2.33 ++#define VERSION "version 1.40.4"
    2.34 + #include "powernow-k8.h"
    2.35 + 
    2.36 + /* serialize freq changes  */
    2.37 +@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
    2.38 + {
    2.39 + 	struct powernow_k8_data *data;
    2.40 + 	cpumask_t oldmask = CPU_MASK_ALL;
    2.41 +-	int rc;
    2.42 ++	int rc, i;
    2.43 + 
    2.44 + 	if (!check_supported_cpu(pol->cpu))
    2.45 + 		return -ENODEV;
    2.46 +@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
    2.47 + 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
    2.48 + 	       data->currfid, data->currvid);
    2.49 + 
    2.50 +-	powernow_data[pol->cpu] = data;
    2.51 ++	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
    2.52 ++		powernow_data[i] = data;
    2.53 ++	}
    2.54 + 
    2.55 + 	return 0;
    2.56 + 
    2.57 +diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
    2.58 +--- a/arch/i386/kernel/process.c
    2.59 ++++ b/arch/i386/kernel/process.c
    2.60 +@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
    2.61 + 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
    2.62 + 		return -EINVAL;
    2.63 + 
    2.64 ++	memset(&info, 0, sizeof(info));
    2.65 ++
    2.66 + 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
    2.67 + 
    2.68 + 	info.entry_number = idx;
    2.69 +diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
    2.70 +--- a/arch/ia64/kernel/ptrace.c
    2.71 ++++ b/arch/ia64/kernel/ptrace.c
    2.72 +@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
    2.73 + 				*data = (pt->cr_ipsr & IPSR_MASK);
    2.74 + 			return 0;
    2.75 + 
    2.76 ++		      case PT_AR_RSC:
    2.77 ++			if (write_access)
    2.78 ++				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
    2.79 ++			else
    2.80 ++				*data = pt->ar_rsc;
    2.81 ++			return 0;
    2.82 ++
    2.83 + 		      case PT_AR_RNAT:
    2.84 + 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
    2.85 + 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
    2.86 +@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
    2.87 + 		      case PT_AR_BSPSTORE:
    2.88 + 			ptr = pt_reg_addr(pt, ar_bspstore);
    2.89 + 			break;
    2.90 +-		      case PT_AR_RSC:
    2.91 +-			ptr = pt_reg_addr(pt, ar_rsc);
    2.92 +-			break;
    2.93 + 		      case PT_AR_UNAT:
    2.94 + 			ptr = pt_reg_addr(pt, ar_unat);
    2.95 + 			break;
    2.96 +@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
    2.97 + static long
    2.98 + ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
    2.99 + {
   2.100 +-	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   2.101 ++	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   2.102 + 	struct unw_frame_info info;
   2.103 + 	struct switch_stack *sw;
   2.104 + 	struct ia64_fpreg fpval;
   2.105 +@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
   2.106 + 	/* app regs */
   2.107 + 
   2.108 + 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
   2.109 +-	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
   2.110 ++	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
   2.111 + 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
   2.112 + 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
   2.113 + 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
   2.114 +@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
   2.115 + 	retval |= __get_user(nat_bits, &ppr->nat);
   2.116 + 
   2.117 + 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
   2.118 ++	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
   2.119 + 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
   2.120 + 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
   2.121 + 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
   2.122 +diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
   2.123 +--- a/arch/ia64/kernel/signal.c
   2.124 ++++ b/arch/ia64/kernel/signal.c
   2.125 +@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
   2.126 + static long
   2.127 + restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
   2.128 + {
   2.129 +-	unsigned long ip, flags, nat, um, cfm;
   2.130 ++	unsigned long ip, flags, nat, um, cfm, rsc;
   2.131 + 	long err;
   2.132 + 
   2.133 + 	/* Always make any pending restarted system calls return -EINTR */
   2.134 +@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
   2.135 + 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
   2.136 + 	err |= __get_user(cfm, &sc->sc_cfm);
   2.137 + 	err |= __get_user(um, &sc->sc_um);			/* user mask */
   2.138 +-	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
   2.139 ++	err |= __get_user(rsc, &sc->sc_ar_rsc);
   2.140 + 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
   2.141 + 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
   2.142 + 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
   2.143 +@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
   2.144 + 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
   2.145 + 
   2.146 + 	scr->pt.cr_ifs = cfm | (1UL << 63);
   2.147 ++	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
   2.148 + 
   2.149 + 	/* establish new instruction pointer: */
   2.150 + 	scr->pt.cr_iip = ip & ~0x3UL;
   2.151 +diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
   2.152 +--- a/arch/ppc/kernel/time.c
   2.153 ++++ b/arch/ppc/kernel/time.c
   2.154 +@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
   2.155 + 
   2.156 + extern unsigned long wall_jiffies;
   2.157 + 
   2.158 ++/* used for timezone offset */
   2.159 ++static long timezone_offset;
   2.160 ++
   2.161 + DEFINE_SPINLOCK(rtc_lock);
   2.162 + 
   2.163 + EXPORT_SYMBOL(rtc_lock);
   2.164 +@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
   2.165 + 		     xtime.tv_sec - last_rtc_update >= 659 &&
   2.166 + 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
   2.167 + 		     jiffies - wall_jiffies == 1) {
   2.168 +-		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
   2.169 ++		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
   2.170 + 				last_rtc_update = xtime.tv_sec+1;
   2.171 + 			else
   2.172 + 				/* Try again one minute later */
   2.173 +@@ -286,7 +289,7 @@ void __init time_init(void)
   2.174 + 	unsigned old_stamp, stamp, elapsed;
   2.175 + 
   2.176 +         if (ppc_md.time_init != NULL)
   2.177 +-                time_offset = ppc_md.time_init();
   2.178 ++                timezone_offset = ppc_md.time_init();
   2.179 + 
   2.180 + 	if (__USE_RTC()) {
   2.181 + 		/* 601 processor: dec counts down by 128 every 128ns */
   2.182 +@@ -331,10 +334,10 @@ void __init time_init(void)
   2.183 + 	set_dec(tb_ticks_per_jiffy);
   2.184 + 
   2.185 + 	/* If platform provided a timezone (pmac), we correct the time */
   2.186 +-        if (time_offset) {
   2.187 +-		sys_tz.tz_minuteswest = -time_offset / 60;
   2.188 ++        if (timezone_offset) {
   2.189 ++		sys_tz.tz_minuteswest = -timezone_offset / 60;
   2.190 + 		sys_tz.tz_dsttime = 0;
   2.191 +-		xtime.tv_sec -= time_offset;
   2.192 ++		xtime.tv_sec -= timezone_offset;
   2.193 +         }
   2.194 +         set_normalized_timespec(&wall_to_monotonic,
   2.195 +                                 -xtime.tv_sec, -xtime.tv_nsec);
   2.196 +diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
   2.197 +--- a/arch/ppc64/boot/zlib.c
   2.198 ++++ b/arch/ppc64/boot/zlib.c
   2.199 +@@ -1307,7 +1307,7 @@ local int huft_build(
   2.200 +   {
   2.201 +     *t = (inflate_huft *)Z_NULL;
   2.202 +     *m = 0;
   2.203 +-    return Z_OK;
   2.204 ++    return Z_DATA_ERROR;
   2.205 +   }
   2.206 + 
   2.207 + 
   2.208 +@@ -1351,6 +1351,7 @@ local int huft_build(
   2.209 +     if ((j = *p++) != 0)
   2.210 +       v[x[j]++] = i;
   2.211 +   } while (++i < n);
   2.212 ++  n = x[g];			/* set n to length of v */
   2.213 + 
   2.214 + 
   2.215 +   /* Generate the Huffman codes and for each, make the table entries */
   2.216 +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
   2.217 +--- a/arch/um/kernel/process.c
   2.218 ++++ b/arch/um/kernel/process.c
   2.219 +@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
   2.220 + 	return(arg.pid);
   2.221 + }
   2.222 + 
   2.223 +-static int ptrace_child(void)
   2.224 ++static int ptrace_child(void *arg)
   2.225 + {
   2.226 + 	int ret;
   2.227 + 	int pid = os_getpid(), ppid = getppid();
   2.228 +@@ -159,16 +159,20 @@ static int ptrace_child(void)
   2.229 + 	_exit(ret);
   2.230 + }
   2.231 + 
   2.232 +-static int start_ptraced_child(void)
   2.233 ++static int start_ptraced_child(void **stack_out)
   2.234 + {
   2.235 ++	void *stack;
   2.236 ++	unsigned long sp;
   2.237 + 	int pid, n, status;
   2.238 + 	
   2.239 +-	pid = fork();
   2.240 +-	if(pid == 0)
   2.241 +-		ptrace_child();
   2.242 +-
   2.243 ++	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
   2.244 ++		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   2.245 ++	if(stack == MAP_FAILED)
   2.246 ++		panic("check_ptrace : mmap failed, errno = %d", errno);
   2.247 ++	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
   2.248 ++	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
   2.249 + 	if(pid < 0)
   2.250 +-		panic("check_ptrace : fork failed, errno = %d", errno);
   2.251 ++		panic("check_ptrace : clone failed, errno = %d", errno);
   2.252 + 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
   2.253 + 	if(n < 0)
   2.254 + 		panic("check_ptrace : wait failed, errno = %d", errno);
   2.255 +@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
   2.256 + 		panic("check_ptrace : expected SIGSTOP, got status = %d",
   2.257 + 		      status);
   2.258 + 
   2.259 ++	*stack_out = stack;
   2.260 + 	return(pid);
   2.261 + }
   2.262 + 
   2.263 +@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
   2.264 +  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
   2.265 +  * So only for SYSEMU features we test mustpanic, while normal host features
   2.266 +  * must work anyway!*/
   2.267 +-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
   2.268 ++static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
   2.269 + {
   2.270 + 	int status, n, ret = 0;
   2.271 + 
   2.272 + 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
   2.273 +-		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
   2.274 ++		panic("check_ptrace : ptrace failed, errno = %d", errno);
   2.275 + 	CATCH_EINTR(n = waitpid(pid, &status, 0));
   2.276 + 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
   2.277 + 		int exit_with = WEXITSTATUS(status);
   2.278 +@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
   2.279 + 		printk("check_ptrace : child exited with exitcode %d, while "
   2.280 + 		      "expecting %d; status 0x%x", exit_with,
   2.281 + 		      exitcode, status);
   2.282 +-		if (mustexit)
   2.283 ++		if (mustpanic)
   2.284 + 			panic("\n");
   2.285 + 		else
   2.286 + 			printk("\n");
   2.287 + 		ret = -1;
   2.288 + 	}
   2.289 + 
   2.290 ++	if(munmap(stack, PAGE_SIZE) < 0)
   2.291 ++		panic("check_ptrace : munmap failed, errno = %d", errno);
   2.292 + 	return ret;
   2.293 + }
   2.294 + 
   2.295 +@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
   2.296 + 
   2.297 + static void __init check_sysemu(void)
   2.298 + {
   2.299 ++	void *stack;
   2.300 + 	int pid, syscall, n, status, count=0;
   2.301 + 
   2.302 + 	printk("Checking syscall emulation patch for ptrace...");
   2.303 + 	sysemu_supported = 0;
   2.304 +-	pid = start_ptraced_child();
   2.305 ++	pid = start_ptraced_child(&stack);
   2.306 + 
   2.307 + 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
   2.308 + 		goto fail;
   2.309 +@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
   2.310 + 		panic("check_sysemu : failed to modify system "
   2.311 + 		      "call return, errno = %d", errno);
   2.312 + 
   2.313 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
   2.314 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   2.315 + 		goto fail_stopped;
   2.316 + 
   2.317 + 	sysemu_supported = 1;
   2.318 +@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
   2.319 + 	set_using_sysemu(!force_sysemu_disabled);
   2.320 + 
   2.321 + 	printk("Checking advanced syscall emulation patch for ptrace...");
   2.322 +-	pid = start_ptraced_child();
   2.323 ++	pid = start_ptraced_child(&stack);
   2.324 + 	while(1){
   2.325 + 		count++;
   2.326 + 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
   2.327 +@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
   2.328 + 			break;
   2.329 + 		}
   2.330 + 	}
   2.331 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
   2.332 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   2.333 + 		goto fail_stopped;
   2.334 + 
   2.335 + 	sysemu_supported = 2;
   2.336 +@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
   2.337 + 	return;
   2.338 + 
   2.339 + fail:
   2.340 +-	stop_ptraced_child(pid, 1, 0);
   2.341 ++	stop_ptraced_child(pid, stack, 1, 0);
   2.342 + fail_stopped:
   2.343 + 	printk("missing\n");
   2.344 + }
   2.345 + 
   2.346 + void __init check_ptrace(void)
   2.347 + {
   2.348 ++	void *stack;
   2.349 + 	int pid, syscall, n, status;
   2.350 + 
   2.351 + 	printk("Checking that ptrace can change system call numbers...");
   2.352 +-	pid = start_ptraced_child();
   2.353 ++	pid = start_ptraced_child(&stack);
   2.354 + 
   2.355 + 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
   2.356 + 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
   2.357 +@@ -330,7 +339,7 @@ void __init check_ptrace(void)
   2.358 + 			break;
   2.359 + 		}
   2.360 + 	}
   2.361 +-	stop_ptraced_child(pid, 0, 1);
   2.362 ++	stop_ptraced_child(pid, stack, 0, 1);
   2.363 + 	printk("OK\n");
   2.364 + 	check_sysemu();
   2.365 + }
   2.366 +@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
   2.367 + static inline int check_skas3_ptrace_support(void)
   2.368 + {
   2.369 + 	struct ptrace_faultinfo fi;
   2.370 ++	void *stack;
   2.371 + 	int pid, n, ret = 1;
   2.372 + 
   2.373 + 	printf("Checking for the skas3 patch in the host...");
   2.374 +-	pid = start_ptraced_child();
   2.375 ++	pid = start_ptraced_child(&stack);
   2.376 + 
   2.377 + 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
   2.378 + 	if (n < 0) {
   2.379 +@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
   2.380 + 	}
   2.381 + 
   2.382 + 	init_registers(pid);
   2.383 +-	stop_ptraced_child(pid, 1, 1);
   2.384 ++	stop_ptraced_child(pid, stack, 1, 1);
   2.385 + 
   2.386 + 	return(ret);
   2.387 + }
   2.388 +diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
   2.389 +--- a/arch/x86_64/ia32/syscall32.c
   2.390 ++++ b/arch/x86_64/ia32/syscall32.c
   2.391 +@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
   2.392 + 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
   2.393 + 	struct vm_area_struct *vma;
   2.394 + 	struct mm_struct *mm = current->mm;
   2.395 ++	int ret;
   2.396 + 
   2.397 + 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
   2.398 + 	if (!vma)
   2.399 +@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
   2.400 + 	vma->vm_mm = mm;
   2.401 + 
   2.402 + 	down_write(&mm->mmap_sem);
   2.403 +-	insert_vm_struct(mm, vma);
   2.404 ++	if ((ret = insert_vm_struct(mm, vma))) {
   2.405 ++		up_write(&mm->mmap_sem);
   2.406 ++		kmem_cache_free(vm_area_cachep, vma);
   2.407 ++		return ret;
   2.408 ++	}
   2.409 + 	mm->total_vm += npages;
   2.410 + 	up_write(&mm->mmap_sem);
   2.411 + 	return 0;
   2.412 +diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
   2.413 +--- a/arch/x86_64/kernel/setup.c
   2.414 ++++ b/arch/x86_64/kernel/setup.c
   2.415 +@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
   2.416 + 	int cpu = smp_processor_id();
   2.417 + 	int node = 0;
   2.418 + 	unsigned bits;
   2.419 +-	if (c->x86_num_cores == 1)
   2.420 +-		return;
   2.421 + 
   2.422 + 	bits = 0;
   2.423 + 	while ((1 << bits) < c->x86_num_cores)
   2.424 +diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
   2.425 +--- a/arch/x86_64/kernel/smp.c
   2.426 ++++ b/arch/x86_64/kernel/smp.c
   2.427 +@@ -284,6 +284,71 @@ struct call_data_struct {
   2.428 + static struct call_data_struct * call_data;
   2.429 + 
   2.430 + /*
   2.431 ++ * this function sends a 'generic call function' IPI to one other CPU
   2.432 ++ * in the system.
   2.433 ++ */
   2.434 ++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
   2.435 ++				int nonatomic, int wait)
   2.436 ++{
   2.437 ++	struct call_data_struct data;
   2.438 ++	int cpus = 1;
   2.439 ++
   2.440 ++	data.func = func;
   2.441 ++	data.info = info;
   2.442 ++	atomic_set(&data.started, 0);
   2.443 ++	data.wait = wait;
   2.444 ++	if (wait)
   2.445 ++		atomic_set(&data.finished, 0);
   2.446 ++
   2.447 ++	call_data = &data;
   2.448 ++	wmb();
   2.449 ++	/* Send a message to all other CPUs and wait for them to respond */
   2.450 ++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
   2.451 ++
   2.452 ++	/* Wait for response */
   2.453 ++	while (atomic_read(&data.started) != cpus)
   2.454 ++		cpu_relax();
   2.455 ++
   2.456 ++	if (!wait)
   2.457 ++		return;
   2.458 ++
   2.459 ++	while (atomic_read(&data.finished) != cpus)
   2.460 ++		cpu_relax();
   2.461 ++}
   2.462 ++
   2.463 ++/*
   2.464 ++ * Run a function on another CPU
   2.465 ++ *  <func>	The function to run. This must be fast and non-blocking.
   2.466 ++ *  <info>	An arbitrary pointer to pass to the function.
   2.467 ++ *  <nonatomic>	Currently unused.
   2.468 ++ *  <wait>	If true, wait until function has completed on other CPUs.
   2.469 ++ *  [RETURNS]   0 on success, else a negative status code.
   2.470 ++ *
   2.471 ++ * Does not return until the remote CPU is nearly ready to execute <func>
   2.472 ++ * or is or has executed.
   2.473 ++ */
   2.474 ++
   2.475 ++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
   2.476 ++	int nonatomic, int wait)
   2.477 ++{
   2.478 ++	
   2.479 ++	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
   2.480 ++
   2.481 ++	if (cpu == me) {
   2.482 ++		printk("%s: trying to call self\n", __func__);
   2.483 ++		put_cpu();
   2.484 ++		return -EBUSY;
   2.485 ++	}
   2.486 ++	spin_lock_bh(&call_lock);
   2.487 ++
   2.488 ++	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
   2.489 ++
   2.490 ++	spin_unlock_bh(&call_lock);
   2.491 ++	put_cpu();
   2.492 ++	return 0;
   2.493 ++}
   2.494 ++
   2.495 ++/*
   2.496 +  * this function sends a 'generic call function' IPI to all other CPUs
   2.497 +  * in the system.
   2.498 +  */
   2.499 +diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
   2.500 +--- a/arch/x86_64/kernel/smpboot.c
   2.501 ++++ b/arch/x86_64/kernel/smpboot.c
   2.502 +@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
   2.503 + {
   2.504 + 	unsigned long flags, i;
   2.505 + 
   2.506 +-	if (smp_processor_id() != boot_cpu_id)
   2.507 +-		return;
   2.508 +-
   2.509 + 	go[MASTER] = 0;
   2.510 + 
   2.511 + 	local_irq_save(flags);
   2.512 +@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
   2.513 + 	return tcenter - best_tm;
   2.514 + }
   2.515 + 
   2.516 +-static __cpuinit void sync_tsc(void)
   2.517 ++static __cpuinit void sync_tsc(unsigned int master)
   2.518 + {
   2.519 + 	int i, done = 0;
   2.520 + 	long delta, adj, adjust_latency = 0;
   2.521 +@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
   2.522 + 	} t[NUM_ROUNDS] __cpuinitdata;
   2.523 + #endif
   2.524 + 
   2.525 ++	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
   2.526 ++		smp_processor_id(), master);
   2.527 ++
   2.528 + 	go[MASTER] = 1;
   2.529 + 
   2.530 +-	smp_call_function(sync_master, NULL, 1, 0);
   2.531 ++	/* It is dangerous to broadcast IPI as cpus are coming up,
   2.532 ++	 * as they may not be ready to accept them.  So since
   2.533 ++	 * we only need to send the ipi to the boot cpu direct
   2.534 ++	 * the message, and avoid the race.
   2.535 ++	 */
   2.536 ++	smp_call_function_single(master, sync_master, NULL, 1, 0);
   2.537 + 
   2.538 + 	while (go[MASTER])	/* wait for master to be ready */
   2.539 + 		no_cpu_relax();
   2.540 +@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
   2.541 + 	printk(KERN_INFO
   2.542 + 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
   2.543 + 	       "maxerr %lu cycles)\n",
   2.544 +-	       smp_processor_id(), boot_cpu_id, delta, rt);
   2.545 ++	       smp_processor_id(), master, delta, rt);
   2.546 + }
   2.547 + 
   2.548 + static void __cpuinit tsc_sync_wait(void)
   2.549 + {
   2.550 + 	if (notscsync || !cpu_has_tsc)
   2.551 + 		return;
   2.552 +-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
   2.553 +-			boot_cpu_id);
   2.554 +-	sync_tsc();
   2.555 ++	sync_tsc(0);
   2.556 + }
   2.557 + 
   2.558 + static __init int notscsync_setup(char *s)
   2.559 +diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
   2.560 +--- a/drivers/acpi/pci_irq.c
   2.561 ++++ b/drivers/acpi/pci_irq.c
   2.562 +@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
   2.563 + 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
   2.564 + 			pci_name(dev), ('A' + pin));
   2.565 + 		/* Interrupt Line values above 0xF are forbidden */
   2.566 +-		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
   2.567 ++		if (dev->irq > 0 && (dev->irq <= 0xF)) {
   2.568 + 			printk(" - using IRQ %d\n", dev->irq);
   2.569 ++			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   2.570 + 			return_VALUE(0);
   2.571 + 		}
   2.572 + 		else {
   2.573 +diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
   2.574 +--- a/drivers/char/rocket.c
   2.575 ++++ b/drivers/char/rocket.c
   2.576 +@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
   2.577 + 		ToRecv = space;
   2.578 + 
   2.579 + 	if (ToRecv <= 0)
   2.580 +-		return;
   2.581 ++		goto done;
   2.582 + 
   2.583 + 	/*
   2.584 + 	 * if status indicates there are errored characters in the
   2.585 +@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
   2.586 + 	}
   2.587 + 	/*  Push the data up to the tty layer */
   2.588 + 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
   2.589 ++done:
   2.590 + 	tty_ldisc_deref(ld);
   2.591 + }
   2.592 + 
   2.593 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
   2.594 +--- a/drivers/char/tpm/tpm.c
   2.595 ++++ b/drivers/char/tpm/tpm.c
   2.596 +@@ -32,12 +32,6 @@
   2.597 + 
   2.598 + #define	TPM_BUFSIZE			2048
   2.599 + 
   2.600 +-/* PCI configuration addresses */
   2.601 +-#define	PCI_GEN_PMCON_1			0xA0
   2.602 +-#define	PCI_GEN1_DEC			0xE4
   2.603 +-#define	PCI_LPC_EN			0xE6
   2.604 +-#define	PCI_GEN2_DEC			0xEC
   2.605 +-
   2.606 + static LIST_HEAD(tpm_chip_list);
   2.607 + static DEFINE_SPINLOCK(driver_lock);
   2.608 + static int dev_mask[32];
   2.609 +@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
   2.610 + EXPORT_SYMBOL_GPL(tpm_time_expired);
   2.611 + 
   2.612 + /*
   2.613 +- * Initialize the LPC bus and enable the TPM ports
   2.614 +- */
   2.615 +-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
   2.616 +-{
   2.617 +-	u32 lpcenable, tmp;
   2.618 +-	int is_lpcm = 0;
   2.619 +-
   2.620 +-	switch (pci_dev->vendor) {
   2.621 +-	case PCI_VENDOR_ID_INTEL:
   2.622 +-		switch (pci_dev->device) {
   2.623 +-		case PCI_DEVICE_ID_INTEL_82801CA_12:
   2.624 +-		case PCI_DEVICE_ID_INTEL_82801DB_12:
   2.625 +-			is_lpcm = 1;
   2.626 +-			break;
   2.627 +-		}
   2.628 +-		/* init ICH (enable LPC) */
   2.629 +-		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
   2.630 +-		lpcenable |= 0x20000000;
   2.631 +-		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
   2.632 +-
   2.633 +-		if (is_lpcm) {
   2.634 +-			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
   2.635 +-					      &lpcenable);
   2.636 +-			if ((lpcenable & 0x20000000) == 0) {
   2.637 +-				dev_err(&pci_dev->dev,
   2.638 +-					"cannot enable LPC\n");
   2.639 +-				return -ENODEV;
   2.640 +-			}
   2.641 +-		}
   2.642 +-
   2.643 +-		/* initialize TPM registers */
   2.644 +-		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
   2.645 +-
   2.646 +-		if (!is_lpcm)
   2.647 +-			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
   2.648 +-		else
   2.649 +-			tmp =
   2.650 +-			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
   2.651 +-			    0x00000001;
   2.652 +-
   2.653 +-		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
   2.654 +-
   2.655 +-		if (is_lpcm) {
   2.656 +-			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
   2.657 +-					      &tmp);
   2.658 +-			tmp |= 0x00000004;	/* enable CLKRUN */
   2.659 +-			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
   2.660 +-					       tmp);
   2.661 +-		}
   2.662 +-		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
   2.663 +-		tpm_write_index(0x0A, 0x00);	/* int disable */
   2.664 +-		tpm_write_index(0x08, base);	/* base addr lo */
   2.665 +-		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
   2.666 +-		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
   2.667 +-		break;
   2.668 +-	case PCI_VENDOR_ID_AMD:
   2.669 +-		/* nothing yet */
   2.670 +-		break;
   2.671 +-	}
   2.672 +-
   2.673 +-	return 0;
   2.674 +-}
   2.675 +-
   2.676 +-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
   2.677 +-
   2.678 +-/*
   2.679 +  * Internal kernel interface to transmit TPM commands
   2.680 +  */
   2.681 + static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   2.682 +@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
   2.683 + 	if (chip == NULL)
   2.684 + 		return -ENODEV;
   2.685 + 
   2.686 +-	spin_lock(&driver_lock);
   2.687 +-	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
   2.688 +-	spin_unlock(&driver_lock);
   2.689 +-
   2.690 + 	return 0;
   2.691 + }
   2.692 + 
   2.693 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
   2.694 +--- a/drivers/char/tpm/tpm.h
   2.695 ++++ b/drivers/char/tpm/tpm.h
   2.696 +@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
   2.697 + }
   2.698 + 
   2.699 + extern void tpm_time_expired(unsigned long);
   2.700 +-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
   2.701 +-
   2.702 + extern int tpm_register_hardware(struct pci_dev *,
   2.703 + 				 struct tpm_vendor_specific *);
   2.704 + extern int tpm_open(struct inode *, struct file *);
   2.705 +diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
   2.706 +--- a/drivers/char/tpm/tpm_atmel.c
   2.707 ++++ b/drivers/char/tpm/tpm_atmel.c
   2.708 +@@ -22,7 +22,10 @@
   2.709 + #include "tpm.h"
   2.710 + 
   2.711 + /* Atmel definitions */
   2.712 +-#define	TPM_ATML_BASE			0x400
   2.713 ++enum tpm_atmel_addr {
   2.714 ++	TPM_ATMEL_BASE_ADDR_LO = 0x08,
   2.715 ++	TPM_ATMEL_BASE_ADDR_HI = 0x09
   2.716 ++};
   2.717 + 
   2.718 + /* write status bits */
   2.719 + #define	ATML_STATUS_ABORT		0x01
   2.720 +@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
   2.721 + 	.cancel = tpm_atml_cancel,
   2.722 + 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
   2.723 + 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
   2.724 +-	.base = TPM_ATML_BASE,
   2.725 + 	.miscdev = { .fops = &atmel_ops, },
   2.726 + };
   2.727 + 
   2.728 +@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
   2.729 + {
   2.730 + 	u8 version[4];
   2.731 + 	int rc = 0;
   2.732 ++	int lo, hi;
   2.733 + 
   2.734 + 	if (pci_enable_device(pci_dev))
   2.735 + 		return -EIO;
   2.736 + 
   2.737 +-	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
   2.738 +-		rc = -ENODEV;
   2.739 +-		goto out_err;
   2.740 +-	}
   2.741 ++	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
   2.742 ++	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
   2.743 ++
   2.744 ++	tpm_atmel.base = (hi<<8)|lo;
   2.745 ++	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
   2.746 + 
   2.747 + 	/* verify that it is an Atmel part */
   2.748 + 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
   2.749 +diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
   2.750 +--- a/drivers/char/tpm/tpm_nsc.c
   2.751 ++++ b/drivers/char/tpm/tpm_nsc.c
   2.752 +@@ -24,6 +24,10 @@
   2.753 + /* National definitions */
   2.754 + #define	TPM_NSC_BASE			0x360
   2.755 + #define	TPM_NSC_IRQ			0x07
   2.756 ++#define	TPM_NSC_BASE0_HI		0x60
   2.757 ++#define	TPM_NSC_BASE0_LO		0x61
   2.758 ++#define	TPM_NSC_BASE1_HI		0x62
   2.759 ++#define	TPM_NSC_BASE1_LO		0x63
   2.760 + 
   2.761 + #define	NSC_LDN_INDEX			0x07
   2.762 + #define	NSC_SID_INDEX			0x20
   2.763 +@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
   2.764 + 	.cancel = tpm_nsc_cancel,
   2.765 + 	.req_complete_mask = NSC_STATUS_OBF,
   2.766 + 	.req_complete_val = NSC_STATUS_OBF,
   2.767 +-	.base = TPM_NSC_BASE,
   2.768 + 	.miscdev = { .fops = &nsc_ops, },
   2.769 + 	
   2.770 + };
   2.771 +@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
   2.772 + 				  const struct pci_device_id *pci_id)
   2.773 + {
   2.774 + 	int rc = 0;
   2.775 ++	int lo, hi;
   2.776 ++
   2.777 ++	hi = tpm_read_index(TPM_NSC_BASE0_HI);
   2.778 ++	lo = tpm_read_index(TPM_NSC_BASE0_LO);
   2.779 ++
   2.780 ++	tpm_nsc.base = (hi<<8) | lo;
   2.781 + 
   2.782 + 	if (pci_enable_device(pci_dev))
   2.783 + 		return -EIO;
   2.784 + 
   2.785 +-	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
   2.786 +-		rc = -ENODEV;
   2.787 +-		goto out_err;
   2.788 +-	}
   2.789 +-
   2.790 + 	/* verify that it is a National part (SID) */
   2.791 + 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
   2.792 + 		rc = -ENODEV;
   2.793 +diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
   2.794 +--- a/drivers/char/tty_ioctl.c
   2.795 ++++ b/drivers/char/tty_ioctl.c
   2.796 +@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
   2.797 + 			ld = tty_ldisc_ref(tty);
   2.798 + 			switch (arg) {
   2.799 + 			case TCIFLUSH:
   2.800 +-				if (ld->flush_buffer)
   2.801 ++				if (ld && ld->flush_buffer)
   2.802 + 					ld->flush_buffer(tty);
   2.803 + 				break;
   2.804 + 			case TCIOFLUSH:
   2.805 +-				if (ld->flush_buffer)
   2.806 ++				if (ld && ld->flush_buffer)
   2.807 + 					ld->flush_buffer(tty);
   2.808 + 				/* fall through */
   2.809 + 			case TCOFLUSH:
   2.810 +diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
   2.811 +--- a/drivers/media/video/cx88/cx88-video.c
   2.812 ++++ b/drivers/media/video/cx88/cx88-video.c
   2.813 +@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
   2.814 + 			.default_value = 0,
   2.815 + 			.type          = V4L2_CTRL_TYPE_INTEGER,
   2.816 + 		},
   2.817 +-		.off                   = 0,
   2.818 ++		.off                   = 128,
   2.819 + 		.reg                   = MO_HUE,
   2.820 + 		.mask                  = 0x00ff,
   2.821 + 		.shift                 = 0,
   2.822 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   2.823 +--- a/drivers/net/e1000/e1000_main.c
   2.824 ++++ b/drivers/net/e1000/e1000_main.c
   2.825 +@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   2.826 + 	tso = e1000_tso(adapter, skb);
   2.827 + 	if (tso < 0) {
   2.828 + 		dev_kfree_skb_any(skb);
   2.829 ++		spin_unlock_irqrestore(&adapter->tx_lock, flags);
   2.830 + 		return NETDEV_TX_OK;
   2.831 + 	}
   2.832 + 
   2.833 +diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
   2.834 +--- a/drivers/net/hamradio/Kconfig
   2.835 ++++ b/drivers/net/hamradio/Kconfig
   2.836 +@@ -17,7 +17,7 @@ config MKISS
   2.837 + 
   2.838 + config 6PACK
   2.839 + 	tristate "Serial port 6PACK driver"
   2.840 +-	depends on AX25 && BROKEN_ON_SMP
   2.841 ++	depends on AX25
   2.842 + 	---help---
   2.843 + 	  6pack is a transmission protocol for the data exchange between your
   2.844 + 	  PC and your TNC (the Terminal Node Controller acts as a kind of
   2.845 +diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
   2.846 +--- a/drivers/net/shaper.c
   2.847 ++++ b/drivers/net/shaper.c
   2.848 +@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
   2.849 + {
   2.850 + 	struct shaper *shaper = dev->priv;
   2.851 +  	struct sk_buff *ptr;
   2.852 +-   
   2.853 +-	if (down_trylock(&shaper->sem))
   2.854 +-		return -1;
   2.855 + 
   2.856 ++	spin_lock(&shaper->lock);
   2.857 +  	ptr=shaper->sendq.prev;
   2.858 +  	
   2.859 +  	/*
   2.860 +@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
   2.861 +                 shaper->stats.collisions++;
   2.862 +  	}
   2.863 + 	shaper_kick(shaper);
   2.864 +-	up(&shaper->sem);
   2.865 ++	spin_unlock(&shaper->lock);
   2.866 +  	return 0;
   2.867 + }
   2.868 + 
   2.869 +@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
   2.870 + {
   2.871 + 	struct shaper *shaper = (struct shaper *)data;
   2.872 + 
   2.873 +-	if (!down_trylock(&shaper->sem)) {
   2.874 +-		shaper_kick(shaper);
   2.875 +-		up(&shaper->sem);
   2.876 +-	} else
   2.877 +-		mod_timer(&shaper->timer, jiffies);
   2.878 ++	spin_lock(&shaper->lock);
   2.879 ++	shaper_kick(shaper);
   2.880 ++	spin_unlock(&shaper->lock);
   2.881 + }
   2.882 + 
   2.883 + /*
   2.884 +@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
   2.885 + 
   2.886 + 
   2.887 + /*
   2.888 +- *	Flush the shaper queues on a closedown
   2.889 +- */
   2.890 +- 
   2.891 +-static void shaper_flush(struct shaper *shaper)
   2.892 +-{
   2.893 +-	struct sk_buff *skb;
   2.894 +-
   2.895 +-	down(&shaper->sem);
   2.896 +-	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
   2.897 +-		dev_kfree_skb(skb);
   2.898 +-	shaper_kick(shaper);
   2.899 +-	up(&shaper->sem);
   2.900 +-}
   2.901 +-
   2.902 +-/*
   2.903 +  *	Bring the interface up. We just disallow this until a 
   2.904 +  *	bind.
   2.905 +  */
   2.906 +@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
   2.907 + static int shaper_close(struct net_device *dev)
   2.908 + {
   2.909 + 	struct shaper *shaper=dev->priv;
   2.910 +-	shaper_flush(shaper);
   2.911 ++	struct sk_buff *skb;
   2.912 ++
   2.913 ++	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
   2.914 ++		dev_kfree_skb(skb);
   2.915 ++
   2.916 ++	spin_lock_bh(&shaper->lock);
   2.917 ++	shaper_kick(shaper);
   2.918 ++	spin_unlock_bh(&shaper->lock);
   2.919 ++
   2.920 + 	del_timer_sync(&shaper->timer);
   2.921 + 	return 0;
   2.922 + }
   2.923 +@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
   2.924 + 	init_timer(&sh->timer);
   2.925 + 	sh->timer.function=shaper_timer;
   2.926 + 	sh->timer.data=(unsigned long)sh;
   2.927 ++	spin_lock_init(&sh->lock);
   2.928 + }
   2.929 + 
   2.930 + /*
   2.931 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
   2.932 +--- a/drivers/pci/pci-driver.c
   2.933 ++++ b/drivers/pci/pci-driver.c
   2.934 +@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
   2.935 + 	/* FIXME, once all of the existing PCI drivers have been fixed to set
   2.936 + 	 * the pci shutdown function, this test can go away. */
   2.937 + 	if (!drv->driver.shutdown)
   2.938 +-		drv->driver.shutdown = pci_device_shutdown,
   2.939 ++		drv->driver.shutdown = pci_device_shutdown;
   2.940 + 	drv->driver.owner = drv->owner;
   2.941 + 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
   2.942 + 	pci_init_dynids(&drv->dynids);
   2.943 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
   2.944 +--- a/drivers/scsi/qla2xxx/qla_init.c
   2.945 ++++ b/drivers/scsi/qla2xxx/qla_init.c
   2.946 +@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
   2.947 + 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
   2.948 + 
   2.949 + 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
   2.950 +-	if (!rport)
   2.951 ++	if (!rport) {
   2.952 + 		qla_printk(KERN_WARNING, ha,
   2.953 + 		    "Unable to allocate fc remote port!\n");
   2.954 ++		return;
   2.955 ++	}
   2.956 + 
   2.957 + 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
   2.958 + 		fcport->os_target_id = rport->scsi_target_id;
   2.959 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
   2.960 +--- a/drivers/scsi/qla2xxx/qla_os.c
   2.961 ++++ b/drivers/scsi/qla2xxx/qla_os.c
   2.962 +@@ -1150,7 +1150,7 @@ iospace_error_exit:
   2.963 +  */
   2.964 + int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
   2.965 + {
   2.966 +-	int	ret;
   2.967 ++	int	ret = -ENODEV;
   2.968 + 	device_reg_t __iomem *reg;
   2.969 + 	struct Scsi_Host *host;
   2.970 + 	scsi_qla_host_t *ha;
   2.971 +@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.972 + 	fc_port_t *fcport;
   2.973 + 
   2.974 + 	if (pci_enable_device(pdev))
   2.975 +-		return -1;
   2.976 ++		goto probe_out;
   2.977 + 
   2.978 + 	host = scsi_host_alloc(&qla2x00_driver_template,
   2.979 + 	    sizeof(scsi_qla_host_t));
   2.980 +@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.981 + 
   2.982 + 	/* Configure PCI I/O space */
   2.983 + 	ret = qla2x00_iospace_config(ha);
   2.984 +-	if (ret != 0) {
   2.985 +-		goto probe_alloc_failed;
   2.986 +-	}
   2.987 ++	if (ret)
   2.988 ++		goto probe_failed;
   2.989 + 
   2.990 + 	/* Sanitize the information from PCI BIOS. */
   2.991 + 	host->irq = pdev->irq;
   2.992 +@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.993 + 		qla_printk(KERN_WARNING, ha,
   2.994 + 		    "[ERROR] Failed to allocate memory for adapter\n");
   2.995 + 
   2.996 +-		goto probe_alloc_failed;
   2.997 ++		ret = -ENOMEM;
   2.998 ++		goto probe_failed;
   2.999 + 	}
  2.1000 + 
  2.1001 +-	pci_set_drvdata(pdev, ha);
  2.1002 +-	host->this_id = 255;
  2.1003 +-	host->cmd_per_lun = 3;
  2.1004 +-	host->unique_id = ha->instance;
  2.1005 +-	host->max_cmd_len = MAX_CMDSZ;
  2.1006 +-	host->max_channel = ha->ports - 1;
  2.1007 +-	host->max_id = ha->max_targets;
  2.1008 +-	host->max_lun = ha->max_luns;
  2.1009 +-	host->transportt = qla2xxx_transport_template;
  2.1010 +-	if (scsi_add_host(host, &pdev->dev))
  2.1011 +-		goto probe_alloc_failed;
  2.1012 +-
  2.1013 +-	qla2x00_alloc_sysfs_attr(ha);
  2.1014 +-
  2.1015 + 	if (qla2x00_initialize_adapter(ha) &&
  2.1016 + 	    !(ha->device_flags & DFLG_NO_CABLE)) {
  2.1017 + 
  2.1018 +@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1019 + 		    "Adapter flags %x.\n",
  2.1020 + 		    ha->host_no, ha->device_flags));
  2.1021 + 
  2.1022 ++		ret = -ENODEV;
  2.1023 + 		goto probe_failed;
  2.1024 + 	}
  2.1025 + 
  2.1026 +-	qla2x00_init_host_attr(ha);
  2.1027 +-
  2.1028 + 	/*
  2.1029 + 	 * Startup the kernel thread for this host adapter
  2.1030 + 	 */
  2.1031 +@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1032 + 		qla_printk(KERN_WARNING, ha,
  2.1033 + 		    "Unable to start DPC thread!\n");
  2.1034 + 
  2.1035 ++		ret = -ENODEV;
  2.1036 + 		goto probe_failed;
  2.1037 + 	}
  2.1038 + 	wait_for_completion(&ha->dpc_inited);
  2.1039 + 
  2.1040 ++	host->this_id = 255;
  2.1041 ++	host->cmd_per_lun = 3;
  2.1042 ++	host->unique_id = ha->instance;
  2.1043 ++	host->max_cmd_len = MAX_CMDSZ;
  2.1044 ++	host->max_channel = ha->ports - 1;
  2.1045 ++	host->max_lun = MAX_LUNS;
  2.1046 ++	host->transportt = qla2xxx_transport_template;
  2.1047 ++
  2.1048 + 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
  2.1049 + 		ret = request_irq(host->irq, qla2100_intr_handler,
  2.1050 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  2.1051 + 	else
  2.1052 + 		ret = request_irq(host->irq, qla2300_intr_handler,
  2.1053 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  2.1054 +-	if (ret != 0) {
  2.1055 ++	if (ret) {
  2.1056 + 		qla_printk(KERN_WARNING, ha,
  2.1057 + 		    "Failed to reserve interrupt %d already in use.\n",
  2.1058 + 		    host->irq);
  2.1059 +@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1060 + 		msleep(10);
  2.1061 + 	}
  2.1062 + 
  2.1063 ++	pci_set_drvdata(pdev, ha);
  2.1064 + 	ha->flags.init_done = 1;
  2.1065 + 	num_hosts++;
  2.1066 + 
  2.1067 ++	ret = scsi_add_host(host, &pdev->dev);
  2.1068 ++	if (ret)
  2.1069 ++		goto probe_failed;
  2.1070 ++
  2.1071 ++	qla2x00_alloc_sysfs_attr(ha);
  2.1072 ++
  2.1073 ++	qla2x00_init_host_attr(ha);
  2.1074 ++
  2.1075 + 	qla_printk(KERN_INFO, ha, "\n"
  2.1076 + 	    " QLogic Fibre Channel HBA Driver: %s\n"
  2.1077 + 	    "  QLogic %s - %s\n"
  2.1078 +@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1079 + probe_failed:
  2.1080 + 	fc_remove_host(ha->host);
  2.1081 + 
  2.1082 +-	scsi_remove_host(host);
  2.1083 +-
  2.1084 +-probe_alloc_failed:
  2.1085 + 	qla2x00_free_device(ha);
  2.1086 + 
  2.1087 + 	scsi_host_put(host);
  2.1088 +@@ -1394,7 +1394,8 @@ probe_alloc_failed:
  2.1089 + probe_disable_device:
  2.1090 + 	pci_disable_device(pdev);
  2.1091 + 
  2.1092 +-	return -1;
  2.1093 ++probe_out:
  2.1094 ++	return ret;
  2.1095 + }
  2.1096 + EXPORT_SYMBOL_GPL(qla2x00_probe_one);
  2.1097 + 
  2.1098 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
  2.1099 +--- a/drivers/scsi/sg.c
  2.1100 ++++ b/drivers/scsi/sg.c
  2.1101 +@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
  2.1102 + {
  2.1103 + 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
  2.1104 + 
  2.1105 ++	s->private = it;
  2.1106 + 	if (! it)
  2.1107 + 		return NULL;
  2.1108 ++
  2.1109 + 	if (NULL == sg_dev_arr)
  2.1110 +-		goto err1;
  2.1111 ++		return NULL;
  2.1112 + 	it->index = *pos;
  2.1113 + 	it->max = sg_last_dev();
  2.1114 + 	if (it->index >= it->max)
  2.1115 +-		goto err1;
  2.1116 ++		return NULL;
  2.1117 + 	return it;
  2.1118 +-err1:
  2.1119 +-	kfree(it);
  2.1120 +-	return NULL;
  2.1121 + }
  2.1122 + 
  2.1123 + static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
  2.1124 + {
  2.1125 +-	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
  2.1126 ++	struct sg_proc_deviter * it = s->private;
  2.1127 + 
  2.1128 + 	*pos = ++it->index;
  2.1129 + 	return (it->index < it->max) ? it : NULL;
  2.1130 +@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
  2.1131 + 
  2.1132 + static void dev_seq_stop(struct seq_file *s, void *v)
  2.1133 + {
  2.1134 +-	kfree (v);
  2.1135 ++	struct sg_proc_deviter * it = s->private;
  2.1136 ++
  2.1137 ++	kfree (it);
  2.1138 + }
  2.1139 + 
  2.1140 + static int sg_proc_open_dev(struct inode *inode, struct file *file)
  2.1141 +diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
  2.1142 +--- a/drivers/usb/net/usbnet.c
  2.1143 ++++ b/drivers/usb/net/usbnet.c
  2.1144 +@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
  2.1145 + 
  2.1146 + 			// copy the packet data to the new skb
  2.1147 + 			memcpy(skb_put(gl_skb, size), packet->packet_data, size);
  2.1148 +-			skb_return (dev, skb);
  2.1149 ++			skb_return (dev, gl_skb);
  2.1150 + 		}
  2.1151 + 
  2.1152 + 		// advance to the next packet
  2.1153 +diff --git a/fs/bio.c b/fs/bio.c
  2.1154 +--- a/fs/bio.c
  2.1155 ++++ b/fs/bio.c
  2.1156 +@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
  2.1157 + 	 */
  2.1158 + 	bio->bi_vcnt = bio_src->bi_vcnt;
  2.1159 + 	bio->bi_size = bio_src->bi_size;
  2.1160 ++	bio->bi_idx = bio_src->bi_idx;
  2.1161 + 	bio_phys_segments(q, bio);
  2.1162 + 	bio_hw_segments(q, bio);
  2.1163 + }
  2.1164 +diff --git a/fs/char_dev.c b/fs/char_dev.c
  2.1165 +--- a/fs/char_dev.c
  2.1166 ++++ b/fs/char_dev.c
  2.1167 +@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
  2.1168 + 	struct char_device_struct *cd = NULL, **cp;
  2.1169 + 	int i = major_to_index(major);
  2.1170 + 
  2.1171 +-	up(&chrdevs_lock);
  2.1172 ++	down(&chrdevs_lock);
  2.1173 + 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
  2.1174 + 		if ((*cp)->major == major &&
  2.1175 + 		    (*cp)->baseminor == baseminor &&
  2.1176 +diff --git a/fs/exec.c b/fs/exec.c
  2.1177 +--- a/fs/exec.c
  2.1178 ++++ b/fs/exec.c
  2.1179 +@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
  2.1180 + 	}
  2.1181 + 	sig->group_exit_task = NULL;
  2.1182 + 	sig->notify_count = 0;
  2.1183 ++	sig->real_timer.data = (unsigned long)current;
  2.1184 + 	spin_unlock_irq(lock);
  2.1185 + 
  2.1186 + 	/*
  2.1187 +diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
  2.1188 +--- a/fs/isofs/compress.c
  2.1189 ++++ b/fs/isofs/compress.c
  2.1190 +@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
  2.1191 + 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
  2.1192 + 	brelse(bh);
  2.1193 + 
  2.1194 ++	if (cstart > cend)
  2.1195 ++		goto eio;
  2.1196 ++		
  2.1197 + 	csize = cend-cstart;
  2.1198 + 
  2.1199 ++	if (csize > deflateBound(1UL << zisofs_block_shift))
  2.1200 ++		goto eio;
  2.1201 ++
  2.1202 + 	/* Now page[] contains an array of pages, any of which can be NULL,
  2.1203 + 	   and the locks on which we hold.  We should now read the data and
  2.1204 + 	   release the pages.  If the pages are NULL the decompressed data
  2.1205 +diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
  2.1206 +--- a/include/asm-i386/string.h
  2.1207 ++++ b/include/asm-i386/string.h
  2.1208 +@@ -116,7 +116,8 @@ __asm__ __volatile__(
  2.1209 + 	"orb $1,%%al\n"
  2.1210 + 	"3:"
  2.1211 + 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
  2.1212 +-		     :"1" (cs),"2" (ct));
  2.1213 ++	:"1" (cs),"2" (ct)
  2.1214 ++	:"memory");
  2.1215 + return __res;
  2.1216 + }
  2.1217 + 
  2.1218 +@@ -138,8 +139,9 @@ __asm__ __volatile__(
  2.1219 + 	"3:\tsbbl %%eax,%%eax\n\t"
  2.1220 + 	"orb $1,%%al\n"
  2.1221 + 	"4:"
  2.1222 +-		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  2.1223 +-		     :"1" (cs),"2" (ct),"3" (count));
  2.1224 ++	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  2.1225 ++	:"1" (cs),"2" (ct),"3" (count)
  2.1226 ++	:"memory");
  2.1227 + return __res;
  2.1228 + }
  2.1229 + 
  2.1230 +@@ -158,7 +160,9 @@ __asm__ __volatile__(
  2.1231 + 	"movl $1,%1\n"
  2.1232 + 	"2:\tmovl %1,%0\n\t"
  2.1233 + 	"decl %0"
  2.1234 +-	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
  2.1235 ++	:"=a" (__res), "=&S" (d0)
  2.1236 ++	:"1" (s),"0" (c)
  2.1237 ++	:"memory");
  2.1238 + return __res;
  2.1239 + }
  2.1240 + 
  2.1241 +@@ -175,7 +179,9 @@ __asm__ __volatile__(
  2.1242 + 	"leal -1(%%esi),%0\n"
  2.1243 + 	"2:\ttestb %%al,%%al\n\t"
  2.1244 + 	"jne 1b"
  2.1245 +-	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
  2.1246 ++	:"=g" (__res), "=&S" (d0), "=&a" (d1)
  2.1247 ++	:"0" (0),"1" (s),"2" (c)
  2.1248 ++	:"memory");
  2.1249 + return __res;
  2.1250 + }
  2.1251 + 
  2.1252 +@@ -189,7 +195,9 @@ __asm__ __volatile__(
  2.1253 + 	"scasb\n\t"
  2.1254 + 	"notl %0\n\t"
  2.1255 + 	"decl %0"
  2.1256 +-	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
  2.1257 ++	:"=c" (__res), "=&D" (d0)
  2.1258 ++	:"1" (s),"a" (0), "0" (0xffffffffu)
  2.1259 ++	:"memory");
  2.1260 + return __res;
  2.1261 + }
  2.1262 + 
  2.1263 +@@ -333,7 +341,9 @@ __asm__ __volatile__(
  2.1264 + 	"je 1f\n\t"
  2.1265 + 	"movl $1,%0\n"
  2.1266 + 	"1:\tdecl %0"
  2.1267 +-	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
  2.1268 ++	:"=D" (__res), "=&c" (d0)
  2.1269 ++	:"a" (c),"0" (cs),"1" (count)
  2.1270 ++	:"memory");
  2.1271 + return __res;
  2.1272 + }
  2.1273 + 
  2.1274 +@@ -369,7 +379,7 @@ __asm__ __volatile__(
  2.1275 + 	"je 2f\n\t"
  2.1276 + 	"stosb\n"
  2.1277 + 	"2:"
  2.1278 +-	: "=&c" (d0), "=&D" (d1)
  2.1279 ++	:"=&c" (d0), "=&D" (d1)
  2.1280 + 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
  2.1281 + 	:"memory");
  2.1282 + return (s);	
  2.1283 +@@ -392,7 +402,8 @@ __asm__ __volatile__(
  2.1284 + 	"jne 1b\n"
  2.1285 + 	"3:\tsubl %2,%0"
  2.1286 + 	:"=a" (__res), "=&d" (d0)
  2.1287 +-	:"c" (s),"1" (count));
  2.1288 ++	:"c" (s),"1" (count)
  2.1289 ++	:"memory");
  2.1290 + return __res;
  2.1291 + }
  2.1292 + /* end of additional stuff */
  2.1293 +@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
  2.1294 + 		"dec %%edi\n"
  2.1295 + 		"1:"
  2.1296 + 		: "=D" (addr), "=c" (size)
  2.1297 +-		: "0" (addr), "1" (size), "a" (c));
  2.1298 ++		: "0" (addr), "1" (size), "a" (c)
  2.1299 ++		: "memory");
  2.1300 + 	return addr;
  2.1301 + }
  2.1302 + 
  2.1303 +diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
  2.1304 +--- a/include/asm-x86_64/smp.h
  2.1305 ++++ b/include/asm-x86_64/smp.h
  2.1306 +@@ -46,6 +46,8 @@ extern int pic_mode;
  2.1307 + extern int smp_num_siblings;
  2.1308 + extern void smp_flush_tlb(void);
  2.1309 + extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
  2.1310 ++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  2.1311 ++				     int retry, int wait);
  2.1312 + extern void smp_send_reschedule(int cpu);
  2.1313 + extern void smp_invalidate_rcv(void);		/* Process an NMI */
  2.1314 + extern void zap_low_mappings(void);
  2.1315 +diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
  2.1316 +--- a/include/linux/if_shaper.h
  2.1317 ++++ b/include/linux/if_shaper.h
  2.1318 +@@ -23,7 +23,7 @@ struct shaper
  2.1319 + 	__u32 shapeclock;
  2.1320 + 	unsigned long recovery;	/* Time we can next clock a packet out on
  2.1321 + 				   an empty queue */
  2.1322 +-	struct semaphore sem;
  2.1323 ++	spinlock_t lock;
  2.1324 +         struct net_device_stats stats;
  2.1325 + 	struct net_device *dev;
  2.1326 + 	int  (*hard_start_xmit) (struct sk_buff *skb,
  2.1327 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
  2.1328 +--- a/include/linux/skbuff.h
  2.1329 ++++ b/include/linux/skbuff.h
  2.1330 +@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
  2.1331 + {
  2.1332 + 	int hlen = skb_headlen(skb);
  2.1333 + 
  2.1334 +-	if (offset + len <= hlen)
  2.1335 ++	if (hlen - offset >= len)
  2.1336 + 		return skb->data + offset;
  2.1337 + 
  2.1338 + 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
  2.1339 +diff --git a/include/linux/zlib.h b/include/linux/zlib.h
  2.1340 +--- a/include/linux/zlib.h
  2.1341 ++++ b/include/linux/zlib.h
  2.1342 +@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
  2.1343 +    stream state was inconsistent (such as zalloc or state being NULL).
  2.1344 + */
  2.1345 + 
  2.1346 ++static inline unsigned long deflateBound(unsigned long s)
  2.1347 ++{
  2.1348 ++	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
  2.1349 ++}
  2.1350 ++
  2.1351 + extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
  2.1352 + /*
  2.1353 +      Dynamically update the compression level and compression strategy.  The
  2.1354 +diff --git a/kernel/module.c b/kernel/module.c
  2.1355 +--- a/kernel/module.c
  2.1356 ++++ b/kernel/module.c
  2.1357 +@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
  2.1358 + /* Created by linker magic */
  2.1359 + extern char __per_cpu_start[], __per_cpu_end[];
  2.1360 + 
  2.1361 +-static void *percpu_modalloc(unsigned long size, unsigned long align)
  2.1362 ++static void *percpu_modalloc(unsigned long size, unsigned long align,
  2.1363 ++			     const char *name)
  2.1364 + {
  2.1365 + 	unsigned long extra;
  2.1366 + 	unsigned int i;
  2.1367 + 	void *ptr;
  2.1368 + 
  2.1369 +-	BUG_ON(align > SMP_CACHE_BYTES);
  2.1370 ++	if (align > SMP_CACHE_BYTES) {
  2.1371 ++		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
  2.1372 ++		       name, align, SMP_CACHE_BYTES);
  2.1373 ++		align = SMP_CACHE_BYTES;
  2.1374 ++	}
  2.1375 + 
  2.1376 + 	ptr = __per_cpu_start;
  2.1377 + 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  2.1378 +@@ -347,7 +352,8 @@ static int percpu_modinit(void)
  2.1379 + }	
  2.1380 + __initcall(percpu_modinit);
  2.1381 + #else /* ... !CONFIG_SMP */
  2.1382 +-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
  2.1383 ++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
  2.1384 ++				    const char *name)
  2.1385 + {
  2.1386 + 	return NULL;
  2.1387 + }
  2.1388 +@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
  2.1389 + 	if (pcpuindex) {
  2.1390 + 		/* We have a special allocation for this section. */
  2.1391 + 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
  2.1392 +-					 sechdrs[pcpuindex].sh_addralign);
  2.1393 ++					 sechdrs[pcpuindex].sh_addralign,
  2.1394 ++					 mod->name);
  2.1395 + 		if (!percpu) {
  2.1396 + 			err = -ENOMEM;
  2.1397 + 			goto free_mod;
  2.1398 +diff --git a/kernel/signal.c b/kernel/signal.c
  2.1399 +--- a/kernel/signal.c
  2.1400 ++++ b/kernel/signal.c
  2.1401 +@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
  2.1402 + {
  2.1403 + 	struct task_struct *t;
  2.1404 + 
  2.1405 +-	if (p->flags & SIGNAL_GROUP_EXIT)
  2.1406 ++	if (p->signal->flags & SIGNAL_GROUP_EXIT)
  2.1407 + 		/*
  2.1408 + 		 * The process is in the middle of dying already.
  2.1409 + 		 */
  2.1410 +diff --git a/lib/inflate.c b/lib/inflate.c
  2.1411 +--- a/lib/inflate.c
  2.1412 ++++ b/lib/inflate.c
  2.1413 +@@ -326,7 +326,7 @@ DEBG("huft1 ");
  2.1414 +   {
  2.1415 +     *t = (struct huft *)NULL;
  2.1416 +     *m = 0;
  2.1417 +-    return 0;
  2.1418 ++    return 2;
  2.1419 +   }
  2.1420 + 
  2.1421 + DEBG("huft2 ");
  2.1422 +@@ -374,6 +374,7 @@ DEBG("huft5 ");
  2.1423 +     if ((j = *p++) != 0)
  2.1424 +       v[x[j]++] = i;
  2.1425 +   } while (++i < n);
  2.1426 ++  n = x[g];                   /* set n to length of v */
  2.1427 + 
  2.1428 + DEBG("h6 ");
  2.1429 + 
  2.1430 +@@ -410,12 +411,13 @@ DEBG1("1 ");
  2.1431 + DEBG1("2 ");
  2.1432 +           f -= a + 1;           /* deduct codes from patterns left */
  2.1433 +           xp = c + k;
  2.1434 +-          while (++j < z)       /* try smaller tables up to z bits */
  2.1435 +-          {
  2.1436 +-            if ((f <<= 1) <= *++xp)
  2.1437 +-              break;            /* enough codes to use up j bits */
  2.1438 +-            f -= *xp;           /* else deduct codes from patterns */
  2.1439 +-          }
  2.1440 ++          if (j < z)
  2.1441 ++            while (++j < z)       /* try smaller tables up to z bits */
  2.1442 ++            {
  2.1443 ++              if ((f <<= 1) <= *++xp)
  2.1444 ++                break;            /* enough codes to use up j bits */
  2.1445 ++              f -= *xp;           /* else deduct codes from patterns */
  2.1446 ++            }
  2.1447 +         }
  2.1448 + DEBG1("3 ");
  2.1449 +         z = 1 << j;             /* table entries for j-bit table */
  2.1450 +diff --git a/mm/memory.c b/mm/memory.c
  2.1451 +--- a/mm/memory.c
  2.1452 ++++ b/mm/memory.c
  2.1453 +@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
  2.1454 + {
  2.1455 + 	pgd_t *pgd;
  2.1456 + 	unsigned long next;
  2.1457 +-	unsigned long end = addr + size;
  2.1458 ++	unsigned long end = addr + PAGE_ALIGN(size);
  2.1459 + 	struct mm_struct *mm = vma->vm_mm;
  2.1460 + 	int err;
  2.1461 + 
  2.1462 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
  2.1463 +--- a/mm/mempolicy.c
  2.1464 ++++ b/mm/mempolicy.c
  2.1465 +@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
  2.1466 + 	struct mempolicy *new;
  2.1467 + 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
  2.1468 + 
  2.1469 +-	if (mode > MPOL_MAX)
  2.1470 ++	if (mode < 0 || mode > MPOL_MAX)
  2.1471 + 		return -EINVAL;
  2.1472 + 	err = get_nodes(nodes, nmask, maxnode, mode);
  2.1473 + 	if (err)
  2.1474 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
  2.1475 +--- a/net/8021q/vlan.c
  2.1476 ++++ b/net/8021q/vlan.c
  2.1477 +@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
  2.1478 + 			if (!vlandev)
  2.1479 + 				continue;
  2.1480 + 
  2.1481 ++			if (netif_carrier_ok(dev)) {
  2.1482 ++				if (!netif_carrier_ok(vlandev))
  2.1483 ++					netif_carrier_on(vlandev);
  2.1484 ++			} else {
  2.1485 ++				if (netif_carrier_ok(vlandev))
  2.1486 ++					netif_carrier_off(vlandev);
  2.1487 ++			}
  2.1488 ++
  2.1489 + 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
  2.1490 + 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
  2.1491 + 					| flgs;
  2.1492 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
  2.1493 +--- a/net/ipv4/icmp.c
  2.1494 ++++ b/net/ipv4/icmp.c
  2.1495 +@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
  2.1496 + {
  2.1497 + 	struct sk_buff *skb;
  2.1498 + 
  2.1499 +-	ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
  2.1500 +-		       icmp_param->data_len+icmp_param->head_len,
  2.1501 +-		       icmp_param->head_len,
  2.1502 +-		       ipc, rt, MSG_DONTWAIT);
  2.1503 +-
  2.1504 +-	if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
  2.1505 ++	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
  2.1506 ++		           icmp_param->data_len+icmp_param->head_len,
  2.1507 ++		           icmp_param->head_len,
  2.1508 ++		           ipc, rt, MSG_DONTWAIT) < 0)
  2.1509 ++		ip_flush_pending_frames(icmp_socket->sk);
  2.1510 ++	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
  2.1511 + 		struct icmphdr *icmph = skb->h.icmph;
  2.1512 + 		unsigned int csum = 0;
  2.1513 + 		struct sk_buff *skb1;
  2.1514 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  2.1515 +--- a/net/ipv4/ip_output.c
  2.1516 ++++ b/net/ipv4/ip_output.c
  2.1517 +@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
  2.1518 + #ifdef CONFIG_NETFILTER_DEBUG
  2.1519 + 	nf_debug_ip_loopback_xmit(newskb);
  2.1520 + #endif
  2.1521 +-	nf_reset(newskb);
  2.1522 + 	netif_rx(newskb);
  2.1523 + 	return 0;
  2.1524 + }
  2.1525 +@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
  2.1526 + 	nf_debug_ip_finish_output2(skb);
  2.1527 + #endif /*CONFIG_NETFILTER_DEBUG*/
  2.1528 + 
  2.1529 +-	nf_reset(skb);
  2.1530 +-
  2.1531 + 	if (hh) {
  2.1532 + 		int hh_alen;
  2.1533 + 
  2.1534 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
  2.1535 +--- a/net/ipv4/ip_sockglue.c
  2.1536 ++++ b/net/ipv4/ip_sockglue.c
  2.1537 +@@ -848,6 +848,9 @@ mc_msf_out:
  2.1538 +  
  2.1539 + 		case IP_IPSEC_POLICY:
  2.1540 + 		case IP_XFRM_POLICY:
  2.1541 ++			err = -EPERM;
  2.1542 ++			if (!capable(CAP_NET_ADMIN))
  2.1543 ++				break;
  2.1544 + 			err = xfrm_user_policy(sk, optname, optval, optlen);
  2.1545 + 			break;
  2.1546 + 
  2.1547 +diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
  2.1548 +--- a/net/ipv4/netfilter/ip_conntrack_core.c
  2.1549 ++++ b/net/ipv4/netfilter/ip_conntrack_core.c
  2.1550 +@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
  2.1551 + 		schedule();
  2.1552 + 		goto i_see_dead_people;
  2.1553 + 	}
  2.1554 ++	/* wait until all references to ip_conntrack_untracked are dropped */
  2.1555 ++	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
  2.1556 ++		schedule();
  2.1557 + 
  2.1558 + 	kmem_cache_destroy(ip_conntrack_cachep);
  2.1559 + 	kmem_cache_destroy(ip_conntrack_expect_cachep);
  2.1560 +diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1561 +--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1562 ++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1563 +@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
  2.1564 + 				        const struct net_device *out,
  2.1565 + 				        int (*okfn)(struct sk_buff *))
  2.1566 + {
  2.1567 ++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
  2.1568 ++	/* Previously seen (loopback)?  Ignore.  Do this before
  2.1569 ++           fragment check. */
  2.1570 ++	if ((*pskb)->nfct)
  2.1571 ++		return NF_ACCEPT;
  2.1572 ++#endif
  2.1573 ++
  2.1574 + 	/* Gather fragments. */
  2.1575 + 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
  2.1576 + 		*pskb = ip_ct_gather_frags(*pskb,
  2.1577 +diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1578 +--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1579 ++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1580 +@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
  2.1581 + 		 enum ip_nat_manip_type maniptype,
  2.1582 + 		 const struct ip_conntrack *conntrack)
  2.1583 + {
  2.1584 +-	static u_int16_t port, *portptr;
  2.1585 ++	static u_int16_t port;
  2.1586 ++	u_int16_t *portptr;
  2.1587 + 	unsigned int range_size, min, i;
  2.1588 + 
  2.1589 + 	if (maniptype == IP_NAT_MANIP_SRC)
  2.1590 +diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1591 +--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1592 ++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1593 +@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
  2.1594 + 		 enum ip_nat_manip_type maniptype,
  2.1595 + 		 const struct ip_conntrack *conntrack)
  2.1596 + {
  2.1597 +-	static u_int16_t port, *portptr;
  2.1598 ++	static u_int16_t port;
  2.1599 ++	u_int16_t *portptr;
  2.1600 + 	unsigned int range_size, min, i;
  2.1601 + 
  2.1602 + 	if (maniptype == IP_NAT_MANIP_SRC)
  2.1603 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
  2.1604 +--- a/net/ipv6/ip6_input.c
  2.1605 ++++ b/net/ipv6/ip6_input.c
  2.1606 +@@ -198,12 +198,13 @@ resubmit:
  2.1607 + 		if (!raw_sk) {
  2.1608 + 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
  2.1609 + 				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
  2.1610 +-				icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
  2.1611 ++				icmpv6_send(skb, ICMPV6_PARAMPROB,
  2.1612 ++				            ICMPV6_UNK_NEXTHDR, nhoff,
  2.1613 ++				            skb->dev);
  2.1614 + 			}
  2.1615 +-		} else {
  2.1616 ++		} else
  2.1617 + 			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
  2.1618 +-			kfree_skb(skb);
  2.1619 +-		}
  2.1620 ++		kfree_skb(skb);
  2.1621 + 	}
  2.1622 + 	rcu_read_unlock();
  2.1623 + 	return 0;
  2.1624 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
  2.1625 +--- a/net/ipv6/ipv6_sockglue.c
  2.1626 ++++ b/net/ipv6/ipv6_sockglue.c
  2.1627 +@@ -503,6 +503,9 @@ done:
  2.1628 + 		break;
  2.1629 + 	case IPV6_IPSEC_POLICY:
  2.1630 + 	case IPV6_XFRM_POLICY:
  2.1631 ++		retv = -EPERM;
  2.1632 ++		if (!capable(CAP_NET_ADMIN))
  2.1633 ++			break;
  2.1634 + 		retv = xfrm_user_policy(sk, optname, optval, optlen);
  2.1635 + 		break;
  2.1636 + 
  2.1637 +diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
  2.1638 +--- a/net/ipv6/netfilter/ip6_queue.c
  2.1639 ++++ b/net/ipv6/netfilter/ip6_queue.c
  2.1640 +@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
  2.1641 + static void
  2.1642 + ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
  2.1643 + {
  2.1644 ++	local_bh_disable();
  2.1645 + 	nf_reinject(entry->skb, entry->info, verdict);
  2.1646 ++	local_bh_enable();
  2.1647 + 	kfree(entry);
  2.1648 + }
  2.1649 + 
  2.1650 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
  2.1651 +--- a/net/netlink/af_netlink.c
  2.1652 ++++ b/net/netlink/af_netlink.c
  2.1653 +@@ -315,8 +315,8 @@ err:
  2.1654 + static void netlink_remove(struct sock *sk)
  2.1655 + {
  2.1656 + 	netlink_table_grab();
  2.1657 +-	nl_table[sk->sk_protocol].hash.entries--;
  2.1658 +-	sk_del_node_init(sk);
  2.1659 ++	if (sk_del_node_init(sk))
  2.1660 ++		nl_table[sk->sk_protocol].hash.entries--;
  2.1661 + 	if (nlk_sk(sk)->groups)
  2.1662 + 		__sk_del_bind_node(sk);
  2.1663 + 	netlink_table_ungrab();
  2.1664 +@@ -429,7 +429,12 @@ retry:
  2.1665 + 	err = netlink_insert(sk, pid);
  2.1666 + 	if (err == -EADDRINUSE)
  2.1667 + 		goto retry;
  2.1668 +-	return 0;
  2.1669 ++
  2.1670 ++	/* If 2 threads race to autobind, that is fine.  */
  2.1671 ++	if (err == -EBUSY)
  2.1672 ++		err = 0;
  2.1673 ++
  2.1674 ++	return err;
  2.1675 + }
  2.1676 + 
  2.1677 + static inline int netlink_capable(struct socket *sock, unsigned int flag) 
  2.1678 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
  2.1679 +--- a/net/packet/af_packet.c
  2.1680 ++++ b/net/packet/af_packet.c
  2.1681 +@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
  2.1682 + 	dst_release(skb->dst);
  2.1683 + 	skb->dst = NULL;
  2.1684 + 
  2.1685 ++	/* drop conntrack reference */
  2.1686 ++	nf_reset(skb);
  2.1687 ++
  2.1688 + 	spkt = (struct sockaddr_pkt*)skb->cb;
  2.1689 + 
  2.1690 + 	skb_push(skb, skb->data-skb->mac.raw);
  2.1691 +@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
  2.1692 + 	dst_release(skb->dst);
  2.1693 + 	skb->dst = NULL;
  2.1694 + 
  2.1695 ++	/* drop conntrack reference */
  2.1696 ++	nf_reset(skb);
  2.1697 ++
  2.1698 + 	spin_lock(&sk->sk_receive_queue.lock);
  2.1699 + 	po->stats.tp_packets++;
  2.1700 + 	__skb_queue_tail(&sk->sk_receive_queue, skb);
  2.1701 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
  2.1702 +--- a/net/xfrm/xfrm_user.c
  2.1703 ++++ b/net/xfrm/xfrm_user.c
  2.1704 +@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
  2.1705 + 	if (nr > XFRM_MAX_DEPTH)
  2.1706 + 		return NULL;
  2.1707 + 
  2.1708 ++	if (p->dir > XFRM_POLICY_OUT)
  2.1709 ++		return NULL;
  2.1710 ++
  2.1711 + 	xp = xfrm_policy_alloc(GFP_KERNEL);
  2.1712 + 	if (xp == NULL) {
  2.1713 + 		*dir = -ENOBUFS;
  2.1714 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
  2.1715 +--- a/security/keys/keyring.c
  2.1716 ++++ b/security/keys/keyring.c
  2.1717 +@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
  2.1718 + 
  2.1719 + 	if (keyring->description) {
  2.1720 + 		write_lock(&keyring_name_lock);
  2.1721 +-		list_del(&keyring->type_data.link);
  2.1722 ++
  2.1723 ++		if (keyring->type_data.link.next != NULL &&
  2.1724 ++		    !list_empty(&keyring->type_data.link))
  2.1725 ++			list_del(&keyring->type_data.link);
  2.1726 ++
  2.1727 + 		write_unlock(&keyring_name_lock);
  2.1728 + 	}
  2.1729 + 
  2.1730 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
  2.1731 +--- a/security/keys/process_keys.c
  2.1732 ++++ b/security/keys/process_keys.c
  2.1733 +@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
  2.1734 + 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
  2.1735 + 		if (IS_ERR(keyring)) {
  2.1736 + 			ret = PTR_ERR(keyring);
  2.1737 +-			goto error;
  2.1738 ++			goto error2;
  2.1739 + 		}
  2.1740 + 	}
  2.1741 + 	else if (IS_ERR(keyring)) {