direct-io.hg

changeset 7422:4dd58ef39639

Rename 2.6.12.5 patch so it actually gets applied. Fail
the build if 'patch' is missing.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Oct 19 07:43:03 2005 +0100 (2005-10-19)
parents aabc33c3c0ac
children 8dbf531776e1
files buildconfigs/Rules.mk patches/linux-2.6.12/2.6.12.5.patch
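
The Rules.mk hunk below adds a `which patch || exit 1` line ahead of the patch-application loop so the build stops when no 'patch' binary is installed. A minimal standalone sketch of that guard (not part of the changeset; the ref-demo target and patches/demo path are hypothetical stand-ins for the real pattern rule) is:

# Each recipe line runs in its own shell. If 'patch' is not on the PATH,
# 'which patch' returns non-zero, the trailing '|| exit 1' makes the
# failure explicit, and make aborts the target instead of silently
# skipping every *.patch file.
ref-demo/.valid-ref:
	which patch || exit 1
	for i in patches/demo/*.patch ; do patch -p1 < $$i || exit 1 ; done
	touch $@

With the guard in place, a host without 'patch' fails at this rule rather than producing an unpatched tree whose timestamp file suggests the patches were applied.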
line diff
     1.1 --- a/buildconfigs/Rules.mk	Tue Oct 18 19:28:16 2005 +0100
     1.2 +++ b/buildconfigs/Rules.mk	Wed Oct 19 07:43:03 2005 +0100
     1.3 @@ -82,6 +82,7 @@ clean::
     1.4  ref-%/.valid-ref: pristine-%/.valid-pristine
     1.5  	rm -rf $(@D)
     1.6  	cp -al $(<D) $(@D)
     1.7 +	which patch || exit 1
     1.8  	([ -d patches/$* ] && \
     1.9  	  for i in patches/$*/*.patch ; do ( cd $(@D) ; patch -p1 <../$$i || exit 1 ) ; done) || true
    1.10  	touch $@ # update timestamp to avoid rebuild
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/patches/linux-2.6.12/2.6.12.5.patch	Wed Oct 19 07:43:03 2005 +0100
     2.3 @@ -0,0 +1,1614 @@
     2.4 +diff --git a/Makefile b/Makefile
     2.5 +--- a/Makefile
     2.6 ++++ b/Makefile
     2.7 +@@ -1,7 +1,7 @@
     2.8 + VERSION = 2
     2.9 + PATCHLEVEL = 6
    2.10 + SUBLEVEL = 12
    2.11 +-EXTRAVERSION =
    2.12 ++EXTRAVERSION = .5
    2.13 + NAME=Woozy Numbat
    2.14 + 
    2.15 + # *DOCUMENTATION*
    2.16 +@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
    2.17 + #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
    2.18 + #Adding $(srctree) adds about 20M on i386 to the size of the output file!
    2.19 + 
    2.20 +-ifeq ($(KBUILD_OUTPUT),)
    2.21 ++ifeq ($(src),$(obj))
    2.22 + __srctree =
    2.23 + else
    2.24 + __srctree = $(srctree)/
    2.25 +diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.26 +--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.27 ++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    2.28 +@@ -44,7 +44,7 @@
    2.29 + 
    2.30 + #define PFX "powernow-k8: "
    2.31 + #define BFX PFX "BIOS error: "
    2.32 +-#define VERSION "version 1.40.2"
    2.33 ++#define VERSION "version 1.40.4"
    2.34 + #include "powernow-k8.h"
    2.35 + 
    2.36 + /* serialize freq changes  */
    2.37 +@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
    2.38 + {
    2.39 + 	struct powernow_k8_data *data;
    2.40 + 	cpumask_t oldmask = CPU_MASK_ALL;
    2.41 +-	int rc;
    2.42 ++	int rc, i;
    2.43 + 
    2.44 + 	if (!check_supported_cpu(pol->cpu))
    2.45 + 		return -ENODEV;
    2.46 +@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
    2.47 + 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
    2.48 + 	       data->currfid, data->currvid);
    2.49 + 
    2.50 +-	powernow_data[pol->cpu] = data;
    2.51 ++	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
    2.52 ++		powernow_data[i] = data;
    2.53 ++	}
    2.54 + 
    2.55 + 	return 0;
    2.56 + 
    2.57 +diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
    2.58 +--- a/arch/i386/kernel/process.c
    2.59 ++++ b/arch/i386/kernel/process.c
    2.60 +@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
    2.61 + 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
    2.62 + 		return -EINVAL;
    2.63 + 
    2.64 ++	memset(&info, 0, sizeof(info));
    2.65 ++
    2.66 + 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
    2.67 + 
    2.68 + 	info.entry_number = idx;
    2.69 +diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
    2.70 +--- a/arch/ia64/kernel/ptrace.c
    2.71 ++++ b/arch/ia64/kernel/ptrace.c
    2.72 +@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
    2.73 + 				*data = (pt->cr_ipsr & IPSR_MASK);
    2.74 + 			return 0;
    2.75 + 
    2.76 ++		      case PT_AR_RSC:
    2.77 ++			if (write_access)
    2.78 ++				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
    2.79 ++			else
    2.80 ++				*data = pt->ar_rsc;
    2.81 ++			return 0;
    2.82 ++
    2.83 + 		      case PT_AR_RNAT:
    2.84 + 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
    2.85 + 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
    2.86 +@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
    2.87 + 		      case PT_AR_BSPSTORE:
    2.88 + 			ptr = pt_reg_addr(pt, ar_bspstore);
    2.89 + 			break;
    2.90 +-		      case PT_AR_RSC:
    2.91 +-			ptr = pt_reg_addr(pt, ar_rsc);
    2.92 +-			break;
    2.93 + 		      case PT_AR_UNAT:
    2.94 + 			ptr = pt_reg_addr(pt, ar_unat);
    2.95 + 			break;
    2.96 +@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
    2.97 + static long
    2.98 + ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
    2.99 + {
   2.100 +-	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   2.101 ++	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   2.102 + 	struct unw_frame_info info;
   2.103 + 	struct switch_stack *sw;
   2.104 + 	struct ia64_fpreg fpval;
   2.105 +@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
   2.106 + 	/* app regs */
   2.107 + 
   2.108 + 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
   2.109 +-	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
   2.110 ++	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
   2.111 + 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
   2.112 + 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
   2.113 + 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
   2.114 +@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
   2.115 + 	retval |= __get_user(nat_bits, &ppr->nat);
   2.116 + 
   2.117 + 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
   2.118 ++	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
   2.119 + 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
   2.120 + 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
   2.121 + 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
   2.122 +diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
   2.123 +--- a/arch/ia64/kernel/signal.c
   2.124 ++++ b/arch/ia64/kernel/signal.c
   2.125 +@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
   2.126 + static long
   2.127 + restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
   2.128 + {
   2.129 +-	unsigned long ip, flags, nat, um, cfm;
   2.130 ++	unsigned long ip, flags, nat, um, cfm, rsc;
   2.131 + 	long err;
   2.132 + 
   2.133 + 	/* Always make any pending restarted system calls return -EINTR */
   2.134 +@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
   2.135 + 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
   2.136 + 	err |= __get_user(cfm, &sc->sc_cfm);
   2.137 + 	err |= __get_user(um, &sc->sc_um);			/* user mask */
   2.138 +-	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
   2.139 ++	err |= __get_user(rsc, &sc->sc_ar_rsc);
   2.140 + 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
   2.141 + 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
   2.142 + 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
   2.143 +@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
   2.144 + 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
   2.145 + 
   2.146 + 	scr->pt.cr_ifs = cfm | (1UL << 63);
   2.147 ++	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
   2.148 + 
   2.149 + 	/* establish new instruction pointer: */
   2.150 + 	scr->pt.cr_iip = ip & ~0x3UL;
   2.151 +diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
   2.152 +--- a/arch/ppc/kernel/time.c
   2.153 ++++ b/arch/ppc/kernel/time.c
   2.154 +@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
   2.155 + 
   2.156 + extern unsigned long wall_jiffies;
   2.157 + 
   2.158 ++/* used for timezone offset */
   2.159 ++static long timezone_offset;
   2.160 ++
   2.161 + DEFINE_SPINLOCK(rtc_lock);
   2.162 + 
   2.163 + EXPORT_SYMBOL(rtc_lock);
   2.164 +@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
   2.165 + 		     xtime.tv_sec - last_rtc_update >= 659 &&
   2.166 + 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
   2.167 + 		     jiffies - wall_jiffies == 1) {
   2.168 +-		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
   2.169 ++		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
   2.170 + 				last_rtc_update = xtime.tv_sec+1;
   2.171 + 			else
   2.172 + 				/* Try again one minute later */
   2.173 +@@ -286,7 +289,7 @@ void __init time_init(void)
   2.174 + 	unsigned old_stamp, stamp, elapsed;
   2.175 + 
   2.176 +         if (ppc_md.time_init != NULL)
   2.177 +-                time_offset = ppc_md.time_init();
   2.178 ++                timezone_offset = ppc_md.time_init();
   2.179 + 
   2.180 + 	if (__USE_RTC()) {
   2.181 + 		/* 601 processor: dec counts down by 128 every 128ns */
   2.182 +@@ -331,10 +334,10 @@ void __init time_init(void)
   2.183 + 	set_dec(tb_ticks_per_jiffy);
   2.184 + 
   2.185 + 	/* If platform provided a timezone (pmac), we correct the time */
   2.186 +-        if (time_offset) {
   2.187 +-		sys_tz.tz_minuteswest = -time_offset / 60;
   2.188 ++        if (timezone_offset) {
   2.189 ++		sys_tz.tz_minuteswest = -timezone_offset / 60;
   2.190 + 		sys_tz.tz_dsttime = 0;
   2.191 +-		xtime.tv_sec -= time_offset;
   2.192 ++		xtime.tv_sec -= timezone_offset;
   2.193 +         }
   2.194 +         set_normalized_timespec(&wall_to_monotonic,
   2.195 +                                 -xtime.tv_sec, -xtime.tv_nsec);
   2.196 +diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
   2.197 +--- a/arch/ppc64/boot/zlib.c
   2.198 ++++ b/arch/ppc64/boot/zlib.c
   2.199 +@@ -1307,7 +1307,7 @@ local int huft_build(
   2.200 +   {
   2.201 +     *t = (inflate_huft *)Z_NULL;
   2.202 +     *m = 0;
   2.203 +-    return Z_OK;
   2.204 ++    return Z_DATA_ERROR;
   2.205 +   }
   2.206 + 
   2.207 + 
   2.208 +@@ -1351,6 +1351,7 @@ local int huft_build(
   2.209 +     if ((j = *p++) != 0)
   2.210 +       v[x[j]++] = i;
   2.211 +   } while (++i < n);
   2.212 ++  n = x[g];			/* set n to length of v */
   2.213 + 
   2.214 + 
   2.215 +   /* Generate the Huffman codes and for each, make the table entries */
   2.216 +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
   2.217 +--- a/arch/um/kernel/process.c
   2.218 ++++ b/arch/um/kernel/process.c
   2.219 +@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
   2.220 + 	return(arg.pid);
   2.221 + }
   2.222 + 
   2.223 +-static int ptrace_child(void)
   2.224 ++static int ptrace_child(void *arg)
   2.225 + {
   2.226 + 	int ret;
   2.227 + 	int pid = os_getpid(), ppid = getppid();
   2.228 +@@ -159,16 +159,20 @@ static int ptrace_child(void)
   2.229 + 	_exit(ret);
   2.230 + }
   2.231 + 
   2.232 +-static int start_ptraced_child(void)
   2.233 ++static int start_ptraced_child(void **stack_out)
   2.234 + {
   2.235 ++	void *stack;
   2.236 ++	unsigned long sp;
   2.237 + 	int pid, n, status;
   2.238 + 	
   2.239 +-	pid = fork();
   2.240 +-	if(pid == 0)
   2.241 +-		ptrace_child();
   2.242 +-
   2.243 ++	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
   2.244 ++		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   2.245 ++	if(stack == MAP_FAILED)
   2.246 ++		panic("check_ptrace : mmap failed, errno = %d", errno);
   2.247 ++	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
   2.248 ++	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
   2.249 + 	if(pid < 0)
   2.250 +-		panic("check_ptrace : fork failed, errno = %d", errno);
   2.251 ++		panic("check_ptrace : clone failed, errno = %d", errno);
   2.252 + 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
   2.253 + 	if(n < 0)
   2.254 + 		panic("check_ptrace : wait failed, errno = %d", errno);
   2.255 +@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
   2.256 + 		panic("check_ptrace : expected SIGSTOP, got status = %d",
   2.257 + 		      status);
   2.258 + 
   2.259 ++	*stack_out = stack;
   2.260 + 	return(pid);
   2.261 + }
   2.262 + 
   2.263 +@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
   2.264 +  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
   2.265 +  * So only for SYSEMU features we test mustpanic, while normal host features
   2.266 +  * must work anyway!*/
   2.267 +-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
   2.268 ++static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
   2.269 + {
   2.270 + 	int status, n, ret = 0;
   2.271 + 
   2.272 + 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
   2.273 +-		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
   2.274 ++		panic("check_ptrace : ptrace failed, errno = %d", errno);
   2.275 + 	CATCH_EINTR(n = waitpid(pid, &status, 0));
   2.276 + 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
   2.277 + 		int exit_with = WEXITSTATUS(status);
   2.278 +@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
   2.279 + 		printk("check_ptrace : child exited with exitcode %d, while "
   2.280 + 		      "expecting %d; status 0x%x", exit_with,
   2.281 + 		      exitcode, status);
   2.282 +-		if (mustexit)
   2.283 ++		if (mustpanic)
   2.284 + 			panic("\n");
   2.285 + 		else
   2.286 + 			printk("\n");
   2.287 + 		ret = -1;
   2.288 + 	}
   2.289 + 
   2.290 ++	if(munmap(stack, PAGE_SIZE) < 0)
   2.291 ++		panic("check_ptrace : munmap failed, errno = %d", errno);
   2.292 + 	return ret;
   2.293 + }
   2.294 + 
   2.295 +@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
   2.296 + 
   2.297 + static void __init check_sysemu(void)
   2.298 + {
   2.299 ++	void *stack;
   2.300 + 	int pid, syscall, n, status, count=0;
   2.301 + 
   2.302 + 	printk("Checking syscall emulation patch for ptrace...");
   2.303 + 	sysemu_supported = 0;
   2.304 +-	pid = start_ptraced_child();
   2.305 ++	pid = start_ptraced_child(&stack);
   2.306 + 
   2.307 + 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
   2.308 + 		goto fail;
   2.309 +@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
   2.310 + 		panic("check_sysemu : failed to modify system "
   2.311 + 		      "call return, errno = %d", errno);
   2.312 + 
   2.313 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
   2.314 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   2.315 + 		goto fail_stopped;
   2.316 + 
   2.317 + 	sysemu_supported = 1;
   2.318 +@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
   2.319 + 	set_using_sysemu(!force_sysemu_disabled);
   2.320 + 
   2.321 + 	printk("Checking advanced syscall emulation patch for ptrace...");
   2.322 +-	pid = start_ptraced_child();
   2.323 ++	pid = start_ptraced_child(&stack);
   2.324 + 	while(1){
   2.325 + 		count++;
   2.326 + 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
   2.327 +@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
   2.328 + 			break;
   2.329 + 		}
   2.330 + 	}
   2.331 +-	if (stop_ptraced_child(pid, 0, 0) < 0)
   2.332 ++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   2.333 + 		goto fail_stopped;
   2.334 + 
   2.335 + 	sysemu_supported = 2;
   2.336 +@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
   2.337 + 	return;
   2.338 + 
   2.339 + fail:
   2.340 +-	stop_ptraced_child(pid, 1, 0);
   2.341 ++	stop_ptraced_child(pid, stack, 1, 0);
   2.342 + fail_stopped:
   2.343 + 	printk("missing\n");
   2.344 + }
   2.345 + 
   2.346 + void __init check_ptrace(void)
   2.347 + {
   2.348 ++	void *stack;
   2.349 + 	int pid, syscall, n, status;
   2.350 + 
   2.351 + 	printk("Checking that ptrace can change system call numbers...");
   2.352 +-	pid = start_ptraced_child();
   2.353 ++	pid = start_ptraced_child(&stack);
   2.354 + 
   2.355 + 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
   2.356 + 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
   2.357 +@@ -330,7 +339,7 @@ void __init check_ptrace(void)
   2.358 + 			break;
   2.359 + 		}
   2.360 + 	}
   2.361 +-	stop_ptraced_child(pid, 0, 1);
   2.362 ++	stop_ptraced_child(pid, stack, 0, 1);
   2.363 + 	printk("OK\n");
   2.364 + 	check_sysemu();
   2.365 + }
   2.366 +@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
   2.367 + static inline int check_skas3_ptrace_support(void)
   2.368 + {
   2.369 + 	struct ptrace_faultinfo fi;
   2.370 ++	void *stack;
   2.371 + 	int pid, n, ret = 1;
   2.372 + 
   2.373 + 	printf("Checking for the skas3 patch in the host...");
   2.374 +-	pid = start_ptraced_child();
   2.375 ++	pid = start_ptraced_child(&stack);
   2.376 + 
   2.377 + 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
   2.378 + 	if (n < 0) {
   2.379 +@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
   2.380 + 	}
   2.381 + 
   2.382 + 	init_registers(pid);
   2.383 +-	stop_ptraced_child(pid, 1, 1);
   2.384 ++	stop_ptraced_child(pid, stack, 1, 1);
   2.385 + 
   2.386 + 	return(ret);
   2.387 + }
   2.388 +diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
   2.389 +--- a/arch/x86_64/ia32/syscall32.c
   2.390 ++++ b/arch/x86_64/ia32/syscall32.c
   2.391 +@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
   2.392 + 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
   2.393 + 	struct vm_area_struct *vma;
   2.394 + 	struct mm_struct *mm = current->mm;
   2.395 ++	int ret;
   2.396 + 
   2.397 + 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
   2.398 + 	if (!vma)
   2.399 +@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
   2.400 + 	vma->vm_mm = mm;
   2.401 + 
   2.402 + 	down_write(&mm->mmap_sem);
   2.403 +-	insert_vm_struct(mm, vma);
   2.404 ++	if ((ret = insert_vm_struct(mm, vma))) {
   2.405 ++		up_write(&mm->mmap_sem);
   2.406 ++		kmem_cache_free(vm_area_cachep, vma);
   2.407 ++		return ret;
   2.408 ++	}
   2.409 + 	mm->total_vm += npages;
   2.410 + 	up_write(&mm->mmap_sem);
   2.411 + 	return 0;
   2.412 +diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
   2.413 +--- a/arch/x86_64/kernel/setup.c
   2.414 ++++ b/arch/x86_64/kernel/setup.c
   2.415 +@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
   2.416 + 	int cpu = smp_processor_id();
   2.417 + 	int node = 0;
   2.418 + 	unsigned bits;
   2.419 +-	if (c->x86_num_cores == 1)
   2.420 +-		return;
   2.421 + 
   2.422 + 	bits = 0;
   2.423 + 	while ((1 << bits) < c->x86_num_cores)
   2.424 +diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
   2.425 +--- a/arch/x86_64/kernel/smp.c
   2.426 ++++ b/arch/x86_64/kernel/smp.c
   2.427 +@@ -284,6 +284,71 @@ struct call_data_struct {
   2.428 + static struct call_data_struct * call_data;
   2.429 + 
   2.430 + /*
   2.431 ++ * this function sends a 'generic call function' IPI to one other CPU
   2.432 ++ * in the system.
   2.433 ++ */
   2.434 ++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
   2.435 ++				int nonatomic, int wait)
   2.436 ++{
   2.437 ++	struct call_data_struct data;
   2.438 ++	int cpus = 1;
   2.439 ++
   2.440 ++	data.func = func;
   2.441 ++	data.info = info;
   2.442 ++	atomic_set(&data.started, 0);
   2.443 ++	data.wait = wait;
   2.444 ++	if (wait)
   2.445 ++		atomic_set(&data.finished, 0);
   2.446 ++
   2.447 ++	call_data = &data;
   2.448 ++	wmb();
   2.449 ++	/* Send a message to all other CPUs and wait for them to respond */
   2.450 ++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
   2.451 ++
   2.452 ++	/* Wait for response */
   2.453 ++	while (atomic_read(&data.started) != cpus)
   2.454 ++		cpu_relax();
   2.455 ++
   2.456 ++	if (!wait)
   2.457 ++		return;
   2.458 ++
   2.459 ++	while (atomic_read(&data.finished) != cpus)
   2.460 ++		cpu_relax();
   2.461 ++}
   2.462 ++
   2.463 ++/*
   2.464 ++ * Run a function on another CPU
   2.465 ++ *  <func>	The function to run. This must be fast and non-blocking.
   2.466 ++ *  <info>	An arbitrary pointer to pass to the function.
   2.467 ++ *  <nonatomic>	Currently unused.
   2.468 ++ *  <wait>	If true, wait until function has completed on other CPUs.
   2.469 ++ *  [RETURNS]   0 on success, else a negative status code.
   2.470 ++ *
   2.471 ++ * Does not return until the remote CPU is nearly ready to execute <func>
   2.472 ++ * or is or has executed.
   2.473 ++ */
   2.474 ++
   2.475 ++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
   2.476 ++	int nonatomic, int wait)
   2.477 ++{
   2.478 ++	
   2.479 ++	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
   2.480 ++
   2.481 ++	if (cpu == me) {
   2.482 ++		printk("%s: trying to call self\n", __func__);
   2.483 ++		put_cpu();
   2.484 ++		return -EBUSY;
   2.485 ++	}
   2.486 ++	spin_lock_bh(&call_lock);
   2.487 ++
   2.488 ++	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
   2.489 ++
   2.490 ++	spin_unlock_bh(&call_lock);
   2.491 ++	put_cpu();
   2.492 ++	return 0;
   2.493 ++}
   2.494 ++
   2.495 ++/*
   2.496 +  * this function sends a 'generic call function' IPI to all other CPUs
   2.497 +  * in the system.
   2.498 +  */
   2.499 +diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
   2.500 +--- a/arch/x86_64/kernel/smpboot.c
   2.501 ++++ b/arch/x86_64/kernel/smpboot.c
   2.502 +@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
   2.503 + {
   2.504 + 	unsigned long flags, i;
   2.505 + 
   2.506 +-	if (smp_processor_id() != boot_cpu_id)
   2.507 +-		return;
   2.508 +-
   2.509 + 	go[MASTER] = 0;
   2.510 + 
   2.511 + 	local_irq_save(flags);
   2.512 +@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
   2.513 + 	return tcenter - best_tm;
   2.514 + }
   2.515 + 
   2.516 +-static __cpuinit void sync_tsc(void)
   2.517 ++static __cpuinit void sync_tsc(unsigned int master)
   2.518 + {
   2.519 + 	int i, done = 0;
   2.520 + 	long delta, adj, adjust_latency = 0;
   2.521 +@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
   2.522 + 	} t[NUM_ROUNDS] __cpuinitdata;
   2.523 + #endif
   2.524 + 
   2.525 ++	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
   2.526 ++		smp_processor_id(), master);
   2.527 ++
   2.528 + 	go[MASTER] = 1;
   2.529 + 
   2.530 +-	smp_call_function(sync_master, NULL, 1, 0);
   2.531 ++	/* It is dangerous to broadcast IPI as cpus are coming up,
   2.532 ++	 * as they may not be ready to accept them.  So since
   2.533 ++	 * we only need to send the ipi to the boot cpu direct
   2.534 ++	 * the message, and avoid the race.
   2.535 ++	 */
   2.536 ++	smp_call_function_single(master, sync_master, NULL, 1, 0);
   2.537 + 
   2.538 + 	while (go[MASTER])	/* wait for master to be ready */
   2.539 + 		no_cpu_relax();
   2.540 +@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
   2.541 + 	printk(KERN_INFO
   2.542 + 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
   2.543 + 	       "maxerr %lu cycles)\n",
   2.544 +-	       smp_processor_id(), boot_cpu_id, delta, rt);
   2.545 ++	       smp_processor_id(), master, delta, rt);
   2.546 + }
   2.547 + 
   2.548 + static void __cpuinit tsc_sync_wait(void)
   2.549 + {
   2.550 + 	if (notscsync || !cpu_has_tsc)
   2.551 + 		return;
   2.552 +-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
   2.553 +-			boot_cpu_id);
   2.554 +-	sync_tsc();
   2.555 ++	sync_tsc(0);
   2.556 + }
   2.557 + 
   2.558 + static __init int notscsync_setup(char *s)
   2.559 +diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
   2.560 +--- a/drivers/acpi/pci_irq.c
   2.561 ++++ b/drivers/acpi/pci_irq.c
   2.562 +@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
   2.563 + 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
   2.564 + 			pci_name(dev), ('A' + pin));
   2.565 + 		/* Interrupt Line values above 0xF are forbidden */
   2.566 +-		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
   2.567 ++		if (dev->irq > 0 && (dev->irq <= 0xF)) {
   2.568 + 			printk(" - using IRQ %d\n", dev->irq);
   2.569 ++			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   2.570 + 			return_VALUE(0);
   2.571 + 		}
   2.572 + 		else {
   2.573 +diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
   2.574 +--- a/drivers/char/rocket.c
   2.575 ++++ b/drivers/char/rocket.c
   2.576 +@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
   2.577 + 		ToRecv = space;
   2.578 + 
   2.579 + 	if (ToRecv <= 0)
   2.580 +-		return;
   2.581 ++		goto done;
   2.582 + 
   2.583 + 	/*
   2.584 + 	 * if status indicates there are errored characters in the
   2.585 +@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
   2.586 + 	}
   2.587 + 	/*  Push the data up to the tty layer */
   2.588 + 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
   2.589 ++done:
   2.590 + 	tty_ldisc_deref(ld);
   2.591 + }
   2.592 + 
   2.593 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
   2.594 +--- a/drivers/char/tpm/tpm.c
   2.595 ++++ b/drivers/char/tpm/tpm.c
   2.596 +@@ -32,12 +32,6 @@
   2.597 + 
   2.598 + #define	TPM_BUFSIZE			2048
   2.599 + 
   2.600 +-/* PCI configuration addresses */
   2.601 +-#define	PCI_GEN_PMCON_1			0xA0
   2.602 +-#define	PCI_GEN1_DEC			0xE4
   2.603 +-#define	PCI_LPC_EN			0xE6
   2.604 +-#define	PCI_GEN2_DEC			0xEC
   2.605 +-
   2.606 + static LIST_HEAD(tpm_chip_list);
   2.607 + static DEFINE_SPINLOCK(driver_lock);
   2.608 + static int dev_mask[32];
   2.609 +@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
   2.610 + EXPORT_SYMBOL_GPL(tpm_time_expired);
   2.611 + 
   2.612 + /*
   2.613 +- * Initialize the LPC bus and enable the TPM ports
   2.614 +- */
   2.615 +-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
   2.616 +-{
   2.617 +-	u32 lpcenable, tmp;
   2.618 +-	int is_lpcm = 0;
   2.619 +-
   2.620 +-	switch (pci_dev->vendor) {
   2.621 +-	case PCI_VENDOR_ID_INTEL:
   2.622 +-		switch (pci_dev->device) {
   2.623 +-		case PCI_DEVICE_ID_INTEL_82801CA_12:
   2.624 +-		case PCI_DEVICE_ID_INTEL_82801DB_12:
   2.625 +-			is_lpcm = 1;
   2.626 +-			break;
   2.627 +-		}
   2.628 +-		/* init ICH (enable LPC) */
   2.629 +-		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
   2.630 +-		lpcenable |= 0x20000000;
   2.631 +-		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
   2.632 +-
   2.633 +-		if (is_lpcm) {
   2.634 +-			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
   2.635 +-					      &lpcenable);
   2.636 +-			if ((lpcenable & 0x20000000) == 0) {
   2.637 +-				dev_err(&pci_dev->dev,
   2.638 +-					"cannot enable LPC\n");
   2.639 +-				return -ENODEV;
   2.640 +-			}
   2.641 +-		}
   2.642 +-
   2.643 +-		/* initialize TPM registers */
   2.644 +-		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
   2.645 +-
   2.646 +-		if (!is_lpcm)
   2.647 +-			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
   2.648 +-		else
   2.649 +-			tmp =
   2.650 +-			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
   2.651 +-			    0x00000001;
   2.652 +-
   2.653 +-		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
   2.654 +-
   2.655 +-		if (is_lpcm) {
   2.656 +-			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
   2.657 +-					      &tmp);
   2.658 +-			tmp |= 0x00000004;	/* enable CLKRUN */
   2.659 +-			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
   2.660 +-					       tmp);
   2.661 +-		}
   2.662 +-		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
   2.663 +-		tpm_write_index(0x0A, 0x00);	/* int disable */
   2.664 +-		tpm_write_index(0x08, base);	/* base addr lo */
   2.665 +-		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
   2.666 +-		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
   2.667 +-		break;
   2.668 +-	case PCI_VENDOR_ID_AMD:
   2.669 +-		/* nothing yet */
   2.670 +-		break;
   2.671 +-	}
   2.672 +-
   2.673 +-	return 0;
   2.674 +-}
   2.675 +-
   2.676 +-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
   2.677 +-
   2.678 +-/*
   2.679 +  * Internal kernel interface to transmit TPM commands
   2.680 +  */
   2.681 + static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   2.682 +@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
   2.683 + 	if (chip == NULL)
   2.684 + 		return -ENODEV;
   2.685 + 
   2.686 +-	spin_lock(&driver_lock);
   2.687 +-	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
   2.688 +-	spin_unlock(&driver_lock);
   2.689 +-
   2.690 + 	return 0;
   2.691 + }
   2.692 + 
   2.693 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
   2.694 +--- a/drivers/char/tpm/tpm.h
   2.695 ++++ b/drivers/char/tpm/tpm.h
   2.696 +@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
   2.697 + }
   2.698 + 
   2.699 + extern void tpm_time_expired(unsigned long);
   2.700 +-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
   2.701 +-
   2.702 + extern int tpm_register_hardware(struct pci_dev *,
   2.703 + 				 struct tpm_vendor_specific *);
   2.704 + extern int tpm_open(struct inode *, struct file *);
   2.705 +diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
   2.706 +--- a/drivers/char/tpm/tpm_atmel.c
   2.707 ++++ b/drivers/char/tpm/tpm_atmel.c
   2.708 +@@ -22,7 +22,10 @@
   2.709 + #include "tpm.h"
   2.710 + 
   2.711 + /* Atmel definitions */
   2.712 +-#define	TPM_ATML_BASE			0x400
   2.713 ++enum tpm_atmel_addr {
   2.714 ++	TPM_ATMEL_BASE_ADDR_LO = 0x08,
   2.715 ++	TPM_ATMEL_BASE_ADDR_HI = 0x09
   2.716 ++};
   2.717 + 
   2.718 + /* write status bits */
   2.719 + #define	ATML_STATUS_ABORT		0x01
   2.720 +@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
   2.721 + 	.cancel = tpm_atml_cancel,
   2.722 + 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
   2.723 + 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
   2.724 +-	.base = TPM_ATML_BASE,
   2.725 + 	.miscdev = { .fops = &atmel_ops, },
   2.726 + };
   2.727 + 
   2.728 +@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
   2.729 + {
   2.730 + 	u8 version[4];
   2.731 + 	int rc = 0;
   2.732 ++	int lo, hi;
   2.733 + 
   2.734 + 	if (pci_enable_device(pci_dev))
   2.735 + 		return -EIO;
   2.736 + 
   2.737 +-	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
   2.738 +-		rc = -ENODEV;
   2.739 +-		goto out_err;
   2.740 +-	}
   2.741 ++	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
   2.742 ++	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
   2.743 ++
   2.744 ++	tpm_atmel.base = (hi<<8)|lo;
   2.745 ++	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
   2.746 + 
   2.747 + 	/* verify that it is an Atmel part */
   2.748 + 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
   2.749 +diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
   2.750 +--- a/drivers/char/tpm/tpm_nsc.c
   2.751 ++++ b/drivers/char/tpm/tpm_nsc.c
   2.752 +@@ -24,6 +24,10 @@
   2.753 + /* National definitions */
   2.754 + #define	TPM_NSC_BASE			0x360
   2.755 + #define	TPM_NSC_IRQ			0x07
   2.756 ++#define	TPM_NSC_BASE0_HI		0x60
   2.757 ++#define	TPM_NSC_BASE0_LO		0x61
   2.758 ++#define	TPM_NSC_BASE1_HI		0x62
   2.759 ++#define	TPM_NSC_BASE1_LO		0x63
   2.760 + 
   2.761 + #define	NSC_LDN_INDEX			0x07
   2.762 + #define	NSC_SID_INDEX			0x20
   2.763 +@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
   2.764 + 	.cancel = tpm_nsc_cancel,
   2.765 + 	.req_complete_mask = NSC_STATUS_OBF,
   2.766 + 	.req_complete_val = NSC_STATUS_OBF,
   2.767 +-	.base = TPM_NSC_BASE,
   2.768 + 	.miscdev = { .fops = &nsc_ops, },
   2.769 + 	
   2.770 + };
   2.771 +@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
   2.772 + 				  const struct pci_device_id *pci_id)
   2.773 + {
   2.774 + 	int rc = 0;
   2.775 ++	int lo, hi;
   2.776 ++
   2.777 ++	hi = tpm_read_index(TPM_NSC_BASE0_HI);
   2.778 ++	lo = tpm_read_index(TPM_NSC_BASE0_LO);
   2.779 ++
   2.780 ++	tpm_nsc.base = (hi<<8) | lo;
   2.781 + 
   2.782 + 	if (pci_enable_device(pci_dev))
   2.783 + 		return -EIO;
   2.784 + 
   2.785 +-	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
   2.786 +-		rc = -ENODEV;
   2.787 +-		goto out_err;
   2.788 +-	}
   2.789 +-
   2.790 + 	/* verify that it is a National part (SID) */
   2.791 + 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
   2.792 + 		rc = -ENODEV;
   2.793 +diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
   2.794 +--- a/drivers/char/tty_ioctl.c
   2.795 ++++ b/drivers/char/tty_ioctl.c
   2.796 +@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
   2.797 + 			ld = tty_ldisc_ref(tty);
   2.798 + 			switch (arg) {
   2.799 + 			case TCIFLUSH:
   2.800 +-				if (ld->flush_buffer)
   2.801 ++				if (ld && ld->flush_buffer)
   2.802 + 					ld->flush_buffer(tty);
   2.803 + 				break;
   2.804 + 			case TCIOFLUSH:
   2.805 +-				if (ld->flush_buffer)
   2.806 ++				if (ld && ld->flush_buffer)
   2.807 + 					ld->flush_buffer(tty);
   2.808 + 				/* fall through */
   2.809 + 			case TCOFLUSH:
   2.810 +diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
   2.811 +--- a/drivers/media/video/cx88/cx88-video.c
   2.812 ++++ b/drivers/media/video/cx88/cx88-video.c
   2.813 +@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
   2.814 + 			.default_value = 0,
   2.815 + 			.type          = V4L2_CTRL_TYPE_INTEGER,
   2.816 + 		},
   2.817 +-		.off                   = 0,
   2.818 ++		.off                   = 128,
   2.819 + 		.reg                   = MO_HUE,
   2.820 + 		.mask                  = 0x00ff,
   2.821 + 		.shift                 = 0,
   2.822 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   2.823 +--- a/drivers/net/e1000/e1000_main.c
   2.824 ++++ b/drivers/net/e1000/e1000_main.c
   2.825 +@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   2.826 + 	tso = e1000_tso(adapter, skb);
   2.827 + 	if (tso < 0) {
   2.828 + 		dev_kfree_skb_any(skb);
   2.829 ++		spin_unlock_irqrestore(&adapter->tx_lock, flags);
   2.830 + 		return NETDEV_TX_OK;
   2.831 + 	}
   2.832 + 
   2.833 +diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
   2.834 +--- a/drivers/net/hamradio/Kconfig
   2.835 ++++ b/drivers/net/hamradio/Kconfig
   2.836 +@@ -17,7 +17,7 @@ config MKISS
   2.837 + 
   2.838 + config 6PACK
   2.839 + 	tristate "Serial port 6PACK driver"
   2.840 +-	depends on AX25 && BROKEN_ON_SMP
   2.841 ++	depends on AX25
   2.842 + 	---help---
   2.843 + 	  6pack is a transmission protocol for the data exchange between your
   2.844 + 	  PC and your TNC (the Terminal Node Controller acts as a kind of
   2.845 +diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
   2.846 +--- a/drivers/net/shaper.c
   2.847 ++++ b/drivers/net/shaper.c
   2.848 +@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
   2.849 + {
   2.850 + 	struct shaper *shaper = dev->priv;
   2.851 +  	struct sk_buff *ptr;
   2.852 +-   
   2.853 +-	if (down_trylock(&shaper->sem))
   2.854 +-		return -1;
   2.855 + 
   2.856 ++	spin_lock(&shaper->lock);
   2.857 +  	ptr=shaper->sendq.prev;
   2.858 +  	
   2.859 +  	/*
   2.860 +@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
   2.861 +                 shaper->stats.collisions++;
   2.862 +  	}
   2.863 + 	shaper_kick(shaper);
   2.864 +-	up(&shaper->sem);
   2.865 ++	spin_unlock(&shaper->lock);
   2.866 +  	return 0;
   2.867 + }
   2.868 + 
   2.869 +@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
   2.870 + {
   2.871 + 	struct shaper *shaper = (struct shaper *)data;
   2.872 + 
   2.873 +-	if (!down_trylock(&shaper->sem)) {
   2.874 +-		shaper_kick(shaper);
   2.875 +-		up(&shaper->sem);
   2.876 +-	} else
   2.877 +-		mod_timer(&shaper->timer, jiffies);
   2.878 ++	spin_lock(&shaper->lock);
   2.879 ++	shaper_kick(shaper);
   2.880 ++	spin_unlock(&shaper->lock);
   2.881 + }
   2.882 + 
   2.883 + /*
   2.884 +@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
   2.885 + 
   2.886 + 
   2.887 + /*
   2.888 +- *	Flush the shaper queues on a closedown
   2.889 +- */
   2.890 +- 
   2.891 +-static void shaper_flush(struct shaper *shaper)
   2.892 +-{
   2.893 +-	struct sk_buff *skb;
   2.894 +-
   2.895 +-	down(&shaper->sem);
   2.896 +-	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
   2.897 +-		dev_kfree_skb(skb);
   2.898 +-	shaper_kick(shaper);
   2.899 +-	up(&shaper->sem);
   2.900 +-}
   2.901 +-
   2.902 +-/*
   2.903 +  *	Bring the interface up. We just disallow this until a 
   2.904 +  *	bind.
   2.905 +  */
   2.906 +@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
   2.907 + static int shaper_close(struct net_device *dev)
   2.908 + {
   2.909 + 	struct shaper *shaper=dev->priv;
   2.910 +-	shaper_flush(shaper);
   2.911 ++	struct sk_buff *skb;
   2.912 ++
   2.913 ++	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
   2.914 ++		dev_kfree_skb(skb);
   2.915 ++
   2.916 ++	spin_lock_bh(&shaper->lock);
   2.917 ++	shaper_kick(shaper);
   2.918 ++	spin_unlock_bh(&shaper->lock);
   2.919 ++
   2.920 + 	del_timer_sync(&shaper->timer);
   2.921 + 	return 0;
   2.922 + }
   2.923 +@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
   2.924 + 	init_timer(&sh->timer);
   2.925 + 	sh->timer.function=shaper_timer;
   2.926 + 	sh->timer.data=(unsigned long)sh;
   2.927 ++	spin_lock_init(&sh->lock);
   2.928 + }
   2.929 + 
   2.930 + /*
   2.931 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
   2.932 +--- a/drivers/pci/pci-driver.c
   2.933 ++++ b/drivers/pci/pci-driver.c
   2.934 +@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
   2.935 + 	/* FIXME, once all of the existing PCI drivers have been fixed to set
   2.936 + 	 * the pci shutdown function, this test can go away. */
   2.937 + 	if (!drv->driver.shutdown)
   2.938 +-		drv->driver.shutdown = pci_device_shutdown,
   2.939 ++		drv->driver.shutdown = pci_device_shutdown;
   2.940 + 	drv->driver.owner = drv->owner;
   2.941 + 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
   2.942 + 	pci_init_dynids(&drv->dynids);
   2.943 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
   2.944 +--- a/drivers/scsi/qla2xxx/qla_init.c
   2.945 ++++ b/drivers/scsi/qla2xxx/qla_init.c
   2.946 +@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
   2.947 + 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
   2.948 + 
   2.949 + 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
   2.950 +-	if (!rport)
   2.951 ++	if (!rport) {
   2.952 + 		qla_printk(KERN_WARNING, ha,
   2.953 + 		    "Unable to allocate fc remote port!\n");
   2.954 ++		return;
   2.955 ++	}
   2.956 + 
   2.957 + 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
   2.958 + 		fcport->os_target_id = rport->scsi_target_id;
   2.959 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
   2.960 +--- a/drivers/scsi/qla2xxx/qla_os.c
   2.961 ++++ b/drivers/scsi/qla2xxx/qla_os.c
   2.962 +@@ -1150,7 +1150,7 @@ iospace_error_exit:
   2.963 +  */
   2.964 + int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
   2.965 + {
   2.966 +-	int	ret;
   2.967 ++	int	ret = -ENODEV;
   2.968 + 	device_reg_t __iomem *reg;
   2.969 + 	struct Scsi_Host *host;
   2.970 + 	scsi_qla_host_t *ha;
   2.971 +@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.972 + 	fc_port_t *fcport;
   2.973 + 
   2.974 + 	if (pci_enable_device(pdev))
   2.975 +-		return -1;
   2.976 ++		goto probe_out;
   2.977 + 
   2.978 + 	host = scsi_host_alloc(&qla2x00_driver_template,
   2.979 + 	    sizeof(scsi_qla_host_t));
   2.980 +@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.981 + 
   2.982 + 	/* Configure PCI I/O space */
   2.983 + 	ret = qla2x00_iospace_config(ha);
   2.984 +-	if (ret != 0) {
   2.985 +-		goto probe_alloc_failed;
   2.986 +-	}
   2.987 ++	if (ret)
   2.988 ++		goto probe_failed;
   2.989 + 
   2.990 + 	/* Sanitize the information from PCI BIOS. */
   2.991 + 	host->irq = pdev->irq;
   2.992 +@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
   2.993 + 		qla_printk(KERN_WARNING, ha,
   2.994 + 		    "[ERROR] Failed to allocate memory for adapter\n");
   2.995 + 
   2.996 +-		goto probe_alloc_failed;
   2.997 ++		ret = -ENOMEM;
   2.998 ++		goto probe_failed;
   2.999 + 	}
  2.1000 + 
  2.1001 +-	pci_set_drvdata(pdev, ha);
  2.1002 +-	host->this_id = 255;
  2.1003 +-	host->cmd_per_lun = 3;
  2.1004 +-	host->unique_id = ha->instance;
  2.1005 +-	host->max_cmd_len = MAX_CMDSZ;
  2.1006 +-	host->max_channel = ha->ports - 1;
  2.1007 +-	host->max_id = ha->max_targets;
  2.1008 +-	host->max_lun = ha->max_luns;
  2.1009 +-	host->transportt = qla2xxx_transport_template;
  2.1010 +-	if (scsi_add_host(host, &pdev->dev))
  2.1011 +-		goto probe_alloc_failed;
  2.1012 +-
  2.1013 +-	qla2x00_alloc_sysfs_attr(ha);
  2.1014 +-
  2.1015 + 	if (qla2x00_initialize_adapter(ha) &&
  2.1016 + 	    !(ha->device_flags & DFLG_NO_CABLE)) {
  2.1017 + 
  2.1018 +@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1019 + 		    "Adapter flags %x.\n",
  2.1020 + 		    ha->host_no, ha->device_flags));
  2.1021 + 
  2.1022 ++		ret = -ENODEV;
  2.1023 + 		goto probe_failed;
  2.1024 + 	}
  2.1025 + 
  2.1026 +-	qla2x00_init_host_attr(ha);
  2.1027 +-
  2.1028 + 	/*
  2.1029 + 	 * Startup the kernel thread for this host adapter
  2.1030 + 	 */
  2.1031 +@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1032 + 		qla_printk(KERN_WARNING, ha,
  2.1033 + 		    "Unable to start DPC thread!\n");
  2.1034 + 
  2.1035 ++		ret = -ENODEV;
  2.1036 + 		goto probe_failed;
  2.1037 + 	}
  2.1038 + 	wait_for_completion(&ha->dpc_inited);
  2.1039 + 
  2.1040 ++	host->this_id = 255;
  2.1041 ++	host->cmd_per_lun = 3;
  2.1042 ++	host->unique_id = ha->instance;
  2.1043 ++	host->max_cmd_len = MAX_CMDSZ;
  2.1044 ++	host->max_channel = ha->ports - 1;
  2.1045 ++	host->max_lun = MAX_LUNS;
  2.1046 ++	host->transportt = qla2xxx_transport_template;
  2.1047 ++
  2.1048 + 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
  2.1049 + 		ret = request_irq(host->irq, qla2100_intr_handler,
  2.1050 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  2.1051 + 	else
  2.1052 + 		ret = request_irq(host->irq, qla2300_intr_handler,
  2.1053 + 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  2.1054 +-	if (ret != 0) {
  2.1055 ++	if (ret) {
  2.1056 + 		qla_printk(KERN_WARNING, ha,
  2.1057 + 		    "Failed to reserve interrupt %d already in use.\n",
  2.1058 + 		    host->irq);
  2.1059 +@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1060 + 		msleep(10);
  2.1061 + 	}
  2.1062 + 
  2.1063 ++	pci_set_drvdata(pdev, ha);
  2.1064 + 	ha->flags.init_done = 1;
  2.1065 + 	num_hosts++;
  2.1066 + 
  2.1067 ++	ret = scsi_add_host(host, &pdev->dev);
  2.1068 ++	if (ret)
  2.1069 ++		goto probe_failed;
  2.1070 ++
  2.1071 ++	qla2x00_alloc_sysfs_attr(ha);
  2.1072 ++
  2.1073 ++	qla2x00_init_host_attr(ha);
  2.1074 ++
  2.1075 + 	qla_printk(KERN_INFO, ha, "\n"
  2.1076 + 	    " QLogic Fibre Channel HBA Driver: %s\n"
  2.1077 + 	    "  QLogic %s - %s\n"
  2.1078 +@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
  2.1079 + probe_failed:
  2.1080 + 	fc_remove_host(ha->host);
  2.1081 + 
  2.1082 +-	scsi_remove_host(host);
  2.1083 +-
  2.1084 +-probe_alloc_failed:
  2.1085 + 	qla2x00_free_device(ha);
  2.1086 + 
  2.1087 + 	scsi_host_put(host);
  2.1088 +@@ -1394,7 +1394,8 @@ probe_alloc_failed:
  2.1089 + probe_disable_device:
  2.1090 + 	pci_disable_device(pdev);
  2.1091 + 
  2.1092 +-	return -1;
  2.1093 ++probe_out:
  2.1094 ++	return ret;
  2.1095 + }
  2.1096 + EXPORT_SYMBOL_GPL(qla2x00_probe_one);
  2.1097 + 
  2.1098 +diff --git a/fs/bio.c b/fs/bio.c
  2.1099 +--- a/fs/bio.c
  2.1100 ++++ b/fs/bio.c
  2.1101 +@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
  2.1102 + 	 */
  2.1103 + 	bio->bi_vcnt = bio_src->bi_vcnt;
  2.1104 + 	bio->bi_size = bio_src->bi_size;
  2.1105 ++	bio->bi_idx = bio_src->bi_idx;
  2.1106 + 	bio_phys_segments(q, bio);
  2.1107 + 	bio_hw_segments(q, bio);
  2.1108 + }
  2.1109 +diff --git a/fs/char_dev.c b/fs/char_dev.c
  2.1110 +--- a/fs/char_dev.c
  2.1111 ++++ b/fs/char_dev.c
  2.1112 +@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
  2.1113 + 	struct char_device_struct *cd = NULL, **cp;
  2.1114 + 	int i = major_to_index(major);
  2.1115 + 
  2.1116 +-	up(&chrdevs_lock);
  2.1117 ++	down(&chrdevs_lock);
  2.1118 + 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
  2.1119 + 		if ((*cp)->major == major &&
  2.1120 + 		    (*cp)->baseminor == baseminor &&
  2.1121 +diff --git a/fs/exec.c b/fs/exec.c
  2.1122 +--- a/fs/exec.c
  2.1123 ++++ b/fs/exec.c
  2.1124 +@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
  2.1125 + 	}
  2.1126 + 	sig->group_exit_task = NULL;
  2.1127 + 	sig->notify_count = 0;
  2.1128 ++	sig->real_timer.data = (unsigned long)current;
  2.1129 + 	spin_unlock_irq(lock);
  2.1130 + 
  2.1131 + 	/*
  2.1132 +diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
  2.1133 +--- a/fs/isofs/compress.c
  2.1134 ++++ b/fs/isofs/compress.c
  2.1135 +@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
  2.1136 + 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
  2.1137 + 	brelse(bh);
  2.1138 + 
  2.1139 ++	if (cstart > cend)
  2.1140 ++		goto eio;
  2.1141 ++		
  2.1142 + 	csize = cend-cstart;
  2.1143 + 
  2.1144 ++	if (csize > deflateBound(1UL << zisofs_block_shift))
  2.1145 ++		goto eio;
  2.1146 ++
  2.1147 + 	/* Now page[] contains an array of pages, any of which can be NULL,
  2.1148 + 	   and the locks on which we hold.  We should now read the data and
  2.1149 + 	   release the pages.  If the pages are NULL the decompressed data
  2.1150 +diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
  2.1151 +--- a/include/asm-i386/string.h
  2.1152 ++++ b/include/asm-i386/string.h
  2.1153 +@@ -116,7 +116,8 @@ __asm__ __volatile__(
  2.1154 + 	"orb $1,%%al\n"
  2.1155 + 	"3:"
  2.1156 + 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
  2.1157 +-		     :"1" (cs),"2" (ct));
  2.1158 ++	:"1" (cs),"2" (ct)
  2.1159 ++	:"memory");
  2.1160 + return __res;
  2.1161 + }
  2.1162 + 
  2.1163 +@@ -138,8 +139,9 @@ __asm__ __volatile__(
  2.1164 + 	"3:\tsbbl %%eax,%%eax\n\t"
  2.1165 + 	"orb $1,%%al\n"
  2.1166 + 	"4:"
  2.1167 +-		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  2.1168 +-		     :"1" (cs),"2" (ct),"3" (count));
  2.1169 ++	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  2.1170 ++	:"1" (cs),"2" (ct),"3" (count)
  2.1171 ++	:"memory");
  2.1172 + return __res;
  2.1173 + }
  2.1174 + 
  2.1175 +@@ -158,7 +160,9 @@ __asm__ __volatile__(
  2.1176 + 	"movl $1,%1\n"
  2.1177 + 	"2:\tmovl %1,%0\n\t"
  2.1178 + 	"decl %0"
  2.1179 +-	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
  2.1180 ++	:"=a" (__res), "=&S" (d0)
  2.1181 ++	:"1" (s),"0" (c)
  2.1182 ++	:"memory");
  2.1183 + return __res;
  2.1184 + }
  2.1185 + 
  2.1186 +@@ -175,7 +179,9 @@ __asm__ __volatile__(
  2.1187 + 	"leal -1(%%esi),%0\n"
  2.1188 + 	"2:\ttestb %%al,%%al\n\t"
  2.1189 + 	"jne 1b"
  2.1190 +-	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
  2.1191 ++	:"=g" (__res), "=&S" (d0), "=&a" (d1)
  2.1192 ++	:"0" (0),"1" (s),"2" (c)
  2.1193 ++	:"memory");
  2.1194 + return __res;
  2.1195 + }
  2.1196 + 
  2.1197 +@@ -189,7 +195,9 @@ __asm__ __volatile__(
  2.1198 + 	"scasb\n\t"
  2.1199 + 	"notl %0\n\t"
  2.1200 + 	"decl %0"
  2.1201 +-	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
  2.1202 ++	:"=c" (__res), "=&D" (d0)
  2.1203 ++	:"1" (s),"a" (0), "0" (0xffffffffu)
  2.1204 ++	:"memory");
  2.1205 + return __res;
  2.1206 + }
  2.1207 + 
  2.1208 +@@ -333,7 +341,9 @@ __asm__ __volatile__(
  2.1209 + 	"je 1f\n\t"
  2.1210 + 	"movl $1,%0\n"
  2.1211 + 	"1:\tdecl %0"
  2.1212 +-	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
  2.1213 ++	:"=D" (__res), "=&c" (d0)
  2.1214 ++	:"a" (c),"0" (cs),"1" (count)
  2.1215 ++	:"memory");
  2.1216 + return __res;
  2.1217 + }
  2.1218 + 
  2.1219 +@@ -369,7 +379,7 @@ __asm__ __volatile__(
  2.1220 + 	"je 2f\n\t"
  2.1221 + 	"stosb\n"
  2.1222 + 	"2:"
  2.1223 +-	: "=&c" (d0), "=&D" (d1)
  2.1224 ++	:"=&c" (d0), "=&D" (d1)
  2.1225 + 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
  2.1226 + 	:"memory");
  2.1227 + return (s);	
  2.1228 +@@ -392,7 +402,8 @@ __asm__ __volatile__(
  2.1229 + 	"jne 1b\n"
  2.1230 + 	"3:\tsubl %2,%0"
  2.1231 + 	:"=a" (__res), "=&d" (d0)
  2.1232 +-	:"c" (s),"1" (count));
  2.1233 ++	:"c" (s),"1" (count)
  2.1234 ++	:"memory");
  2.1235 + return __res;
  2.1236 + }
  2.1237 + /* end of additional stuff */
  2.1238 +@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
  2.1239 + 		"dec %%edi\n"
  2.1240 + 		"1:"
  2.1241 + 		: "=D" (addr), "=c" (size)
  2.1242 +-		: "0" (addr), "1" (size), "a" (c));
  2.1243 ++		: "0" (addr), "1" (size), "a" (c)
  2.1244 ++		: "memory");
  2.1245 + 	return addr;
  2.1246 + }
  2.1247 + 
  2.1248 +diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
  2.1249 +--- a/include/asm-x86_64/smp.h
  2.1250 ++++ b/include/asm-x86_64/smp.h
  2.1251 +@@ -46,6 +46,8 @@ extern int pic_mode;
  2.1252 + extern int smp_num_siblings;
  2.1253 + extern void smp_flush_tlb(void);
  2.1254 + extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
  2.1255 ++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  2.1256 ++				     int retry, int wait);
  2.1257 + extern void smp_send_reschedule(int cpu);
  2.1258 + extern void smp_invalidate_rcv(void);		/* Process an NMI */
  2.1259 + extern void zap_low_mappings(void);
  2.1260 +diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
  2.1261 +--- a/include/linux/if_shaper.h
  2.1262 ++++ b/include/linux/if_shaper.h
  2.1263 +@@ -23,7 +23,7 @@ struct shaper
  2.1264 + 	__u32 shapeclock;
  2.1265 + 	unsigned long recovery;	/* Time we can next clock a packet out on
  2.1266 + 				   an empty queue */
  2.1267 +-	struct semaphore sem;
  2.1268 ++	spinlock_t lock;
  2.1269 +         struct net_device_stats stats;
  2.1270 + 	struct net_device *dev;
  2.1271 + 	int  (*hard_start_xmit) (struct sk_buff *skb,
  2.1272 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
  2.1273 +--- a/include/linux/skbuff.h
  2.1274 ++++ b/include/linux/skbuff.h
  2.1275 +@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
  2.1276 + {
  2.1277 + 	int hlen = skb_headlen(skb);
  2.1278 + 
  2.1279 +-	if (offset + len <= hlen)
  2.1280 ++	if (hlen - offset >= len)
  2.1281 + 		return skb->data + offset;
  2.1282 + 
  2.1283 + 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
  2.1284 +diff --git a/include/linux/zlib.h b/include/linux/zlib.h
  2.1285 +--- a/include/linux/zlib.h
  2.1286 ++++ b/include/linux/zlib.h
  2.1287 +@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
  2.1288 +    stream state was inconsistent (such as zalloc or state being NULL).
  2.1289 + */
  2.1290 + 
  2.1291 ++static inline unsigned long deflateBound(unsigned long s)
  2.1292 ++{
  2.1293 ++	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
  2.1294 ++}
  2.1295 ++
  2.1296 + extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
  2.1297 + /*
  2.1298 +      Dynamically update the compression level and compression strategy.  The
  2.1299 +diff --git a/kernel/module.c b/kernel/module.c
  2.1300 +--- a/kernel/module.c
  2.1301 ++++ b/kernel/module.c
  2.1302 +@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
  2.1303 + /* Created by linker magic */
  2.1304 + extern char __per_cpu_start[], __per_cpu_end[];
  2.1305 + 
  2.1306 +-static void *percpu_modalloc(unsigned long size, unsigned long align)
  2.1307 ++static void *percpu_modalloc(unsigned long size, unsigned long align,
  2.1308 ++			     const char *name)
  2.1309 + {
  2.1310 + 	unsigned long extra;
  2.1311 + 	unsigned int i;
  2.1312 + 	void *ptr;
  2.1313 + 
  2.1314 +-	BUG_ON(align > SMP_CACHE_BYTES);
  2.1315 ++	if (align > SMP_CACHE_BYTES) {
  2.1316 ++		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
  2.1317 ++		       name, align, SMP_CACHE_BYTES);
  2.1318 ++		align = SMP_CACHE_BYTES;
  2.1319 ++	}
  2.1320 + 
  2.1321 + 	ptr = __per_cpu_start;
  2.1322 + 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  2.1323 +@@ -347,7 +352,8 @@ static int percpu_modinit(void)
  2.1324 + }	
  2.1325 + __initcall(percpu_modinit);
  2.1326 + #else /* ... !CONFIG_SMP */
  2.1327 +-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
  2.1328 ++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
  2.1329 ++				    const char *name)
  2.1330 + {
  2.1331 + 	return NULL;
  2.1332 + }
  2.1333 +@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
  2.1334 + 	if (pcpuindex) {
  2.1335 + 		/* We have a special allocation for this section. */
  2.1336 + 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
  2.1337 +-					 sechdrs[pcpuindex].sh_addralign);
  2.1338 ++					 sechdrs[pcpuindex].sh_addralign,
  2.1339 ++					 mod->name);
  2.1340 + 		if (!percpu) {
  2.1341 + 			err = -ENOMEM;
  2.1342 + 			goto free_mod;
  2.1343 +diff --git a/lib/inflate.c b/lib/inflate.c
  2.1344 +--- a/lib/inflate.c
  2.1345 ++++ b/lib/inflate.c
  2.1346 +@@ -326,7 +326,7 @@ DEBG("huft1 ");
  2.1347 +   {
  2.1348 +     *t = (struct huft *)NULL;
  2.1349 +     *m = 0;
  2.1350 +-    return 0;
  2.1351 ++    return 2;
  2.1352 +   }
  2.1353 + 
  2.1354 + DEBG("huft2 ");
  2.1355 +@@ -374,6 +374,7 @@ DEBG("huft5 ");
  2.1356 +     if ((j = *p++) != 0)
  2.1357 +       v[x[j]++] = i;
  2.1358 +   } while (++i < n);
  2.1359 ++  n = x[g];                   /* set n to length of v */
  2.1360 + 
  2.1361 + DEBG("h6 ");
  2.1362 + 
  2.1363 +@@ -410,12 +411,13 @@ DEBG1("1 ");
  2.1364 + DEBG1("2 ");
  2.1365 +           f -= a + 1;           /* deduct codes from patterns left */
  2.1366 +           xp = c + k;
  2.1367 +-          while (++j < z)       /* try smaller tables up to z bits */
  2.1368 +-          {
  2.1369 +-            if ((f <<= 1) <= *++xp)
  2.1370 +-              break;            /* enough codes to use up j bits */
  2.1371 +-            f -= *xp;           /* else deduct codes from patterns */
  2.1372 +-          }
  2.1373 ++          if (j < z)
  2.1374 ++            while (++j < z)       /* try smaller tables up to z bits */
  2.1375 ++            {
  2.1376 ++              if ((f <<= 1) <= *++xp)
  2.1377 ++                break;            /* enough codes to use up j bits */
  2.1378 ++              f -= *xp;           /* else deduct codes from patterns */
  2.1379 ++            }
  2.1380 +         }
  2.1381 + DEBG1("3 ");
  2.1382 +         z = 1 << j;             /* table entries for j-bit table */
  2.1383 +diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
  2.1384 +--- a/lib/zlib_inflate/inftrees.c
  2.1385 ++++ b/lib/zlib_inflate/inftrees.c
  2.1386 +@@ -141,7 +141,7 @@ static int huft_build(
  2.1387 +   {
  2.1388 +     *t = NULL;
  2.1389 +     *m = 0;
  2.1390 +-    return Z_OK;
  2.1391 ++    return Z_DATA_ERROR;
  2.1392 +   }
  2.1393 + 
  2.1394 + 
  2.1395 +diff --git a/mm/memory.c b/mm/memory.c
  2.1396 +--- a/mm/memory.c
  2.1397 ++++ b/mm/memory.c
  2.1398 +@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
  2.1399 + {
  2.1400 + 	pgd_t *pgd;
  2.1401 + 	unsigned long next;
  2.1402 +-	unsigned long end = addr + size;
  2.1403 ++	unsigned long end = addr + PAGE_ALIGN(size);
  2.1404 + 	struct mm_struct *mm = vma->vm_mm;
  2.1405 + 	int err;
  2.1406 + 
  2.1407 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
  2.1408 +--- a/mm/mempolicy.c
  2.1409 ++++ b/mm/mempolicy.c
  2.1410 +@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
  2.1411 + 	struct mempolicy *new;
  2.1412 + 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
  2.1413 + 
  2.1414 +-	if (mode > MPOL_MAX)
  2.1415 ++	if (mode < 0 || mode > MPOL_MAX)
  2.1416 + 		return -EINVAL;
  2.1417 + 	err = get_nodes(nodes, nmask, maxnode, mode);
  2.1418 + 	if (err)
  2.1419 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
  2.1420 +--- a/net/8021q/vlan.c
  2.1421 ++++ b/net/8021q/vlan.c
  2.1422 +@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
  2.1423 + 			if (!vlandev)
  2.1424 + 				continue;
  2.1425 + 
  2.1426 ++			if (netif_carrier_ok(dev)) {
  2.1427 ++				if (!netif_carrier_ok(vlandev))
  2.1428 ++					netif_carrier_on(vlandev);
  2.1429 ++			} else {
  2.1430 ++				if (netif_carrier_ok(vlandev))
  2.1431 ++					netif_carrier_off(vlandev);
  2.1432 ++			}
  2.1433 ++
  2.1434 + 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
  2.1435 + 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
  2.1436 + 					| flgs;
  2.1437 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  2.1438 +--- a/net/ipv4/ip_output.c
  2.1439 ++++ b/net/ipv4/ip_output.c
  2.1440 +@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
  2.1441 + #ifdef CONFIG_NETFILTER_DEBUG
  2.1442 + 	nf_debug_ip_loopback_xmit(newskb);
  2.1443 + #endif
  2.1444 +-	nf_reset(newskb);
  2.1445 + 	netif_rx(newskb);
  2.1446 + 	return 0;
  2.1447 + }
  2.1448 +@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
  2.1449 + 	nf_debug_ip_finish_output2(skb);
  2.1450 + #endif /*CONFIG_NETFILTER_DEBUG*/
  2.1451 + 
  2.1452 +-	nf_reset(skb);
  2.1453 +-
  2.1454 + 	if (hh) {
  2.1455 + 		int hh_alen;
  2.1456 + 
  2.1457 +diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
  2.1458 +--- a/net/ipv4/netfilter/ip_conntrack_core.c
  2.1459 ++++ b/net/ipv4/netfilter/ip_conntrack_core.c
  2.1460 +@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
  2.1461 + 		schedule();
  2.1462 + 		goto i_see_dead_people;
  2.1463 + 	}
  2.1464 ++	/* wait until all references to ip_conntrack_untracked are dropped */
  2.1465 ++	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
  2.1466 ++		schedule();
  2.1467 + 
  2.1468 + 	kmem_cache_destroy(ip_conntrack_cachep);
  2.1469 + 	kmem_cache_destroy(ip_conntrack_expect_cachep);
  2.1470 +diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1471 +--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1472 ++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
  2.1473 +@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
  2.1474 + 				        const struct net_device *out,
  2.1475 + 				        int (*okfn)(struct sk_buff *))
  2.1476 + {
  2.1477 ++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
  2.1478 ++	/* Previously seen (loopback)?  Ignore.  Do this before
  2.1479 ++           fragment check. */
  2.1480 ++	if ((*pskb)->nfct)
  2.1481 ++		return NF_ACCEPT;
  2.1482 ++#endif
  2.1483 ++
  2.1484 + 	/* Gather fragments. */
  2.1485 + 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
  2.1486 + 		*pskb = ip_ct_gather_frags(*pskb,
  2.1487 +diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1488 +--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1489 ++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  2.1490 +@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
  2.1491 + 		 enum ip_nat_manip_type maniptype,
  2.1492 + 		 const struct ip_conntrack *conntrack)
  2.1493 + {
  2.1494 +-	static u_int16_t port, *portptr;
  2.1495 ++	static u_int16_t port;
  2.1496 ++	u_int16_t *portptr;
  2.1497 + 	unsigned int range_size, min, i;
  2.1498 + 
  2.1499 + 	if (maniptype == IP_NAT_MANIP_SRC)
  2.1500 +diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1501 +--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1502 ++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
  2.1503 +@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
  2.1504 + 		 enum ip_nat_manip_type maniptype,
  2.1505 + 		 const struct ip_conntrack *conntrack)
  2.1506 + {
  2.1507 +-	static u_int16_t port, *portptr;
  2.1508 ++	static u_int16_t port;
  2.1509 ++	u_int16_t *portptr;
  2.1510 + 	unsigned int range_size, min, i;
  2.1511 + 
  2.1512 + 	if (maniptype == IP_NAT_MANIP_SRC)
  2.1513 +diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
  2.1514 +--- a/net/ipv6/netfilter/ip6_queue.c
  2.1515 ++++ b/net/ipv6/netfilter/ip6_queue.c
  2.1516 +@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
  2.1517 + static void
  2.1518 + ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
  2.1519 + {
  2.1520 ++	local_bh_disable();
  2.1521 + 	nf_reinject(entry->skb, entry->info, verdict);
  2.1522 ++	local_bh_enable();
  2.1523 + 	kfree(entry);
  2.1524 + }
  2.1525 + 
  2.1526 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
  2.1527 +--- a/net/netlink/af_netlink.c
  2.1528 ++++ b/net/netlink/af_netlink.c
  2.1529 +@@ -315,8 +315,8 @@ err:
  2.1530 + static void netlink_remove(struct sock *sk)
  2.1531 + {
  2.1532 + 	netlink_table_grab();
  2.1533 +-	nl_table[sk->sk_protocol].hash.entries--;
  2.1534 +-	sk_del_node_init(sk);
  2.1535 ++	if (sk_del_node_init(sk))
  2.1536 ++		nl_table[sk->sk_protocol].hash.entries--;
  2.1537 + 	if (nlk_sk(sk)->groups)
  2.1538 + 		__sk_del_bind_node(sk);
  2.1539 + 	netlink_table_ungrab();
  2.1540 +@@ -429,7 +429,12 @@ retry:
  2.1541 + 	err = netlink_insert(sk, pid);
  2.1542 + 	if (err == -EADDRINUSE)
  2.1543 + 		goto retry;
  2.1544 +-	return 0;
  2.1545 ++
  2.1546 ++	/* If 2 threads race to autobind, that is fine.  */
  2.1547 ++	if (err == -EBUSY)
  2.1548 ++		err = 0;
  2.1549 ++
  2.1550 ++	return err;
  2.1551 + }
  2.1552 + 
  2.1553 + static inline int netlink_capable(struct socket *sock, unsigned int flag) 
  2.1554 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
  2.1555 +--- a/net/packet/af_packet.c
  2.1556 ++++ b/net/packet/af_packet.c
  2.1557 +@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
  2.1558 + 	dst_release(skb->dst);
  2.1559 + 	skb->dst = NULL;
  2.1560 + 
  2.1561 ++	/* drop conntrack reference */
  2.1562 ++	nf_reset(skb);
  2.1563 ++
  2.1564 + 	spkt = (struct sockaddr_pkt*)skb->cb;
  2.1565 + 
  2.1566 + 	skb_push(skb, skb->data-skb->mac.raw);
  2.1567 +@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
  2.1568 + 	dst_release(skb->dst);
  2.1569 + 	skb->dst = NULL;
  2.1570 + 
  2.1571 ++	/* drop conntrack reference */
  2.1572 ++	nf_reset(skb);
  2.1573 ++
  2.1574 + 	spin_lock(&sk->sk_receive_queue.lock);
  2.1575 + 	po->stats.tp_packets++;
  2.1576 + 	__skb_queue_tail(&sk->sk_receive_queue, skb);
  2.1577 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
  2.1578 +--- a/net/xfrm/xfrm_user.c
  2.1579 ++++ b/net/xfrm/xfrm_user.c
  2.1580 +@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
  2.1581 + 	if (nr > XFRM_MAX_DEPTH)
  2.1582 + 		return NULL;
  2.1583 + 
  2.1584 ++	if (p->dir > XFRM_POLICY_OUT)
  2.1585 ++		return NULL;
  2.1586 ++
  2.1587 + 	xp = xfrm_policy_alloc(GFP_KERNEL);
  2.1588 + 	if (xp == NULL) {
  2.1589 + 		*dir = -ENOBUFS;
  2.1590 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
  2.1591 +--- a/security/keys/keyring.c
  2.1592 ++++ b/security/keys/keyring.c
  2.1593 +@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
  2.1594 + 
  2.1595 + 	if (keyring->description) {
  2.1596 + 		write_lock(&keyring_name_lock);
  2.1597 +-		list_del(&keyring->type_data.link);
  2.1598 ++
  2.1599 ++		if (keyring->type_data.link.next != NULL &&
  2.1600 ++		    !list_empty(&keyring->type_data.link))
  2.1601 ++			list_del(&keyring->type_data.link);
  2.1602 ++
  2.1603 + 		write_unlock(&keyring_name_lock);
  2.1604 + 	}
  2.1605 + 
  2.1606 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
  2.1607 +--- a/security/keys/process_keys.c
  2.1608 ++++ b/security/keys/process_keys.c
  2.1609 +@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
  2.1610 + 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
  2.1611 + 		if (IS_ERR(keyring)) {
  2.1612 + 			ret = PTR_ERR(keyring);
  2.1613 +-			goto error;
  2.1614 ++			goto error2;
  2.1615 + 		}
  2.1616 + 	}
  2.1617 + 	else if (IS_ERR(keyring)) {
     3.1 --- a/patches/linux-2.6.12/patch-2.6.12.5	Tue Oct 18 19:28:16 2005 +0100
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,1614 +0,0 @@
     3.4 -diff --git a/Makefile b/Makefile
     3.5 ---- a/Makefile
     3.6 -+++ b/Makefile
     3.7 -@@ -1,7 +1,7 @@
     3.8 - VERSION = 2
     3.9 - PATCHLEVEL = 6
    3.10 - SUBLEVEL = 12
    3.11 --EXTRAVERSION =
    3.12 -+EXTRAVERSION = .5
    3.13 - NAME=Woozy Numbat
    3.14 - 
    3.15 - # *DOCUMENTATION*
    3.16 -@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
    3.17 - #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
    3.18 - #Adding $(srctree) adds about 20M on i386 to the size of the output file!
    3.19 - 
    3.20 --ifeq ($(KBUILD_OUTPUT),)
    3.21 -+ifeq ($(src),$(obj))
    3.22 - __srctree =
    3.23 - else
    3.24 - __srctree = $(srctree)/
    3.25 -diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    3.26 ---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    3.27 -+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
    3.28 -@@ -44,7 +44,7 @@
    3.29 - 
    3.30 - #define PFX "powernow-k8: "
    3.31 - #define BFX PFX "BIOS error: "
    3.32 --#define VERSION "version 1.40.2"
    3.33 -+#define VERSION "version 1.40.4"
    3.34 - #include "powernow-k8.h"
    3.35 - 
    3.36 - /* serialize freq changes  */
    3.37 -@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
    3.38 - {
    3.39 - 	struct powernow_k8_data *data;
    3.40 - 	cpumask_t oldmask = CPU_MASK_ALL;
    3.41 --	int rc;
    3.42 -+	int rc, i;
    3.43 - 
    3.44 - 	if (!check_supported_cpu(pol->cpu))
    3.45 - 		return -ENODEV;
    3.46 -@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
    3.47 - 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
    3.48 - 	       data->currfid, data->currvid);
    3.49 - 
    3.50 --	powernow_data[pol->cpu] = data;
    3.51 -+	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
    3.52 -+		powernow_data[i] = data;
    3.53 -+	}
    3.54 - 
    3.55 - 	return 0;
    3.56 - 
    3.57 -diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
    3.58 ---- a/arch/i386/kernel/process.c
    3.59 -+++ b/arch/i386/kernel/process.c
    3.60 -@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
    3.61 - 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
    3.62 - 		return -EINVAL;
    3.63 - 
    3.64 -+	memset(&info, 0, sizeof(info));
    3.65 -+
    3.66 - 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
    3.67 - 
    3.68 - 	info.entry_number = idx;
    3.69 -diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
    3.70 ---- a/arch/ia64/kernel/ptrace.c
    3.71 -+++ b/arch/ia64/kernel/ptrace.c
    3.72 -@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
    3.73 - 				*data = (pt->cr_ipsr & IPSR_MASK);
    3.74 - 			return 0;
    3.75 - 
    3.76 -+		      case PT_AR_RSC:
    3.77 -+			if (write_access)
    3.78 -+				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
    3.79 -+			else
    3.80 -+				*data = pt->ar_rsc;
    3.81 -+			return 0;
    3.82 -+
    3.83 - 		      case PT_AR_RNAT:
    3.84 - 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
    3.85 - 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
    3.86 -@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
    3.87 - 		      case PT_AR_BSPSTORE:
    3.88 - 			ptr = pt_reg_addr(pt, ar_bspstore);
    3.89 - 			break;
    3.90 --		      case PT_AR_RSC:
    3.91 --			ptr = pt_reg_addr(pt, ar_rsc);
    3.92 --			break;
    3.93 - 		      case PT_AR_UNAT:
    3.94 - 			ptr = pt_reg_addr(pt, ar_unat);
    3.95 - 			break;
    3.96 -@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
    3.97 - static long
    3.98 - ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
    3.99 - {
   3.100 --	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   3.101 -+	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
   3.102 - 	struct unw_frame_info info;
   3.103 - 	struct switch_stack *sw;
   3.104 - 	struct ia64_fpreg fpval;
   3.105 -@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
   3.106 - 	/* app regs */
   3.107 - 
   3.108 - 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
   3.109 --	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
   3.110 -+	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
   3.111 - 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
   3.112 - 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
   3.113 - 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
   3.114 -@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
   3.115 - 	retval |= __get_user(nat_bits, &ppr->nat);
   3.116 - 
   3.117 - 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
   3.118 -+	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
   3.119 - 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
   3.120 - 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
   3.121 - 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
   3.122 -diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
   3.123 ---- a/arch/ia64/kernel/signal.c
   3.124 -+++ b/arch/ia64/kernel/signal.c
   3.125 -@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
   3.126 - static long
   3.127 - restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
   3.128 - {
   3.129 --	unsigned long ip, flags, nat, um, cfm;
   3.130 -+	unsigned long ip, flags, nat, um, cfm, rsc;
   3.131 - 	long err;
   3.132 - 
   3.133 - 	/* Always make any pending restarted system calls return -EINTR */
   3.134 -@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
   3.135 - 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
   3.136 - 	err |= __get_user(cfm, &sc->sc_cfm);
   3.137 - 	err |= __get_user(um, &sc->sc_um);			/* user mask */
   3.138 --	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
   3.139 -+	err |= __get_user(rsc, &sc->sc_ar_rsc);
   3.140 - 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
   3.141 - 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
   3.142 - 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
   3.143 -@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
   3.144 - 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
   3.145 - 
   3.146 - 	scr->pt.cr_ifs = cfm | (1UL << 63);
   3.147 -+	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
   3.148 - 
   3.149 - 	/* establish new instruction pointer: */
   3.150 - 	scr->pt.cr_iip = ip & ~0x3UL;
   3.151 -diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
   3.152 ---- a/arch/ppc/kernel/time.c
   3.153 -+++ b/arch/ppc/kernel/time.c
   3.154 -@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
   3.155 - 
   3.156 - extern unsigned long wall_jiffies;
   3.157 - 
   3.158 -+/* used for timezone offset */
   3.159 -+static long timezone_offset;
   3.160 -+
   3.161 - DEFINE_SPINLOCK(rtc_lock);
   3.162 - 
   3.163 - EXPORT_SYMBOL(rtc_lock);
   3.164 -@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
   3.165 - 		     xtime.tv_sec - last_rtc_update >= 659 &&
   3.166 - 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
   3.167 - 		     jiffies - wall_jiffies == 1) {
   3.168 --		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
   3.169 -+		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
   3.170 - 				last_rtc_update = xtime.tv_sec+1;
   3.171 - 			else
   3.172 - 				/* Try again one minute later */
   3.173 -@@ -286,7 +289,7 @@ void __init time_init(void)
   3.174 - 	unsigned old_stamp, stamp, elapsed;
   3.175 - 
   3.176 -         if (ppc_md.time_init != NULL)
   3.177 --                time_offset = ppc_md.time_init();
   3.178 -+                timezone_offset = ppc_md.time_init();
   3.179 - 
   3.180 - 	if (__USE_RTC()) {
   3.181 - 		/* 601 processor: dec counts down by 128 every 128ns */
   3.182 -@@ -331,10 +334,10 @@ void __init time_init(void)
   3.183 - 	set_dec(tb_ticks_per_jiffy);
   3.184 - 
   3.185 - 	/* If platform provided a timezone (pmac), we correct the time */
   3.186 --        if (time_offset) {
   3.187 --		sys_tz.tz_minuteswest = -time_offset / 60;
   3.188 -+        if (timezone_offset) {
   3.189 -+		sys_tz.tz_minuteswest = -timezone_offset / 60;
   3.190 - 		sys_tz.tz_dsttime = 0;
   3.191 --		xtime.tv_sec -= time_offset;
   3.192 -+		xtime.tv_sec -= timezone_offset;
   3.193 -         }
   3.194 -         set_normalized_timespec(&wall_to_monotonic,
   3.195 -                                 -xtime.tv_sec, -xtime.tv_nsec);
   3.196 -diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
   3.197 ---- a/arch/ppc64/boot/zlib.c
   3.198 -+++ b/arch/ppc64/boot/zlib.c
   3.199 -@@ -1307,7 +1307,7 @@ local int huft_build(
   3.200 -   {
   3.201 -     *t = (inflate_huft *)Z_NULL;
   3.202 -     *m = 0;
   3.203 --    return Z_OK;
   3.204 -+    return Z_DATA_ERROR;
   3.205 -   }
   3.206 - 
   3.207 - 
   3.208 -@@ -1351,6 +1351,7 @@ local int huft_build(
   3.209 -     if ((j = *p++) != 0)
   3.210 -       v[x[j]++] = i;
   3.211 -   } while (++i < n);
   3.212 -+  n = x[g];			/* set n to length of v */
   3.213 - 
   3.214 - 
   3.215 -   /* Generate the Huffman codes and for each, make the table entries */
   3.216 -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
   3.217 ---- a/arch/um/kernel/process.c
   3.218 -+++ b/arch/um/kernel/process.c
   3.219 -@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
   3.220 - 	return(arg.pid);
   3.221 - }
   3.222 - 
   3.223 --static int ptrace_child(void)
   3.224 -+static int ptrace_child(void *arg)
   3.225 - {
   3.226 - 	int ret;
   3.227 - 	int pid = os_getpid(), ppid = getppid();
   3.228 -@@ -159,16 +159,20 @@ static int ptrace_child(void)
   3.229 - 	_exit(ret);
   3.230 - }
   3.231 - 
   3.232 --static int start_ptraced_child(void)
   3.233 -+static int start_ptraced_child(void **stack_out)
   3.234 - {
   3.235 -+	void *stack;
   3.236 -+	unsigned long sp;
   3.237 - 	int pid, n, status;
   3.238 - 	
   3.239 --	pid = fork();
   3.240 --	if(pid == 0)
   3.241 --		ptrace_child();
   3.242 --
   3.243 -+	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
   3.244 -+		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   3.245 -+	if(stack == MAP_FAILED)
   3.246 -+		panic("check_ptrace : mmap failed, errno = %d", errno);
   3.247 -+	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
   3.248 -+	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
   3.249 - 	if(pid < 0)
   3.250 --		panic("check_ptrace : fork failed, errno = %d", errno);
   3.251 -+		panic("check_ptrace : clone failed, errno = %d", errno);
   3.252 - 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
   3.253 - 	if(n < 0)
   3.254 - 		panic("check_ptrace : wait failed, errno = %d", errno);
   3.255 -@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
   3.256 - 		panic("check_ptrace : expected SIGSTOP, got status = %d",
   3.257 - 		      status);
   3.258 - 
   3.259 -+	*stack_out = stack;
   3.260 - 	return(pid);
   3.261 - }
   3.262 - 
   3.263 -@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
   3.264 -  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
   3.265 -  * So only for SYSEMU features we test mustpanic, while normal host features
   3.266 -  * must work anyway!*/
   3.267 --static int stop_ptraced_child(int pid, int exitcode, int mustexit)
   3.268 -+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
   3.269 - {
   3.270 - 	int status, n, ret = 0;
   3.271 - 
   3.272 - 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
   3.273 --		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
   3.274 -+		panic("check_ptrace : ptrace failed, errno = %d", errno);
   3.275 - 	CATCH_EINTR(n = waitpid(pid, &status, 0));
   3.276 - 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
   3.277 - 		int exit_with = WEXITSTATUS(status);
   3.278 -@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
   3.279 - 		printk("check_ptrace : child exited with exitcode %d, while "
   3.280 - 		      "expecting %d; status 0x%x", exit_with,
   3.281 - 		      exitcode, status);
   3.282 --		if (mustexit)
   3.283 -+		if (mustpanic)
   3.284 - 			panic("\n");
   3.285 - 		else
   3.286 - 			printk("\n");
   3.287 - 		ret = -1;
   3.288 - 	}
   3.289 - 
   3.290 -+	if(munmap(stack, PAGE_SIZE) < 0)
   3.291 -+		panic("check_ptrace : munmap failed, errno = %d", errno);
   3.292 - 	return ret;
   3.293 - }
   3.294 - 
   3.295 -@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
   3.296 - 
   3.297 - static void __init check_sysemu(void)
   3.298 - {
   3.299 -+	void *stack;
   3.300 - 	int pid, syscall, n, status, count=0;
   3.301 - 
   3.302 - 	printk("Checking syscall emulation patch for ptrace...");
   3.303 - 	sysemu_supported = 0;
   3.304 --	pid = start_ptraced_child();
   3.305 -+	pid = start_ptraced_child(&stack);
   3.306 - 
   3.307 - 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
   3.308 - 		goto fail;
   3.309 -@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
   3.310 - 		panic("check_sysemu : failed to modify system "
   3.311 - 		      "call return, errno = %d", errno);
   3.312 - 
   3.313 --	if (stop_ptraced_child(pid, 0, 0) < 0)
   3.314 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   3.315 - 		goto fail_stopped;
   3.316 - 
   3.317 - 	sysemu_supported = 1;
   3.318 -@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
   3.319 - 	set_using_sysemu(!force_sysemu_disabled);
   3.320 - 
   3.321 - 	printk("Checking advanced syscall emulation patch for ptrace...");
   3.322 --	pid = start_ptraced_child();
   3.323 -+	pid = start_ptraced_child(&stack);
   3.324 - 	while(1){
   3.325 - 		count++;
   3.326 - 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
   3.327 -@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
   3.328 - 			break;
   3.329 - 		}
   3.330 - 	}
   3.331 --	if (stop_ptraced_child(pid, 0, 0) < 0)
   3.332 -+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
   3.333 - 		goto fail_stopped;
   3.334 - 
   3.335 - 	sysemu_supported = 2;
   3.336 -@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
   3.337 - 	return;
   3.338 - 
   3.339 - fail:
   3.340 --	stop_ptraced_child(pid, 1, 0);
   3.341 -+	stop_ptraced_child(pid, stack, 1, 0);
   3.342 - fail_stopped:
   3.343 - 	printk("missing\n");
   3.344 - }
   3.345 - 
   3.346 - void __init check_ptrace(void)
   3.347 - {
   3.348 -+	void *stack;
   3.349 - 	int pid, syscall, n, status;
   3.350 - 
   3.351 - 	printk("Checking that ptrace can change system call numbers...");
   3.352 --	pid = start_ptraced_child();
   3.353 -+	pid = start_ptraced_child(&stack);
   3.354 - 
   3.355 - 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
   3.356 - 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
   3.357 -@@ -330,7 +339,7 @@ void __init check_ptrace(void)
   3.358 - 			break;
   3.359 - 		}
   3.360 - 	}
   3.361 --	stop_ptraced_child(pid, 0, 1);
   3.362 -+	stop_ptraced_child(pid, stack, 0, 1);
   3.363 - 	printk("OK\n");
   3.364 - 	check_sysemu();
   3.365 - }
   3.366 -@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
   3.367 - static inline int check_skas3_ptrace_support(void)
   3.368 - {
   3.369 - 	struct ptrace_faultinfo fi;
   3.370 -+	void *stack;
   3.371 - 	int pid, n, ret = 1;
   3.372 - 
   3.373 - 	printf("Checking for the skas3 patch in the host...");
   3.374 --	pid = start_ptraced_child();
   3.375 -+	pid = start_ptraced_child(&stack);
   3.376 - 
   3.377 - 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
   3.378 - 	if (n < 0) {
   3.379 -@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
   3.380 - 	}
   3.381 - 
   3.382 - 	init_registers(pid);
   3.383 --	stop_ptraced_child(pid, 1, 1);
   3.384 -+	stop_ptraced_child(pid, stack, 1, 1);
   3.385 - 
   3.386 - 	return(ret);
   3.387 - }
   3.388 -diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
   3.389 ---- a/arch/x86_64/ia32/syscall32.c
   3.390 -+++ b/arch/x86_64/ia32/syscall32.c
   3.391 -@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
   3.392 - 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
   3.393 - 	struct vm_area_struct *vma;
   3.394 - 	struct mm_struct *mm = current->mm;
   3.395 -+	int ret;
   3.396 - 
   3.397 - 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
   3.398 - 	if (!vma)
   3.399 -@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
   3.400 - 	vma->vm_mm = mm;
   3.401 - 
   3.402 - 	down_write(&mm->mmap_sem);
   3.403 --	insert_vm_struct(mm, vma);
   3.404 -+	if ((ret = insert_vm_struct(mm, vma))) {
   3.405 -+		up_write(&mm->mmap_sem);
   3.406 -+		kmem_cache_free(vm_area_cachep, vma);
   3.407 -+		return ret;
   3.408 -+	}
   3.409 - 	mm->total_vm += npages;
   3.410 - 	up_write(&mm->mmap_sem);
   3.411 - 	return 0;
   3.412 -diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
   3.413 ---- a/arch/x86_64/kernel/setup.c
   3.414 -+++ b/arch/x86_64/kernel/setup.c
   3.415 -@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
   3.416 - 	int cpu = smp_processor_id();
   3.417 - 	int node = 0;
   3.418 - 	unsigned bits;
   3.419 --	if (c->x86_num_cores == 1)
   3.420 --		return;
   3.421 - 
   3.422 - 	bits = 0;
   3.423 - 	while ((1 << bits) < c->x86_num_cores)
   3.424 -diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
   3.425 ---- a/arch/x86_64/kernel/smp.c
   3.426 -+++ b/arch/x86_64/kernel/smp.c
   3.427 -@@ -284,6 +284,71 @@ struct call_data_struct {
   3.428 - static struct call_data_struct * call_data;
   3.429 - 
   3.430 - /*
   3.431 -+ * this function sends a 'generic call function' IPI to one other CPU
   3.432 -+ * in the system.
   3.433 -+ */
   3.434 -+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
   3.435 -+				int nonatomic, int wait)
   3.436 -+{
   3.437 -+	struct call_data_struct data;
   3.438 -+	int cpus = 1;
   3.439 -+
   3.440 -+	data.func = func;
   3.441 -+	data.info = info;
   3.442 -+	atomic_set(&data.started, 0);
   3.443 -+	data.wait = wait;
   3.444 -+	if (wait)
   3.445 -+		atomic_set(&data.finished, 0);
   3.446 -+
   3.447 -+	call_data = &data;
   3.448 -+	wmb();
   3.449 -+	/* Send a message to all other CPUs and wait for them to respond */
   3.450 -+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
   3.451 -+
   3.452 -+	/* Wait for response */
   3.453 -+	while (atomic_read(&data.started) != cpus)
   3.454 -+		cpu_relax();
   3.455 -+
   3.456 -+	if (!wait)
   3.457 -+		return;
   3.458 -+
   3.459 -+	while (atomic_read(&data.finished) != cpus)
   3.460 -+		cpu_relax();
   3.461 -+}
   3.462 -+
   3.463 -+/*
   3.464 -+ * Run a function on another CPU
   3.465 -+ *  <func>	The function to run. This must be fast and non-blocking.
   3.466 -+ *  <info>	An arbitrary pointer to pass to the function.
   3.467 -+ *  <nonatomic>	Currently unused.
   3.468 -+ *  <wait>	If true, wait until function has completed on other CPUs.
   3.469 -+ *  [RETURNS]   0 on success, else a negative status code.
   3.470 -+ *
   3.471 -+ * Does not return until the remote CPU is nearly ready to execute <func>
   3.472 -+ * or is or has executed.
   3.473 -+ */
   3.474 -+
   3.475 -+int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
   3.476 -+	int nonatomic, int wait)
   3.477 -+{
   3.478 -+	
   3.479 -+	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
   3.480 -+
   3.481 -+	if (cpu == me) {
   3.482 -+		printk("%s: trying to call self\n", __func__);
   3.483 -+		put_cpu();
   3.484 -+		return -EBUSY;
   3.485 -+	}
   3.486 -+	spin_lock_bh(&call_lock);
   3.487 -+
   3.488 -+	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
   3.489 -+
   3.490 -+	spin_unlock_bh(&call_lock);
   3.491 -+	put_cpu();
   3.492 -+	return 0;
   3.493 -+}
   3.494 -+
   3.495 -+/*
   3.496 -  * this function sends a 'generic call function' IPI to all other CPUs
   3.497 -  * in the system.
   3.498 -  */
   3.499 -diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
   3.500 ---- a/arch/x86_64/kernel/smpboot.c
   3.501 -+++ b/arch/x86_64/kernel/smpboot.c
   3.502 -@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
   3.503 - {
   3.504 - 	unsigned long flags, i;
   3.505 - 
   3.506 --	if (smp_processor_id() != boot_cpu_id)
   3.507 --		return;
   3.508 --
   3.509 - 	go[MASTER] = 0;
   3.510 - 
   3.511 - 	local_irq_save(flags);
   3.512 -@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
   3.513 - 	return tcenter - best_tm;
   3.514 - }
   3.515 - 
   3.516 --static __cpuinit void sync_tsc(void)
   3.517 -+static __cpuinit void sync_tsc(unsigned int master)
   3.518 - {
   3.519 - 	int i, done = 0;
   3.520 - 	long delta, adj, adjust_latency = 0;
   3.521 -@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
   3.522 - 	} t[NUM_ROUNDS] __cpuinitdata;
   3.523 - #endif
   3.524 - 
   3.525 -+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
   3.526 -+		smp_processor_id(), master);
   3.527 -+
   3.528 - 	go[MASTER] = 1;
   3.529 - 
   3.530 --	smp_call_function(sync_master, NULL, 1, 0);
   3.531 -+	/* It is dangerous to broadcast IPI as cpus are coming up,
   3.532 -+	 * as they may not be ready to accept them.  So since
   3.533 -+	 * we only need to send the ipi to the boot cpu direct
   3.534 -+	 * the message, and avoid the race.
   3.535 -+	 */
   3.536 -+	smp_call_function_single(master, sync_master, NULL, 1, 0);
   3.537 - 
   3.538 - 	while (go[MASTER])	/* wait for master to be ready */
   3.539 - 		no_cpu_relax();
   3.540 -@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
   3.541 - 	printk(KERN_INFO
   3.542 - 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
   3.543 - 	       "maxerr %lu cycles)\n",
   3.544 --	       smp_processor_id(), boot_cpu_id, delta, rt);
   3.545 -+	       smp_processor_id(), master, delta, rt);
   3.546 - }
   3.547 - 
   3.548 - static void __cpuinit tsc_sync_wait(void)
   3.549 - {
   3.550 - 	if (notscsync || !cpu_has_tsc)
   3.551 - 		return;
   3.552 --	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
   3.553 --			boot_cpu_id);
   3.554 --	sync_tsc();
   3.555 -+	sync_tsc(0);
   3.556 - }
   3.557 - 
   3.558 - static __init int notscsync_setup(char *s)
   3.559 -diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
   3.560 ---- a/drivers/acpi/pci_irq.c
   3.561 -+++ b/drivers/acpi/pci_irq.c
   3.562 -@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
   3.563 - 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
   3.564 - 			pci_name(dev), ('A' + pin));
   3.565 - 		/* Interrupt Line values above 0xF are forbidden */
   3.566 --		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
   3.567 -+		if (dev->irq > 0 && (dev->irq <= 0xF)) {
   3.568 - 			printk(" - using IRQ %d\n", dev->irq);
   3.569 -+			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
   3.570 - 			return_VALUE(0);
   3.571 - 		}
   3.572 - 		else {
   3.573 -diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
   3.574 ---- a/drivers/char/rocket.c
   3.575 -+++ b/drivers/char/rocket.c
   3.576 -@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
   3.577 - 		ToRecv = space;
   3.578 - 
   3.579 - 	if (ToRecv <= 0)
   3.580 --		return;
   3.581 -+		goto done;
   3.582 - 
   3.583 - 	/*
   3.584 - 	 * if status indicates there are errored characters in the
   3.585 -@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
   3.586 - 	}
   3.587 - 	/*  Push the data up to the tty layer */
   3.588 - 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
   3.589 -+done:
   3.590 - 	tty_ldisc_deref(ld);
   3.591 - }
   3.592 - 
   3.593 -diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
   3.594 ---- a/drivers/char/tpm/tpm.c
   3.595 -+++ b/drivers/char/tpm/tpm.c
   3.596 -@@ -32,12 +32,6 @@
   3.597 - 
   3.598 - #define	TPM_BUFSIZE			2048
   3.599 - 
   3.600 --/* PCI configuration addresses */
   3.601 --#define	PCI_GEN_PMCON_1			0xA0
   3.602 --#define	PCI_GEN1_DEC			0xE4
   3.603 --#define	PCI_LPC_EN			0xE6
   3.604 --#define	PCI_GEN2_DEC			0xEC
   3.605 --
   3.606 - static LIST_HEAD(tpm_chip_list);
   3.607 - static DEFINE_SPINLOCK(driver_lock);
   3.608 - static int dev_mask[32];
   3.609 -@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
   3.610 - EXPORT_SYMBOL_GPL(tpm_time_expired);
   3.611 - 
   3.612 - /*
   3.613 -- * Initialize the LPC bus and enable the TPM ports
   3.614 -- */
   3.615 --int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
   3.616 --{
   3.617 --	u32 lpcenable, tmp;
   3.618 --	int is_lpcm = 0;
   3.619 --
   3.620 --	switch (pci_dev->vendor) {
   3.621 --	case PCI_VENDOR_ID_INTEL:
   3.622 --		switch (pci_dev->device) {
   3.623 --		case PCI_DEVICE_ID_INTEL_82801CA_12:
   3.624 --		case PCI_DEVICE_ID_INTEL_82801DB_12:
   3.625 --			is_lpcm = 1;
   3.626 --			break;
   3.627 --		}
   3.628 --		/* init ICH (enable LPC) */
   3.629 --		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
   3.630 --		lpcenable |= 0x20000000;
   3.631 --		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
   3.632 --
   3.633 --		if (is_lpcm) {
   3.634 --			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
   3.635 --					      &lpcenable);
   3.636 --			if ((lpcenable & 0x20000000) == 0) {
   3.637 --				dev_err(&pci_dev->dev,
   3.638 --					"cannot enable LPC\n");
   3.639 --				return -ENODEV;
   3.640 --			}
   3.641 --		}
   3.642 --
   3.643 --		/* initialize TPM registers */
   3.644 --		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
   3.645 --
   3.646 --		if (!is_lpcm)
   3.647 --			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
   3.648 --		else
   3.649 --			tmp =
   3.650 --			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
   3.651 --			    0x00000001;
   3.652 --
   3.653 --		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
   3.654 --
   3.655 --		if (is_lpcm) {
   3.656 --			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
   3.657 --					      &tmp);
   3.658 --			tmp |= 0x00000004;	/* enable CLKRUN */
   3.659 --			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
   3.660 --					       tmp);
   3.661 --		}
   3.662 --		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
   3.663 --		tpm_write_index(0x0A, 0x00);	/* int disable */
   3.664 --		tpm_write_index(0x08, base);	/* base addr lo */
   3.665 --		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
   3.666 --		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
   3.667 --		break;
   3.668 --	case PCI_VENDOR_ID_AMD:
   3.669 --		/* nothing yet */
   3.670 --		break;
   3.671 --	}
   3.672 --
   3.673 --	return 0;
   3.674 --}
   3.675 --
   3.676 --EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
   3.677 --
   3.678 --/*
   3.679 -  * Internal kernel interface to transmit TPM commands
   3.680 -  */
   3.681 - static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   3.682 -@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
   3.683 - 	if (chip == NULL)
   3.684 - 		return -ENODEV;
   3.685 - 
   3.686 --	spin_lock(&driver_lock);
   3.687 --	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
   3.688 --	spin_unlock(&driver_lock);
   3.689 --
   3.690 - 	return 0;
   3.691 - }
   3.692 - 
   3.693 -diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
   3.694 ---- a/drivers/char/tpm/tpm.h
   3.695 -+++ b/drivers/char/tpm/tpm.h
   3.696 -@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
   3.697 - }
   3.698 - 
   3.699 - extern void tpm_time_expired(unsigned long);
   3.700 --extern int tpm_lpc_bus_init(struct pci_dev *, u16);
   3.701 --
   3.702 - extern int tpm_register_hardware(struct pci_dev *,
   3.703 - 				 struct tpm_vendor_specific *);
   3.704 - extern int tpm_open(struct inode *, struct file *);
   3.705 -diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
   3.706 ---- a/drivers/char/tpm/tpm_atmel.c
   3.707 -+++ b/drivers/char/tpm/tpm_atmel.c
   3.708 -@@ -22,7 +22,10 @@
   3.709 - #include "tpm.h"
   3.710 - 
   3.711 - /* Atmel definitions */
   3.712 --#define	TPM_ATML_BASE			0x400
   3.713 -+enum tpm_atmel_addr {
   3.714 -+	TPM_ATMEL_BASE_ADDR_LO = 0x08,
   3.715 -+	TPM_ATMEL_BASE_ADDR_HI = 0x09
   3.716 -+};
   3.717 - 
   3.718 - /* write status bits */
   3.719 - #define	ATML_STATUS_ABORT		0x01
   3.720 -@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
   3.721 - 	.cancel = tpm_atml_cancel,
   3.722 - 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
   3.723 - 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
   3.724 --	.base = TPM_ATML_BASE,
   3.725 - 	.miscdev = { .fops = &atmel_ops, },
   3.726 - };
   3.727 - 
   3.728 -@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
   3.729 - {
   3.730 - 	u8 version[4];
   3.731 - 	int rc = 0;
   3.732 -+	int lo, hi;
   3.733 - 
   3.734 - 	if (pci_enable_device(pci_dev))
   3.735 - 		return -EIO;
   3.736 - 
   3.737 --	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
   3.738 --		rc = -ENODEV;
   3.739 --		goto out_err;
   3.740 --	}
   3.741 -+	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
   3.742 -+	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
   3.743 -+
   3.744 -+	tpm_atmel.base = (hi<<8)|lo;
   3.745 -+	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
   3.746 - 
   3.747 - 	/* verify that it is an Atmel part */
   3.748 - 	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
   3.749 -diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
   3.750 ---- a/drivers/char/tpm/tpm_nsc.c
   3.751 -+++ b/drivers/char/tpm/tpm_nsc.c
   3.752 -@@ -24,6 +24,10 @@
   3.753 - /* National definitions */
   3.754 - #define	TPM_NSC_BASE			0x360
   3.755 - #define	TPM_NSC_IRQ			0x07
   3.756 -+#define	TPM_NSC_BASE0_HI		0x60
   3.757 -+#define	TPM_NSC_BASE0_LO		0x61
   3.758 -+#define	TPM_NSC_BASE1_HI		0x62
   3.759 -+#define	TPM_NSC_BASE1_LO		0x63
   3.760 - 
   3.761 - #define	NSC_LDN_INDEX			0x07
   3.762 - #define	NSC_SID_INDEX			0x20
   3.763 -@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
   3.764 - 	.cancel = tpm_nsc_cancel,
   3.765 - 	.req_complete_mask = NSC_STATUS_OBF,
   3.766 - 	.req_complete_val = NSC_STATUS_OBF,
   3.767 --	.base = TPM_NSC_BASE,
   3.768 - 	.miscdev = { .fops = &nsc_ops, },
   3.769 - 	
   3.770 - };
   3.771 -@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
   3.772 - 				  const struct pci_device_id *pci_id)
   3.773 - {
   3.774 - 	int rc = 0;
   3.775 -+	int lo, hi;
   3.776 -+
   3.777 -+	hi = tpm_read_index(TPM_NSC_BASE0_HI);
   3.778 -+	lo = tpm_read_index(TPM_NSC_BASE0_LO);
   3.779 -+
   3.780 -+	tpm_nsc.base = (hi<<8) | lo;
   3.781 - 
   3.782 - 	if (pci_enable_device(pci_dev))
   3.783 - 		return -EIO;
   3.784 - 
   3.785 --	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
   3.786 --		rc = -ENODEV;
   3.787 --		goto out_err;
   3.788 --	}
   3.789 --
   3.790 - 	/* verify that it is a National part (SID) */
   3.791 - 	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
   3.792 - 		rc = -ENODEV;
   3.793 -diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
   3.794 ---- a/drivers/char/tty_ioctl.c
   3.795 -+++ b/drivers/char/tty_ioctl.c
   3.796 -@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
   3.797 - 			ld = tty_ldisc_ref(tty);
   3.798 - 			switch (arg) {
   3.799 - 			case TCIFLUSH:
   3.800 --				if (ld->flush_buffer)
   3.801 -+				if (ld && ld->flush_buffer)
   3.802 - 					ld->flush_buffer(tty);
   3.803 - 				break;
   3.804 - 			case TCIOFLUSH:
   3.805 --				if (ld->flush_buffer)
   3.806 -+				if (ld && ld->flush_buffer)
   3.807 - 					ld->flush_buffer(tty);
   3.808 - 				/* fall through */
   3.809 - 			case TCOFLUSH:
   3.810 -diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
   3.811 ---- a/drivers/media/video/cx88/cx88-video.c
   3.812 -+++ b/drivers/media/video/cx88/cx88-video.c
   3.813 -@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
   3.814 - 			.default_value = 0,
   3.815 - 			.type          = V4L2_CTRL_TYPE_INTEGER,
   3.816 - 		},
   3.817 --		.off                   = 0,
   3.818 -+		.off                   = 128,
   3.819 - 		.reg                   = MO_HUE,
   3.820 - 		.mask                  = 0x00ff,
   3.821 - 		.shift                 = 0,
   3.822 -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   3.823 ---- a/drivers/net/e1000/e1000_main.c
   3.824 -+++ b/drivers/net/e1000/e1000_main.c
   3.825 -@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   3.826 - 	tso = e1000_tso(adapter, skb);
   3.827 - 	if (tso < 0) {
   3.828 - 		dev_kfree_skb_any(skb);
   3.829 -+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
   3.830 - 		return NETDEV_TX_OK;
   3.831 - 	}
   3.832 - 
   3.833 -diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
   3.834 ---- a/drivers/net/hamradio/Kconfig
   3.835 -+++ b/drivers/net/hamradio/Kconfig
   3.836 -@@ -17,7 +17,7 @@ config MKISS
   3.837 - 
   3.838 - config 6PACK
   3.839 - 	tristate "Serial port 6PACK driver"
   3.840 --	depends on AX25 && BROKEN_ON_SMP
   3.841 -+	depends on AX25
   3.842 - 	---help---
   3.843 - 	  6pack is a transmission protocol for the data exchange between your
   3.844 - 	  PC and your TNC (the Terminal Node Controller acts as a kind of
   3.845 -diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
   3.846 ---- a/drivers/net/shaper.c
   3.847 -+++ b/drivers/net/shaper.c
   3.848 -@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
   3.849 - {
   3.850 - 	struct shaper *shaper = dev->priv;
   3.851 -  	struct sk_buff *ptr;
   3.852 --   
   3.853 --	if (down_trylock(&shaper->sem))
   3.854 --		return -1;
   3.855 - 
   3.856 -+	spin_lock(&shaper->lock);
   3.857 -  	ptr=shaper->sendq.prev;
   3.858 -  	
   3.859 -  	/*
   3.860 -@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
   3.861 -                 shaper->stats.collisions++;
   3.862 -  	}
   3.863 - 	shaper_kick(shaper);
   3.864 --	up(&shaper->sem);
   3.865 -+	spin_unlock(&shaper->lock);
   3.866 -  	return 0;
   3.867 - }
   3.868 - 
   3.869 -@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
   3.870 - {
   3.871 - 	struct shaper *shaper = (struct shaper *)data;
   3.872 - 
   3.873 --	if (!down_trylock(&shaper->sem)) {
   3.874 --		shaper_kick(shaper);
   3.875 --		up(&shaper->sem);
   3.876 --	} else
   3.877 --		mod_timer(&shaper->timer, jiffies);
   3.878 -+	spin_lock(&shaper->lock);
   3.879 -+	shaper_kick(shaper);
   3.880 -+	spin_unlock(&shaper->lock);
   3.881 - }
   3.882 - 
   3.883 - /*
   3.884 -@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
   3.885 - 
   3.886 - 
   3.887 - /*
   3.888 -- *	Flush the shaper queues on a closedown
   3.889 -- */
   3.890 -- 
   3.891 --static void shaper_flush(struct shaper *shaper)
   3.892 --{
   3.893 --	struct sk_buff *skb;
   3.894 --
   3.895 --	down(&shaper->sem);
   3.896 --	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
   3.897 --		dev_kfree_skb(skb);
   3.898 --	shaper_kick(shaper);
   3.899 --	up(&shaper->sem);
   3.900 --}
   3.901 --
   3.902 --/*
   3.903 -  *	Bring the interface up. We just disallow this until a 
   3.904 -  *	bind.
   3.905 -  */
   3.906 -@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
   3.907 - static int shaper_close(struct net_device *dev)
   3.908 - {
   3.909 - 	struct shaper *shaper=dev->priv;
   3.910 --	shaper_flush(shaper);
   3.911 -+	struct sk_buff *skb;
   3.912 -+
   3.913 -+	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
   3.914 -+		dev_kfree_skb(skb);
   3.915 -+
   3.916 -+	spin_lock_bh(&shaper->lock);
   3.917 -+	shaper_kick(shaper);
   3.918 -+	spin_unlock_bh(&shaper->lock);
   3.919 -+
   3.920 - 	del_timer_sync(&shaper->timer);
   3.921 - 	return 0;
   3.922 - }
   3.923 -@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
   3.924 - 	init_timer(&sh->timer);
   3.925 - 	sh->timer.function=shaper_timer;
   3.926 - 	sh->timer.data=(unsigned long)sh;
   3.927 -+	spin_lock_init(&sh->lock);
   3.928 - }
   3.929 - 
   3.930 - /*
   3.931 -diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
   3.932 ---- a/drivers/pci/pci-driver.c
   3.933 -+++ b/drivers/pci/pci-driver.c
   3.934 -@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
   3.935 - 	/* FIXME, once all of the existing PCI drivers have been fixed to set
   3.936 - 	 * the pci shutdown function, this test can go away. */
   3.937 - 	if (!drv->driver.shutdown)
   3.938 --		drv->driver.shutdown = pci_device_shutdown,
   3.939 -+		drv->driver.shutdown = pci_device_shutdown;
   3.940 - 	drv->driver.owner = drv->owner;
   3.941 - 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
   3.942 - 	pci_init_dynids(&drv->dynids);
   3.943 -diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
   3.944 ---- a/drivers/scsi/qla2xxx/qla_init.c
   3.945 -+++ b/drivers/scsi/qla2xxx/qla_init.c
   3.946 -@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
   3.947 - 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
   3.948 - 
   3.949 - 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
   3.950 --	if (!rport)
   3.951 -+	if (!rport) {
   3.952 - 		qla_printk(KERN_WARNING, ha,
   3.953 - 		    "Unable to allocate fc remote port!\n");
   3.954 -+		return;
   3.955 -+	}
   3.956 - 
   3.957 - 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
   3.958 - 		fcport->os_target_id = rport->scsi_target_id;
   3.959 -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
   3.960 ---- a/drivers/scsi/qla2xxx/qla_os.c
   3.961 -+++ b/drivers/scsi/qla2xxx/qla_os.c
   3.962 -@@ -1150,7 +1150,7 @@ iospace_error_exit:
   3.963 -  */
   3.964 - int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
   3.965 - {
   3.966 --	int	ret;
   3.967 -+	int	ret = -ENODEV;
   3.968 - 	device_reg_t __iomem *reg;
   3.969 - 	struct Scsi_Host *host;
   3.970 - 	scsi_qla_host_t *ha;
   3.971 -@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
   3.972 - 	fc_port_t *fcport;
   3.973 - 
   3.974 - 	if (pci_enable_device(pdev))
   3.975 --		return -1;
   3.976 -+		goto probe_out;
   3.977 - 
   3.978 - 	host = scsi_host_alloc(&qla2x00_driver_template,
   3.979 - 	    sizeof(scsi_qla_host_t));
   3.980 -@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
   3.981 - 
   3.982 - 	/* Configure PCI I/O space */
   3.983 - 	ret = qla2x00_iospace_config(ha);
   3.984 --	if (ret != 0) {
   3.985 --		goto probe_alloc_failed;
   3.986 --	}
   3.987 -+	if (ret)
   3.988 -+		goto probe_failed;
   3.989 - 
   3.990 - 	/* Sanitize the information from PCI BIOS. */
   3.991 - 	host->irq = pdev->irq;
   3.992 -@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
   3.993 - 		qla_printk(KERN_WARNING, ha,
   3.994 - 		    "[ERROR] Failed to allocate memory for adapter\n");
   3.995 - 
   3.996 --		goto probe_alloc_failed;
   3.997 -+		ret = -ENOMEM;
   3.998 -+		goto probe_failed;
   3.999 - 	}
  3.1000 - 
  3.1001 --	pci_set_drvdata(pdev, ha);
  3.1002 --	host->this_id = 255;
  3.1003 --	host->cmd_per_lun = 3;
  3.1004 --	host->unique_id = ha->instance;
  3.1005 --	host->max_cmd_len = MAX_CMDSZ;
  3.1006 --	host->max_channel = ha->ports - 1;
  3.1007 --	host->max_id = ha->max_targets;
  3.1008 --	host->max_lun = ha->max_luns;
  3.1009 --	host->transportt = qla2xxx_transport_template;
  3.1010 --	if (scsi_add_host(host, &pdev->dev))
  3.1011 --		goto probe_alloc_failed;
  3.1012 --
  3.1013 --	qla2x00_alloc_sysfs_attr(ha);
  3.1014 --
  3.1015 - 	if (qla2x00_initialize_adapter(ha) &&
  3.1016 - 	    !(ha->device_flags & DFLG_NO_CABLE)) {
  3.1017 - 
  3.1018 -@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
  3.1019 - 		    "Adapter flags %x.\n",
  3.1020 - 		    ha->host_no, ha->device_flags));
  3.1021 - 
  3.1022 -+		ret = -ENODEV;
  3.1023 - 		goto probe_failed;
  3.1024 - 	}
  3.1025 - 
  3.1026 --	qla2x00_init_host_attr(ha);
  3.1027 --
  3.1028 - 	/*
  3.1029 - 	 * Startup the kernel thread for this host adapter
  3.1030 - 	 */
  3.1031 -@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
  3.1032 - 		qla_printk(KERN_WARNING, ha,
  3.1033 - 		    "Unable to start DPC thread!\n");
  3.1034 - 
  3.1035 -+		ret = -ENODEV;
  3.1036 - 		goto probe_failed;
  3.1037 - 	}
  3.1038 - 	wait_for_completion(&ha->dpc_inited);
  3.1039 - 
  3.1040 -+	host->this_id = 255;
  3.1041 -+	host->cmd_per_lun = 3;
  3.1042 -+	host->unique_id = ha->instance;
  3.1043 -+	host->max_cmd_len = MAX_CMDSZ;
  3.1044 -+	host->max_channel = ha->ports - 1;
  3.1045 -+	host->max_lun = MAX_LUNS;
  3.1046 -+	host->transportt = qla2xxx_transport_template;
  3.1047 -+
  3.1048 - 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
  3.1049 - 		ret = request_irq(host->irq, qla2100_intr_handler,
  3.1050 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  3.1051 - 	else
  3.1052 - 		ret = request_irq(host->irq, qla2300_intr_handler,
  3.1053 - 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
  3.1054 --	if (ret != 0) {
  3.1055 -+	if (ret) {
  3.1056 - 		qla_printk(KERN_WARNING, ha,
  3.1057 - 		    "Failed to reserve interrupt %d already in use.\n",
  3.1058 - 		    host->irq);
  3.1059 -@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
  3.1060 - 		msleep(10);
  3.1061 - 	}
  3.1062 - 
  3.1063 -+	pci_set_drvdata(pdev, ha);
  3.1064 - 	ha->flags.init_done = 1;
  3.1065 - 	num_hosts++;
  3.1066 - 
  3.1067 -+	ret = scsi_add_host(host, &pdev->dev);
  3.1068 -+	if (ret)
  3.1069 -+		goto probe_failed;
  3.1070 -+
  3.1071 -+	qla2x00_alloc_sysfs_attr(ha);
  3.1072 -+
  3.1073 -+	qla2x00_init_host_attr(ha);
  3.1074 -+
  3.1075 - 	qla_printk(KERN_INFO, ha, "\n"
  3.1076 - 	    " QLogic Fibre Channel HBA Driver: %s\n"
  3.1077 - 	    "  QLogic %s - %s\n"
  3.1078 -@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
  3.1079 - probe_failed:
  3.1080 - 	fc_remove_host(ha->host);
  3.1081 - 
  3.1082 --	scsi_remove_host(host);
  3.1083 --
  3.1084 --probe_alloc_failed:
  3.1085 - 	qla2x00_free_device(ha);
  3.1086 - 
  3.1087 - 	scsi_host_put(host);
  3.1088 -@@ -1394,7 +1394,8 @@ probe_alloc_failed:
  3.1089 - probe_disable_device:
  3.1090 - 	pci_disable_device(pdev);
  3.1091 - 
  3.1092 --	return -1;
  3.1093 -+probe_out:
  3.1094 -+	return ret;
  3.1095 - }
  3.1096 - EXPORT_SYMBOL_GPL(qla2x00_probe_one);
  3.1097 - 
  3.1098 -diff --git a/fs/bio.c b/fs/bio.c
  3.1099 ---- a/fs/bio.c
  3.1100 -+++ b/fs/bio.c
  3.1101 -@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
  3.1102 - 	 */
  3.1103 - 	bio->bi_vcnt = bio_src->bi_vcnt;
  3.1104 - 	bio->bi_size = bio_src->bi_size;
  3.1105 -+	bio->bi_idx = bio_src->bi_idx;
  3.1106 - 	bio_phys_segments(q, bio);
  3.1107 - 	bio_hw_segments(q, bio);
  3.1108 - }
  3.1109 -diff --git a/fs/char_dev.c b/fs/char_dev.c
  3.1110 ---- a/fs/char_dev.c
  3.1111 -+++ b/fs/char_dev.c
  3.1112 -@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
  3.1113 - 	struct char_device_struct *cd = NULL, **cp;
  3.1114 - 	int i = major_to_index(major);
  3.1115 - 
  3.1116 --	up(&chrdevs_lock);
  3.1117 -+	down(&chrdevs_lock);
  3.1118 - 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
  3.1119 - 		if ((*cp)->major == major &&
  3.1120 - 		    (*cp)->baseminor == baseminor &&
  3.1121 -diff --git a/fs/exec.c b/fs/exec.c
  3.1122 ---- a/fs/exec.c
  3.1123 -+++ b/fs/exec.c
  3.1124 -@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
  3.1125 - 	}
  3.1126 - 	sig->group_exit_task = NULL;
  3.1127 - 	sig->notify_count = 0;
  3.1128 -+	sig->real_timer.data = (unsigned long)current;
  3.1129 - 	spin_unlock_irq(lock);
  3.1130 - 
  3.1131 - 	/*
  3.1132 -diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
  3.1133 ---- a/fs/isofs/compress.c
  3.1134 -+++ b/fs/isofs/compress.c
  3.1135 -@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
  3.1136 - 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
  3.1137 - 	brelse(bh);
  3.1138 - 
  3.1139 -+	if (cstart > cend)
  3.1140 -+		goto eio;
  3.1141 -+		
  3.1142 - 	csize = cend-cstart;
  3.1143 - 
  3.1144 -+	if (csize > deflateBound(1UL << zisofs_block_shift))
  3.1145 -+		goto eio;
  3.1146 -+
  3.1147 - 	/* Now page[] contains an array of pages, any of which can be NULL,
  3.1148 - 	   and the locks on which we hold.  We should now read the data and
  3.1149 - 	   release the pages.  If the pages are NULL the decompressed data
  3.1150 -diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
  3.1151 ---- a/include/asm-i386/string.h
  3.1152 -+++ b/include/asm-i386/string.h
  3.1153 -@@ -116,7 +116,8 @@ __asm__ __volatile__(
  3.1154 - 	"orb $1,%%al\n"
  3.1155 - 	"3:"
  3.1156 - 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
  3.1157 --		     :"1" (cs),"2" (ct));
  3.1158 -+	:"1" (cs),"2" (ct)
  3.1159 -+	:"memory");
  3.1160 - return __res;
  3.1161 - }
  3.1162 - 
  3.1163 -@@ -138,8 +139,9 @@ __asm__ __volatile__(
  3.1164 - 	"3:\tsbbl %%eax,%%eax\n\t"
  3.1165 - 	"orb $1,%%al\n"
  3.1166 - 	"4:"
  3.1167 --		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  3.1168 --		     :"1" (cs),"2" (ct),"3" (count));
  3.1169 -+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  3.1170 -+	:"1" (cs),"2" (ct),"3" (count)
  3.1171 -+	:"memory");
  3.1172 - return __res;
  3.1173 - }
  3.1174 - 
  3.1175 -@@ -158,7 +160,9 @@ __asm__ __volatile__(
  3.1176 - 	"movl $1,%1\n"
  3.1177 - 	"2:\tmovl %1,%0\n\t"
  3.1178 - 	"decl %0"
  3.1179 --	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
  3.1180 -+	:"=a" (__res), "=&S" (d0)
  3.1181 -+	:"1" (s),"0" (c)
  3.1182 -+	:"memory");
  3.1183 - return __res;
  3.1184 - }
  3.1185 - 
  3.1186 -@@ -175,7 +179,9 @@ __asm__ __volatile__(
  3.1187 - 	"leal -1(%%esi),%0\n"
  3.1188 - 	"2:\ttestb %%al,%%al\n\t"
  3.1189 - 	"jne 1b"
  3.1190 --	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
  3.1191 -+	:"=g" (__res), "=&S" (d0), "=&a" (d1)
  3.1192 -+	:"0" (0),"1" (s),"2" (c)
  3.1193 -+	:"memory");
  3.1194 - return __res;
  3.1195 - }
  3.1196 - 
  3.1197 -@@ -189,7 +195,9 @@ __asm__ __volatile__(
  3.1198 - 	"scasb\n\t"
  3.1199 - 	"notl %0\n\t"
  3.1200 - 	"decl %0"
  3.1201 --	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
  3.1202 -+	:"=c" (__res), "=&D" (d0)
  3.1203 -+	:"1" (s),"a" (0), "0" (0xffffffffu)
  3.1204 -+	:"memory");
  3.1205 - return __res;
  3.1206 - }
  3.1207 - 
  3.1208 -@@ -333,7 +341,9 @@ __asm__ __volatile__(
  3.1209 - 	"je 1f\n\t"
  3.1210 - 	"movl $1,%0\n"
  3.1211 - 	"1:\tdecl %0"
  3.1212 --	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
  3.1213 -+	:"=D" (__res), "=&c" (d0)
  3.1214 -+	:"a" (c),"0" (cs),"1" (count)
  3.1215 -+	:"memory");
  3.1216 - return __res;
  3.1217 - }
  3.1218 - 
  3.1219 -@@ -369,7 +379,7 @@ __asm__ __volatile__(
  3.1220 - 	"je 2f\n\t"
  3.1221 - 	"stosb\n"
  3.1222 - 	"2:"
  3.1223 --	: "=&c" (d0), "=&D" (d1)
  3.1224 -+	:"=&c" (d0), "=&D" (d1)
  3.1225 - 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
  3.1226 - 	:"memory");
  3.1227 - return (s);	
  3.1228 -@@ -392,7 +402,8 @@ __asm__ __volatile__(
  3.1229 - 	"jne 1b\n"
  3.1230 - 	"3:\tsubl %2,%0"
  3.1231 - 	:"=a" (__res), "=&d" (d0)
  3.1232 --	:"c" (s),"1" (count));
  3.1233 -+	:"c" (s),"1" (count)
  3.1234 -+	:"memory");
  3.1235 - return __res;
  3.1236 - }
  3.1237 - /* end of additional stuff */
  3.1238 -@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
  3.1239 - 		"dec %%edi\n"
  3.1240 - 		"1:"
  3.1241 - 		: "=D" (addr), "=c" (size)
  3.1242 --		: "0" (addr), "1" (size), "a" (c));
  3.1243 -+		: "0" (addr), "1" (size), "a" (c)
  3.1244 -+		: "memory");
  3.1245 - 	return addr;
  3.1246 - }
  3.1247 - 
  3.1248 -diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
  3.1249 ---- a/include/asm-x86_64/smp.h
  3.1250 -+++ b/include/asm-x86_64/smp.h
  3.1251 -@@ -46,6 +46,8 @@ extern int pic_mode;
  3.1252 - extern int smp_num_siblings;
  3.1253 - extern void smp_flush_tlb(void);
  3.1254 - extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
  3.1255 -+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  3.1256 -+				     int retry, int wait);
  3.1257 - extern void smp_send_reschedule(int cpu);
  3.1258 - extern void smp_invalidate_rcv(void);		/* Process an NMI */
  3.1259 - extern void zap_low_mappings(void);
  3.1260 -diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
  3.1261 ---- a/include/linux/if_shaper.h
  3.1262 -+++ b/include/linux/if_shaper.h
  3.1263 -@@ -23,7 +23,7 @@ struct shaper
  3.1264 - 	__u32 shapeclock;
  3.1265 - 	unsigned long recovery;	/* Time we can next clock a packet out on
  3.1266 - 				   an empty queue */
  3.1267 --	struct semaphore sem;
  3.1268 -+	spinlock_t lock;
  3.1269 -         struct net_device_stats stats;
  3.1270 - 	struct net_device *dev;
  3.1271 - 	int  (*hard_start_xmit) (struct sk_buff *skb,
  3.1272 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
  3.1273 ---- a/include/linux/skbuff.h
  3.1274 -+++ b/include/linux/skbuff.h
  3.1275 -@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
  3.1276 - {
  3.1277 - 	int hlen = skb_headlen(skb);
  3.1278 - 
  3.1279 --	if (offset + len <= hlen)
  3.1280 -+	if (hlen - offset >= len)
  3.1281 - 		return skb->data + offset;
  3.1282 - 
  3.1283 - 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
  3.1284 -diff --git a/include/linux/zlib.h b/include/linux/zlib.h
  3.1285 ---- a/include/linux/zlib.h
  3.1286 -+++ b/include/linux/zlib.h
  3.1287 -@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
  3.1288 -    stream state was inconsistent (such as zalloc or state being NULL).
  3.1289 - */
  3.1290 - 
  3.1291 -+static inline unsigned long deflateBound(unsigned long s)
  3.1292 -+{
  3.1293 -+	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
  3.1294 -+}
  3.1295 -+
  3.1296 - extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
  3.1297 - /*
  3.1298 -      Dynamically update the compression level and compression strategy.  The
  3.1299 -diff --git a/kernel/module.c b/kernel/module.c
  3.1300 ---- a/kernel/module.c
  3.1301 -+++ b/kernel/module.c
  3.1302 -@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
  3.1303 - /* Created by linker magic */
  3.1304 - extern char __per_cpu_start[], __per_cpu_end[];
  3.1305 - 
  3.1306 --static void *percpu_modalloc(unsigned long size, unsigned long align)
  3.1307 -+static void *percpu_modalloc(unsigned long size, unsigned long align,
  3.1308 -+			     const char *name)
  3.1309 - {
  3.1310 - 	unsigned long extra;
  3.1311 - 	unsigned int i;
  3.1312 - 	void *ptr;
  3.1313 - 
  3.1314 --	BUG_ON(align > SMP_CACHE_BYTES);
  3.1315 -+	if (align > SMP_CACHE_BYTES) {
  3.1316 -+		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
  3.1317 -+		       name, align, SMP_CACHE_BYTES);
  3.1318 -+		align = SMP_CACHE_BYTES;
  3.1319 -+	}
  3.1320 - 
  3.1321 - 	ptr = __per_cpu_start;
  3.1322 - 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
  3.1323 -@@ -347,7 +352,8 @@ static int percpu_modinit(void)
  3.1324 - }	
  3.1325 - __initcall(percpu_modinit);
  3.1326 - #else /* ... !CONFIG_SMP */
  3.1327 --static inline void *percpu_modalloc(unsigned long size, unsigned long align)
  3.1328 -+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
  3.1329 -+				    const char *name)
  3.1330 - {
  3.1331 - 	return NULL;
  3.1332 - }
  3.1333 -@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
  3.1334 - 	if (pcpuindex) {
  3.1335 - 		/* We have a special allocation for this section. */
  3.1336 - 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
  3.1337 --					 sechdrs[pcpuindex].sh_addralign);
  3.1338 -+					 sechdrs[pcpuindex].sh_addralign,
  3.1339 -+					 mod->name);
  3.1340 - 		if (!percpu) {
  3.1341 - 			err = -ENOMEM;
  3.1342 - 			goto free_mod;
  3.1343 -diff --git a/lib/inflate.c b/lib/inflate.c
  3.1344 ---- a/lib/inflate.c
  3.1345 -+++ b/lib/inflate.c
  3.1346 -@@ -326,7 +326,7 @@ DEBG("huft1 ");
  3.1347 -   {
  3.1348 -     *t = (struct huft *)NULL;
  3.1349 -     *m = 0;
  3.1350 --    return 0;
  3.1351 -+    return 2;
  3.1352 -   }
  3.1353 - 
  3.1354 - DEBG("huft2 ");
  3.1355 -@@ -374,6 +374,7 @@ DEBG("huft5 ");
  3.1356 -     if ((j = *p++) != 0)
  3.1357 -       v[x[j]++] = i;
  3.1358 -   } while (++i < n);
  3.1359 -+  n = x[g];                   /* set n to length of v */
  3.1360 - 
  3.1361 - DEBG("h6 ");
  3.1362 - 
  3.1363 -@@ -410,12 +411,13 @@ DEBG1("1 ");
  3.1364 - DEBG1("2 ");
  3.1365 -           f -= a + 1;           /* deduct codes from patterns left */
  3.1366 -           xp = c + k;
  3.1367 --          while (++j < z)       /* try smaller tables up to z bits */
  3.1368 --          {
  3.1369 --            if ((f <<= 1) <= *++xp)
  3.1370 --              break;            /* enough codes to use up j bits */
  3.1371 --            f -= *xp;           /* else deduct codes from patterns */
  3.1372 --          }
  3.1373 -+          if (j < z)
  3.1374 -+            while (++j < z)       /* try smaller tables up to z bits */
  3.1375 -+            {
  3.1376 -+              if ((f <<= 1) <= *++xp)
  3.1377 -+                break;            /* enough codes to use up j bits */
  3.1378 -+              f -= *xp;           /* else deduct codes from patterns */
  3.1379 -+            }
  3.1380 -         }
  3.1381 - DEBG1("3 ");
  3.1382 -         z = 1 << j;             /* table entries for j-bit table */
  3.1383 -diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
  3.1384 ---- a/lib/zlib_inflate/inftrees.c
  3.1385 -+++ b/lib/zlib_inflate/inftrees.c
  3.1386 -@@ -141,7 +141,7 @@ static int huft_build(
  3.1387 -   {
  3.1388 -     *t = NULL;
  3.1389 -     *m = 0;
  3.1390 --    return Z_OK;
  3.1391 -+    return Z_DATA_ERROR;
  3.1392 -   }
  3.1393 - 
  3.1394 - 
  3.1395 -diff --git a/mm/memory.c b/mm/memory.c
  3.1396 ---- a/mm/memory.c
  3.1397 -+++ b/mm/memory.c
  3.1398 -@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
  3.1399 - {
  3.1400 - 	pgd_t *pgd;
  3.1401 - 	unsigned long next;
  3.1402 --	unsigned long end = addr + size;
  3.1403 -+	unsigned long end = addr + PAGE_ALIGN(size);
  3.1404 - 	struct mm_struct *mm = vma->vm_mm;
  3.1405 - 	int err;
  3.1406 - 
  3.1407 -diff --git a/mm/mempolicy.c b/mm/mempolicy.c
  3.1408 ---- a/mm/mempolicy.c
  3.1409 -+++ b/mm/mempolicy.c
  3.1410 -@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
  3.1411 - 	struct mempolicy *new;
  3.1412 - 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
  3.1413 - 
  3.1414 --	if (mode > MPOL_MAX)
  3.1415 -+	if (mode < 0 || mode > MPOL_MAX)
  3.1416 - 		return -EINVAL;
  3.1417 - 	err = get_nodes(nodes, nmask, maxnode, mode);
  3.1418 - 	if (err)
  3.1419 -diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
  3.1420 ---- a/net/8021q/vlan.c
  3.1421 -+++ b/net/8021q/vlan.c
  3.1422 -@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
  3.1423 - 			if (!vlandev)
  3.1424 - 				continue;
  3.1425 - 
  3.1426 -+			if (netif_carrier_ok(dev)) {
  3.1427 -+				if (!netif_carrier_ok(vlandev))
  3.1428 -+					netif_carrier_on(vlandev);
  3.1429 -+			} else {
  3.1430 -+				if (netif_carrier_ok(vlandev))
  3.1431 -+					netif_carrier_off(vlandev);
  3.1432 -+			}
  3.1433 -+
  3.1434 - 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
  3.1435 - 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
  3.1436 - 					| flgs;
  3.1437 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  3.1438 ---- a/net/ipv4/ip_output.c
  3.1439 -+++ b/net/ipv4/ip_output.c
  3.1440 -@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
  3.1441 - #ifdef CONFIG_NETFILTER_DEBUG
  3.1442 - 	nf_debug_ip_loopback_xmit(newskb);
  3.1443 - #endif
  3.1444 --	nf_reset(newskb);
  3.1445 - 	netif_rx(newskb);
  3.1446 - 	return 0;
  3.1447 - }
  3.1448 -@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
  3.1449 - 	nf_debug_ip_finish_output2(skb);
  3.1450 - #endif /*CONFIG_NETFILTER_DEBUG*/
  3.1451 - 
  3.1452 --	nf_reset(skb);
  3.1453 --
  3.1454 - 	if (hh) {
  3.1455 - 		int hh_alen;
  3.1456 - 
  3.1457 -diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
  3.1458 ---- a/net/ipv4/netfilter/ip_conntrack_core.c
  3.1459 -+++ b/net/ipv4/netfilter/ip_conntrack_core.c
  3.1460 -@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
  3.1461 - 		schedule();
  3.1462 - 		goto i_see_dead_people;
  3.1463 - 	}
  3.1464 -+	/* wait until all references to ip_conntrack_untracked are dropped */
  3.1465 -+	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
  3.1466 -+		schedule();
  3.1467 - 
  3.1468 - 	kmem_cache_destroy(ip_conntrack_cachep);
  3.1469 - 	kmem_cache_destroy(ip_conntrack_expect_cachep);
  3.1470 -diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
  3.1471 ---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
  3.1472 -+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
  3.1473 -@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
  3.1474 - 				        const struct net_device *out,
  3.1475 - 				        int (*okfn)(struct sk_buff *))
  3.1476 - {
  3.1477 -+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
  3.1478 -+	/* Previously seen (loopback)?  Ignore.  Do this before
  3.1479 -+           fragment check. */
  3.1480 -+	if ((*pskb)->nfct)
  3.1481 -+		return NF_ACCEPT;
  3.1482 -+#endif
  3.1483 -+
  3.1484 - 	/* Gather fragments. */
  3.1485 - 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
  3.1486 - 		*pskb = ip_ct_gather_frags(*pskb,
  3.1487 -diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  3.1488 ---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
  3.1489 -+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
  3.1490 -@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
  3.1491 - 		 enum ip_nat_manip_type maniptype,
  3.1492 - 		 const struct ip_conntrack *conntrack)
  3.1493 - {
  3.1494 --	static u_int16_t port, *portptr;
  3.1495 -+	static u_int16_t port;
  3.1496 -+	u_int16_t *portptr;
  3.1497 - 	unsigned int range_size, min, i;
  3.1498 - 
  3.1499 - 	if (maniptype == IP_NAT_MANIP_SRC)
  3.1500 -diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
  3.1501 ---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
  3.1502 -+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
  3.1503 -@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
  3.1504 - 		 enum ip_nat_manip_type maniptype,
  3.1505 - 		 const struct ip_conntrack *conntrack)
  3.1506 - {
  3.1507 --	static u_int16_t port, *portptr;
  3.1508 -+	static u_int16_t port;
  3.1509 -+	u_int16_t *portptr;
  3.1510 - 	unsigned int range_size, min, i;
  3.1511 - 
  3.1512 - 	if (maniptype == IP_NAT_MANIP_SRC)
  3.1513 -diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
  3.1514 ---- a/net/ipv6/netfilter/ip6_queue.c
  3.1515 -+++ b/net/ipv6/netfilter/ip6_queue.c
  3.1516 -@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
  3.1517 - static void
  3.1518 - ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
  3.1519 - {
  3.1520 -+	local_bh_disable();
  3.1521 - 	nf_reinject(entry->skb, entry->info, verdict);
  3.1522 -+	local_bh_enable();
  3.1523 - 	kfree(entry);
  3.1524 - }
  3.1525 - 
  3.1526 -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
  3.1527 ---- a/net/netlink/af_netlink.c
  3.1528 -+++ b/net/netlink/af_netlink.c
  3.1529 -@@ -315,8 +315,8 @@ err:
  3.1530 - static void netlink_remove(struct sock *sk)
  3.1531 - {
  3.1532 - 	netlink_table_grab();
  3.1533 --	nl_table[sk->sk_protocol].hash.entries--;
  3.1534 --	sk_del_node_init(sk);
  3.1535 -+	if (sk_del_node_init(sk))
  3.1536 -+		nl_table[sk->sk_protocol].hash.entries--;
  3.1537 - 	if (nlk_sk(sk)->groups)
  3.1538 - 		__sk_del_bind_node(sk);
  3.1539 - 	netlink_table_ungrab();
  3.1540 -@@ -429,7 +429,12 @@ retry:
  3.1541 - 	err = netlink_insert(sk, pid);
  3.1542 - 	if (err == -EADDRINUSE)
  3.1543 - 		goto retry;
  3.1544 --	return 0;
  3.1545 -+
  3.1546 -+	/* If 2 threads race to autobind, that is fine.  */
  3.1547 -+	if (err == -EBUSY)
  3.1548 -+		err = 0;
  3.1549 -+
  3.1550 -+	return err;
  3.1551 - }
  3.1552 - 
  3.1553 - static inline int netlink_capable(struct socket *sock, unsigned int flag) 
  3.1554 -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
  3.1555 ---- a/net/packet/af_packet.c
  3.1556 -+++ b/net/packet/af_packet.c
  3.1557 -@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
  3.1558 - 	dst_release(skb->dst);
  3.1559 - 	skb->dst = NULL;
  3.1560 - 
  3.1561 -+	/* drop conntrack reference */
  3.1562 -+	nf_reset(skb);
  3.1563 -+
  3.1564 - 	spkt = (struct sockaddr_pkt*)skb->cb;
  3.1565 - 
  3.1566 - 	skb_push(skb, skb->data-skb->mac.raw);
  3.1567 -@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
  3.1568 - 	dst_release(skb->dst);
  3.1569 - 	skb->dst = NULL;
  3.1570 - 
  3.1571 -+	/* drop conntrack reference */
  3.1572 -+	nf_reset(skb);
  3.1573 -+
  3.1574 - 	spin_lock(&sk->sk_receive_queue.lock);
  3.1575 - 	po->stats.tp_packets++;
  3.1576 - 	__skb_queue_tail(&sk->sk_receive_queue, skb);
  3.1577 -diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
  3.1578 ---- a/net/xfrm/xfrm_user.c
  3.1579 -+++ b/net/xfrm/xfrm_user.c
  3.1580 -@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
  3.1581 - 	if (nr > XFRM_MAX_DEPTH)
  3.1582 - 		return NULL;
  3.1583 - 
  3.1584 -+	if (p->dir > XFRM_POLICY_OUT)
  3.1585 -+		return NULL;
  3.1586 -+
  3.1587 - 	xp = xfrm_policy_alloc(GFP_KERNEL);
  3.1588 - 	if (xp == NULL) {
  3.1589 - 		*dir = -ENOBUFS;
  3.1590 -diff --git a/security/keys/keyring.c b/security/keys/keyring.c
  3.1591 ---- a/security/keys/keyring.c
  3.1592 -+++ b/security/keys/keyring.c
  3.1593 -@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
  3.1594 - 
  3.1595 - 	if (keyring->description) {
  3.1596 - 		write_lock(&keyring_name_lock);
  3.1597 --		list_del(&keyring->type_data.link);
  3.1598 -+
  3.1599 -+		if (keyring->type_data.link.next != NULL &&
  3.1600 -+		    !list_empty(&keyring->type_data.link))
  3.1601 -+			list_del(&keyring->type_data.link);
  3.1602 -+
  3.1603 - 		write_unlock(&keyring_name_lock);
  3.1604 - 	}
  3.1605 - 
  3.1606 -diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
  3.1607 ---- a/security/keys/process_keys.c
  3.1608 -+++ b/security/keys/process_keys.c
  3.1609 -@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
  3.1610 - 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
  3.1611 - 		if (IS_ERR(keyring)) {
  3.1612 - 			ret = PTR_ERR(keyring);
  3.1613 --			goto error;
  3.1614 -+			goto error2;
  3.1615 - 		}
  3.1616 - 	}
  3.1617 - 	else if (IS_ERR(keyring)) {