ia64/xen-unstable

changeset 12591:447ac06f74d3

[LINUX] Update to Linux 2.6.16.31

Updated linux-2.6-xen-sparse/arch/x86_64/kernel/acpi/Makefile

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Mon Nov 27 13:50:02 2006 +0000 (2006-11-27)
parents a5aadc4c6bb6
children aaaa249e6f3b
files
    buildconfigs/mk.linux-2.6-xen
    linux-2.6-xen-sparse/arch/x86_64/kernel/acpi/Makefile
    patches/linux-2.6.16.30/blktap-aio-16_03_06.patch
    patches/linux-2.6.16.30/device_bind.patch
    patches/linux-2.6.16.30/fix-hz-suspend.patch
    patches/linux-2.6.16.30/fix-ide-cd-pio-mode.patch
    patches/linux-2.6.16.30/i386-mach-io-check-nmi.patch
    patches/linux-2.6.16.30/ipv6-no-autoconf.patch
    patches/linux-2.6.16.30/net-csum.patch
    patches/linux-2.6.16.30/net-gso-0-base.patch
    patches/linux-2.6.16.30/net-gso-1-check-dodgy.patch
    patches/linux-2.6.16.30/net-gso-2-checksum-fix.patch
    patches/linux-2.6.16.30/net-gso-3-fix-errorcheck.patch
    patches/linux-2.6.16.30/net-gso-4-kill-warnon.patch
    patches/linux-2.6.16.30/net-gso-5-rcv-mss.patch
    patches/linux-2.6.16.30/pci-mmconfig-fix-from-2.6.17.patch
    patches/linux-2.6.16.30/pmd-shared.patch
    patches/linux-2.6.16.30/rcu_needs_cpu.patch
    patches/linux-2.6.16.30/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
    patches/linux-2.6.16.30/series
    patches/linux-2.6.16.30/smp-alts.patch
    patches/linux-2.6.16.30/tpm_plugin_2.6.17.patch
    patches/linux-2.6.16.30/x86-elfnote-as-preprocessor-macro.patch
    patches/linux-2.6.16.30/x86-increase-interrupt-vector-range.patch
    patches/linux-2.6.16.30/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
    patches/linux-2.6.16.30/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
    patches/linux-2.6.16.30/xen-hotplug.patch
    patches/linux-2.6.16.30/xenoprof-generic.patch
    patches/linux-2.6.16.31/blktap-aio-16_03_06.patch
    patches/linux-2.6.16.31/device_bind.patch
    patches/linux-2.6.16.31/fix-hz-suspend.patch
    patches/linux-2.6.16.31/fix-ide-cd-pio-mode.patch
    patches/linux-2.6.16.31/i386-mach-io-check-nmi.patch
    patches/linux-2.6.16.31/ipv6-no-autoconf.patch
    patches/linux-2.6.16.31/net-csum.patch
    patches/linux-2.6.16.31/net-gso-0-base.patch
    patches/linux-2.6.16.31/net-gso-1-check-dodgy.patch
    patches/linux-2.6.16.31/net-gso-2-checksum-fix.patch
    patches/linux-2.6.16.31/net-gso-3-fix-errorcheck.patch
    patches/linux-2.6.16.31/net-gso-4-kill-warnon.patch
    patches/linux-2.6.16.31/net-gso-5-rcv-mss.patch
    patches/linux-2.6.16.31/pci-mmconfig-fix-from-2.6.17.patch
    patches/linux-2.6.16.31/pmd-shared.patch
    patches/linux-2.6.16.31/rcu_needs_cpu.patch
    patches/linux-2.6.16.31/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
    patches/linux-2.6.16.31/series
    patches/linux-2.6.16.31/smp-alts.patch
    patches/linux-2.6.16.31/tpm_plugin_2.6.17.patch
    patches/linux-2.6.16.31/x86-elfnote-as-preprocessor-macro.patch
    patches/linux-2.6.16.31/x86-increase-interrupt-vector-range.patch
    patches/linux-2.6.16.31/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
    patches/linux-2.6.16.31/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
    patches/linux-2.6.16.31/xen-hotplug.patch
    patches/linux-2.6.16.31/xenoprof-generic.patch
line diff
     1.1 --- a/buildconfigs/mk.linux-2.6-xen	Mon Nov 27 13:50:01 2006 +0000
     1.2 +++ b/buildconfigs/mk.linux-2.6-xen	Mon Nov 27 13:50:02 2006 +0000
     1.3 @@ -1,5 +1,5 @@
     1.4  LINUX_SERIES = 2.6
     1.5 -LINUX_VER    = 2.6.16.30
     1.6 +LINUX_VER    = 2.6.16.31
     1.7  
     1.8  EXTRAVERSION ?= xen
     1.9  
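
Note: the LINUX_VER bump above drives the bulk of this changeset. The sparse-tree build keeps its patch queue in a directory named after the full kernel version, so every file under patches/linux-2.6.16.30/ is removed below and re-added under patches/linux-2.6.16.31/. The fragment below is a minimal, hypothetical sketch of that lookup, not the real buildconfigs logic; PATCHDIR, the patch target and the source-tree directory name are illustrative only.

    # Hypothetical sketch: a version bump selects a different patch directory.
    LINUX_SERIES = 2.6
    LINUX_VER    = 2.6.16.31

    # Patches are looked up by full version, which is why the whole
    # patches/linux-2.6.16.30/ tree moves to patches/linux-2.6.16.31/.
    PATCHDIR     = patches/linux-$(LINUX_VER)

    # Apply the quilt-style series file found in that directory
    # (recipe lines need a literal tab in a real Makefile).
    .PHONY: patch
    patch:
            for p in $$(cat $(PATCHDIR)/series); do \
                    patch -d linux-$(LINUX_VER)-xen -p1 < $(PATCHDIR)/$$p || exit 1; \
            done
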
     2.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/acpi/Makefile	Mon Nov 27 13:50:01 2006 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/acpi/Makefile	Mon Nov 27 13:50:02 2006 +0000
     2.3 @@ -4,6 +4,7 @@ obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wake
     2.4  
     2.5  ifneq ($(CONFIG_ACPI_PROCESSOR),)
     2.6  obj-y			+= processor.o
     2.7 +processor-y		:= ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
     2.8  endif
     2.9  
    2.10  boot-$(CONFIG_XEN)		:= ../../../i386/kernel/acpi/boot-xen.o
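
Note: the added processor-y line uses the standard kbuild composite-object idiom. obj-y names processor.o, but there is no processor.c in this directory, so kbuild links processor.o from the objects listed in processor-y; the relative paths point at the i386 ACPI processor.c and cstate.c, letting the x86_64 Xen build reuse the i386 implementation instead of carrying a copy. A minimal generic sketch of the idiom (config, file and directory names are illustrative, not from the kernel tree):

    # Kbuild composite object: no widget.c exists in this directory, so
    # widget.o is linked (via "ld -r" in this era) from the parts in widget-y.
    obj-$(CONFIG_WIDGET)    += widget.o

    # Relative paths let one architecture's Makefile compile and reuse
    # another architecture's sources.
    widget-y                := ../other/part_a.o ../other/part_b.o
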
     3.1 --- a/patches/linux-2.6.16.30/blktap-aio-16_03_06.patch	Mon Nov 27 13:50:01 2006 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,294 +0,0 @@
     3.4 -diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
     3.5 ---- ../orig-linux-2.6.16.29/fs/aio.c	2006-09-12 19:02:10.000000000 +0100
     3.6 -+++ ./fs/aio.c	2006-09-19 13:58:49.000000000 +0100
     3.7 -@@ -34,6 +34,11 @@
     3.8 - #include <asm/uaccess.h>
     3.9 - #include <asm/mmu_context.h>
    3.10 - 
    3.11 -+#ifdef CONFIG_EPOLL
    3.12 -+#include <linux/poll.h>
    3.13 -+#include <linux/eventpoll.h>
    3.14 -+#endif
    3.15 -+
    3.16 - #if DEBUG > 1
    3.17 - #define dprintk		printk
    3.18 - #else
    3.19 -@@ -1016,6 +1021,10 @@ put_rq:
    3.20 - 	if (waitqueue_active(&ctx->wait))
    3.21 - 		wake_up(&ctx->wait);
    3.22 - 
    3.23 -+#ifdef CONFIG_EPOLL
    3.24 -+	if (ctx->file && waitqueue_active(&ctx->poll_wait))
    3.25 -+		wake_up(&ctx->poll_wait);
    3.26 -+#endif
    3.27 - 	if (ret)
    3.28 - 		put_ioctx(ctx);
    3.29 - 
    3.30 -@@ -1025,6 +1034,8 @@ put_rq:
    3.31 - /* aio_read_evt
    3.32 -  *	Pull an event off of the ioctx's event ring.  Returns the number of 
    3.33 -  *	events fetched (0 or 1 ;-)
    3.34 -+ *	If ent parameter is 0, just returns the number of events that would
    3.35 -+ *	be fetched.
    3.36 -  *	FIXME: make this use cmpxchg.
    3.37 -  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
    3.38 -  */
    3.39 -@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
    3.40 - 
    3.41 - 	head = ring->head % info->nr;
    3.42 - 	if (head != ring->tail) {
    3.43 --		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
    3.44 --		*ent = *evp;
    3.45 --		head = (head + 1) % info->nr;
    3.46 --		smp_mb(); /* finish reading the event before updatng the head */
    3.47 --		ring->head = head;
    3.48 --		ret = 1;
    3.49 --		put_aio_ring_event(evp, KM_USER1);
    3.50 -+		if (ent) { /* event requested */
    3.51 -+			struct io_event *evp =
    3.52 -+				aio_ring_event(info, head, KM_USER1);
    3.53 -+			*ent = *evp;
    3.54 -+			head = (head + 1) % info->nr;
    3.55 -+			/* finish reading the event before updatng the head */
    3.56 -+			smp_mb();
    3.57 -+			ring->head = head;
    3.58 -+			ret = 1;
    3.59 -+			put_aio_ring_event(evp, KM_USER1);
    3.60 -+		} else /* only need to know availability */
    3.61 -+			ret = 1;
    3.62 - 	}
    3.63 - 	spin_unlock(&info->ring_lock);
    3.64 - 
    3.65 -@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
    3.66 - 
    3.67 - 	aio_cancel_all(ioctx);
    3.68 - 	wait_for_all_aios(ioctx);
    3.69 -+#ifdef CONFIG_EPOLL
    3.70 -+	/* forget the poll file, but it's up to the user to close it */
    3.71 -+	if (ioctx->file) {
    3.72 -+		ioctx->file->private_data = 0;
    3.73 -+		ioctx->file = 0;
    3.74 -+	}
    3.75 -+#endif
    3.76 - 	put_ioctx(ioctx);	/* once for the lookup */
    3.77 - }
    3.78 - 
    3.79 -+#ifdef CONFIG_EPOLL
    3.80 -+
    3.81 -+static int aio_queue_fd_close(struct inode *inode, struct file *file)
    3.82 -+{
    3.83 -+	struct kioctx *ioctx = file->private_data;
    3.84 -+	if (ioctx) {
    3.85 -+		file->private_data = 0;
    3.86 -+		spin_lock_irq(&ioctx->ctx_lock);
    3.87 -+		ioctx->file = 0;
    3.88 -+		spin_unlock_irq(&ioctx->ctx_lock);
    3.89 -+	}
    3.90 -+	return 0;
    3.91 -+}
    3.92 -+
    3.93 -+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
    3.94 -+{	unsigned int pollflags = 0;
    3.95 -+	struct kioctx *ioctx = file->private_data;
    3.96 -+
    3.97 -+	if (ioctx) {
    3.98 -+
    3.99 -+		spin_lock_irq(&ioctx->ctx_lock);
   3.100 -+		/* Insert inside our poll wait queue */
   3.101 -+		poll_wait(file, &ioctx->poll_wait, wait);
   3.102 -+
   3.103 -+		/* Check our condition */
   3.104 -+		if (aio_read_evt(ioctx, 0))
   3.105 -+			pollflags = POLLIN | POLLRDNORM;
   3.106 -+		spin_unlock_irq(&ioctx->ctx_lock);
   3.107 -+	}
   3.108 -+
   3.109 -+	return pollflags;
   3.110 -+}
   3.111 -+
   3.112 -+static struct file_operations aioq_fops = {
   3.113 -+	.release	= aio_queue_fd_close,
   3.114 -+	.poll		= aio_queue_fd_poll
   3.115 -+};
   3.116 -+
   3.117 -+/* make_aio_fd:
   3.118 -+ *  Create a file descriptor that can be used to poll the event queue.
   3.119 -+ *  Based and piggybacked on the excellent epoll code.
   3.120 -+ */
   3.121 -+
   3.122 -+static int make_aio_fd(struct kioctx *ioctx)
   3.123 -+{
   3.124 -+	int error, fd;
   3.125 -+	struct inode *inode;
   3.126 -+	struct file *file;
   3.127 -+
   3.128 -+	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
   3.129 -+	if (error)
   3.130 -+		return error;
   3.131 -+
   3.132 -+	/* associate the file with the IO context */
   3.133 -+	file->private_data = ioctx;
   3.134 -+	ioctx->file = file;
   3.135 -+	init_waitqueue_head(&ioctx->poll_wait);
   3.136 -+	return fd;
   3.137 -+}
   3.138 -+#endif
   3.139 -+
   3.140 -+
   3.141 - /* sys_io_setup:
   3.142 -  *	Create an aio_context capable of receiving at least nr_events.
   3.143 -  *	ctxp must not point to an aio_context that already exists, and
   3.144 -@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
   3.145 -  *	resources are available.  May fail with -EFAULT if an invalid
   3.146 -  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
   3.147 -  *	implemented.
   3.148 -+ *
   3.149 -+ *	To request a selectable fd, the user context has to be initialized
   3.150 -+ *	to 1, instead of 0, and the return value is the fd.
   3.151 -+ *	This keeps the system call compatible, since a non-zero value
   3.152 -+ *	was not allowed so far.
   3.153 -  */
   3.154 - asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
   3.155 - {
   3.156 - 	struct kioctx *ioctx = NULL;
   3.157 - 	unsigned long ctx;
   3.158 - 	long ret;
   3.159 -+	int make_fd = 0;
   3.160 - 
   3.161 - 	ret = get_user(ctx, ctxp);
   3.162 - 	if (unlikely(ret))
   3.163 - 		goto out;
   3.164 - 
   3.165 - 	ret = -EINVAL;
   3.166 -+#ifdef CONFIG_EPOLL
   3.167 -+	if (ctx == 1) {
   3.168 -+		make_fd = 1;
   3.169 -+		ctx = 0;
   3.170 -+	}
   3.171 -+#endif
   3.172 - 	if (unlikely(ctx || nr_events == 0)) {
   3.173 - 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
   3.174 - 		         ctx, nr_events);
   3.175 -@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
   3.176 - 	ret = PTR_ERR(ioctx);
   3.177 - 	if (!IS_ERR(ioctx)) {
   3.178 - 		ret = put_user(ioctx->user_id, ctxp);
   3.179 --		if (!ret)
   3.180 --			return 0;
   3.181 -+#ifdef CONFIG_EPOLL
   3.182 -+		if (make_fd && ret >= 0)
   3.183 -+			ret = make_aio_fd(ioctx);
   3.184 -+#endif
   3.185 -+		if (ret >= 0)
   3.186 -+			return ret;
   3.187 - 
   3.188 - 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
   3.189 - 		io_destroy(ioctx);
   3.190 -diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
   3.191 ---- ../orig-linux-2.6.16.29/fs/eventpoll.c	2006-09-12 19:02:10.000000000 +0100
   3.192 -+++ ./fs/eventpoll.c	2006-09-19 13:58:49.000000000 +0100
   3.193 -@@ -235,8 +235,6 @@ struct ep_pqueue {
   3.194 - 
   3.195 - static void ep_poll_safewake_init(struct poll_safewake *psw);
   3.196 - static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
   3.197 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   3.198 --		    struct eventpoll *ep);
   3.199 - static int ep_alloc(struct eventpoll **pep);
   3.200 - static void ep_free(struct eventpoll *ep);
   3.201 - static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
   3.202 -@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
   3.203 - static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
   3.204 - 		   int maxevents, long timeout);
   3.205 - static int eventpollfs_delete_dentry(struct dentry *dentry);
   3.206 --static struct inode *ep_eventpoll_inode(void);
   3.207 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops);
   3.208 - static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
   3.209 - 					      int flags, const char *dev_name,
   3.210 - 					      void *data);
   3.211 -@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
   3.212 - 	 * Creates all the items needed to setup an eventpoll file. That is,
   3.213 - 	 * a file structure, and inode and a free file descriptor.
   3.214 - 	 */
   3.215 --	error = ep_getfd(&fd, &inode, &file, ep);
   3.216 -+	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
   3.217 - 	if (error)
   3.218 - 		goto eexit_2;
   3.219 - 
   3.220 -@@ -710,8 +708,8 @@ eexit_1:
   3.221 - /*
   3.222 -  * Creates the file descriptor to be used by the epoll interface.
   3.223 -  */
   3.224 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   3.225 --		    struct eventpoll *ep)
   3.226 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   3.227 -+		    struct eventpoll *ep, struct file_operations *fops)
   3.228 - {
   3.229 - 	struct qstr this;
   3.230 - 	char name[32];
   3.231 -@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
   3.232 - 		goto eexit_1;
   3.233 - 
   3.234 - 	/* Allocates an inode from the eventpoll file system */
   3.235 --	inode = ep_eventpoll_inode();
   3.236 -+	inode = ep_eventpoll_inode(fops);
   3.237 - 	error = PTR_ERR(inode);
   3.238 - 	if (IS_ERR(inode))
   3.239 - 		goto eexit_2;
   3.240 -@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
   3.241 - 
   3.242 - 	file->f_pos = 0;
   3.243 - 	file->f_flags = O_RDONLY;
   3.244 --	file->f_op = &eventpoll_fops;
   3.245 -+	file->f_op = fops;
   3.246 - 	file->f_mode = FMODE_READ;
   3.247 - 	file->f_version = 0;
   3.248 - 	file->private_data = ep;
   3.249 -@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
   3.250 - }
   3.251 - 
   3.252 - 
   3.253 --static struct inode *ep_eventpoll_inode(void)
   3.254 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops)
   3.255 - {
   3.256 - 	int error = -ENOMEM;
   3.257 - 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
   3.258 -@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
   3.259 - 	if (!inode)
   3.260 - 		goto eexit_1;
   3.261 - 
   3.262 --	inode->i_fop = &eventpoll_fops;
   3.263 -+	inode->i_fop = fops;
   3.264 - 
   3.265 - 	/*
   3.266 - 	 * Mark the inode dirty from the very beginning,
   3.267 -diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
   3.268 ---- ../orig-linux-2.6.16.29/include/linux/aio.h	2006-09-12 19:02:10.000000000 +0100
   3.269 -+++ ./include/linux/aio.h	2006-09-19 13:58:49.000000000 +0100
   3.270 -@@ -191,6 +191,11 @@ struct kioctx {
   3.271 - 	struct aio_ring_info	ring_info;
   3.272 - 
   3.273 - 	struct work_struct	wq;
   3.274 -+#ifdef CONFIG_EPOLL
   3.275 -+	// poll integration
   3.276 -+	wait_queue_head_t       poll_wait;
   3.277 -+	struct file		*file;
   3.278 -+#endif
   3.279 - };
   3.280 - 
   3.281 - /* prototypes */
   3.282 -diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
   3.283 ---- ../orig-linux-2.6.16.29/include/linux/eventpoll.h	2006-09-12 19:02:10.000000000 +0100
   3.284 -+++ ./include/linux/eventpoll.h	2006-09-19 13:58:49.000000000 +0100
   3.285 -@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
   3.286 - }
   3.287 - 
   3.288 - 
   3.289 -+/*
   3.290 -+ * called by aio code to create fd that can poll the  aio event queueQ
   3.291 -+ */
   3.292 -+struct eventpoll;
   3.293 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   3.294 -+             struct eventpoll *ep, struct file_operations *fops);
   3.295 - #else
   3.296 - 
   3.297 - static inline void eventpoll_init_file(struct file *file) {}
     4.1 --- a/patches/linux-2.6.16.30/device_bind.patch	Mon Nov 27 13:50:01 2006 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,15 +0,0 @@
     4.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/base/bus.c ./drivers/base/bus.c
     4.5 ---- ../orig-linux-2.6.16.29/drivers/base/bus.c	2006-09-12 19:02:10.000000000 +0100
     4.6 -+++ ./drivers/base/bus.c	2006-09-19 13:58:54.000000000 +0100
     4.7 -@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
     4.8 - 		up(&dev->sem);
     4.9 - 		if (dev->parent)
    4.10 - 			up(&dev->parent->sem);
    4.11 -+
    4.12 -+		if (err > 0) 		/* success */
    4.13 -+			err = count;
    4.14 -+		else if (err == 0)	/* driver didn't accept device */
    4.15 -+			err = -ENODEV;
    4.16 - 	}
    4.17 - 	put_device(dev);
    4.18 - 	put_bus(bus);
     5.1 --- a/patches/linux-2.6.16.30/fix-hz-suspend.patch	Mon Nov 27 13:50:01 2006 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,26 +0,0 @@
     5.4 -diff -pruN ../orig-linux-2.6.16.29/kernel/timer.c ./kernel/timer.c
     5.5 ---- ../orig-linux-2.6.16.29/kernel/timer.c	2006-09-12 19:02:10.000000000 +0100
     5.6 -+++ ./kernel/timer.c	2006-09-19 13:58:58.000000000 +0100
     5.7 -@@ -555,6 +555,22 @@ found:
     5.8 - 	}
     5.9 - 	spin_unlock(&base->t_base.lock);
    5.10 - 
    5.11 -+	/*
    5.12 -+	 * It can happen that other CPUs service timer IRQs and increment
    5.13 -+	 * jiffies, but we have not yet got a local timer tick to process
    5.14 -+	 * the timer wheels.  In that case, the expiry time can be before
    5.15 -+	 * jiffies, but since the high-resolution timer here is relative to
    5.16 -+	 * jiffies, the default expression when high-resolution timers are
    5.17 -+	 * not active,
    5.18 -+	 *
    5.19 -+	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
    5.20 -+	 *
    5.21 -+	 * would falsely evaluate to true.  If that is the case, just
    5.22 -+	 * return jiffies so that we can immediately fire the local timer
    5.23 -+	 */
    5.24 -+	if (time_before(expires, jiffies))
    5.25 -+		return jiffies;
    5.26 -+
    5.27 - 	if (time_before(hr_expires, expires))
    5.28 - 		return hr_expires;
    5.29 - 
     6.1 --- a/patches/linux-2.6.16.30/fix-ide-cd-pio-mode.patch	Mon Nov 27 13:50:01 2006 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,18 +0,0 @@
     6.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
     6.5 ---- ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c	2006-09-12 19:02:10.000000000 +0100
     6.6 -+++ ./drivers/ide/ide-lib.c	2006-09-19 13:59:03.000000000 +0100
     6.7 -@@ -410,10 +410,10 @@ void ide_toggle_bounce(ide_drive_t *driv
     6.8 - {
     6.9 - 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
    6.10 - 
    6.11 --	if (!PCI_DMA_BUS_IS_PHYS) {
    6.12 --		addr = BLK_BOUNCE_ANY;
    6.13 --	} else if (on && drive->media == ide_disk) {
    6.14 --		if (HWIF(drive)->pci_dev)
    6.15 -+	if (on && drive->media == ide_disk) {
    6.16 -+		if (!PCI_DMA_BUS_IS_PHYS)
    6.17 -+			addr = BLK_BOUNCE_ANY;
    6.18 -+		else if (HWIF(drive)->pci_dev)
    6.19 - 			addr = HWIF(drive)->pci_dev->dma_mask;
    6.20 - 	}
    6.21 - 
     7.1 --- a/patches/linux-2.6.16.30/i386-mach-io-check-nmi.patch	Mon Nov 27 13:50:01 2006 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,45 +0,0 @@
     7.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
     7.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c	2006-09-12 19:02:10.000000000 +0100
     7.6 -+++ ./arch/i386/kernel/traps.c	2006-09-19 13:59:06.000000000 +0100
     7.7 -@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
     7.8 - 
     7.9 - static void io_check_error(unsigned char reason, struct pt_regs * regs)
    7.10 - {
    7.11 --	unsigned long i;
    7.12 --
    7.13 - 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
    7.14 - 	show_registers(regs);
    7.15 - 
    7.16 - 	/* Re-enable the IOCK line, wait for a few seconds */
    7.17 --	reason = (reason & 0xf) | 8;
    7.18 --	outb(reason, 0x61);
    7.19 --	i = 2000;
    7.20 --	while (--i) udelay(1000);
    7.21 --	reason &= ~8;
    7.22 --	outb(reason, 0x61);
    7.23 -+	clear_io_check_error(reason);
    7.24 - }
    7.25 - 
    7.26 - static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
    7.27 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
    7.28 ---- ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h	2006-09-12 19:02:10.000000000 +0100
    7.29 -+++ ./include/asm-i386/mach-default/mach_traps.h	2006-09-19 13:59:06.000000000 +0100
    7.30 -@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
    7.31 - 	outb(reason, 0x61);
    7.32 - }
    7.33 - 
    7.34 -+static inline void clear_io_check_error(unsigned char reason)
    7.35 -+{
    7.36 -+	unsigned long i;
    7.37 -+
    7.38 -+	reason = (reason & 0xf) | 8;
    7.39 -+	outb(reason, 0x61);
    7.40 -+	i = 2000;
    7.41 -+	while (--i) udelay(1000);
    7.42 -+	reason &= ~8;
    7.43 -+	outb(reason, 0x61);
    7.44 -+}
    7.45 -+
    7.46 - static inline unsigned char get_nmi_reason(void)
    7.47 - {
    7.48 - 	return inb(0x61);
     8.1 --- a/patches/linux-2.6.16.30/ipv6-no-autoconf.patch	Mon Nov 27 13:50:01 2006 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,19 +0,0 @@
     8.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/addrconf.c ./net/ipv6/addrconf.c
     8.5 ---- ../orig-linux-2.6.16.29/net/ipv6/addrconf.c	2006-09-12 19:02:10.000000000 +0100
     8.6 -+++ ./net/ipv6/addrconf.c	2006-09-19 13:59:11.000000000 +0100
     8.7 -@@ -2471,6 +2471,7 @@ static void addrconf_dad_start(struct in
     8.8 - 	spin_lock_bh(&ifp->lock);
     8.9 - 
    8.10 - 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
    8.11 -+	    !(dev->flags&IFF_MULTICAST) ||
    8.12 - 	    !(ifp->flags&IFA_F_TENTATIVE)) {
    8.13 - 		ifp->flags &= ~IFA_F_TENTATIVE;
    8.14 - 		spin_unlock_bh(&ifp->lock);
    8.15 -@@ -2555,6 +2556,7 @@ static void addrconf_dad_completed(struc
    8.16 - 	if (ifp->idev->cnf.forwarding == 0 &&
    8.17 - 	    ifp->idev->cnf.rtr_solicits > 0 &&
    8.18 - 	    (dev->flags&IFF_LOOPBACK) == 0 &&
    8.19 -+	    (dev->flags & IFF_MULTICAST) &&
    8.20 - 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
    8.21 - 		struct in6_addr all_routers;
    8.22 - 
     9.1 --- a/patches/linux-2.6.16.30/net-csum.patch	Mon Nov 27 13:50:01 2006 +0000
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,63 +0,0 @@
     9.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
     9.5 ---- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-12 19:02:10.000000000 +0100
     9.6 -+++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-19 13:59:15.000000000 +0100
     9.7 -@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
     9.8 - 	if (hdrsize < sizeof(*hdr))
     9.9 - 		return 1;
    9.10 - 
    9.11 --	hdr->check = ip_nat_cheat_check(~oldip, newip,
    9.12 -+#ifdef CONFIG_XEN
    9.13 -+	if ((*pskb)->proto_csum_blank)
    9.14 -+		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    9.15 -+	else
    9.16 -+#endif
    9.17 -+		hdr->check = ip_nat_cheat_check(~oldip, newip,
    9.18 - 					ip_nat_cheat_check(oldport ^ 0xFFFF,
    9.19 - 							   newport,
    9.20 - 							   hdr->check));
    9.21 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
    9.22 ---- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-12 19:02:10.000000000 +0100
    9.23 -+++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-19 13:59:15.000000000 +0100
    9.24 -@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
    9.25 - 		newport = tuple->dst.u.udp.port;
    9.26 - 		portptr = &hdr->dest;
    9.27 - 	}
    9.28 --	if (hdr->check) /* 0 is a special case meaning no checksum */
    9.29 --		hdr->check = ip_nat_cheat_check(~oldip, newip,
    9.30 -+	if (hdr->check) { /* 0 is a special case meaning no checksum */
    9.31 -+#ifdef CONFIG_XEN
    9.32 -+		if ((*pskb)->proto_csum_blank)
    9.33 -+			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    9.34 -+		else
    9.35 -+#endif
    9.36 -+			hdr->check = ip_nat_cheat_check(~oldip, newip,
    9.37 - 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
    9.38 - 							   newport,
    9.39 - 							   hdr->check));
    9.40 -+	}
    9.41 - 	*portptr = newport;
    9.42 - 	return 1;
    9.43 - }
    9.44 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
    9.45 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-12 19:02:10.000000000 +0100
    9.46 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
    9.47 -@@ -17,6 +17,8 @@
    9.48 - #include <net/xfrm.h>
    9.49 - #include <net/icmp.h>
    9.50 - 
    9.51 -+extern int skb_checksum_setup(struct sk_buff *skb);
    9.52 -+
    9.53 - /* Add encapsulation header.
    9.54 -  *
    9.55 -  * In transport mode, the IP header will be moved forward to make space
    9.56 -@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
    9.57 - 	struct xfrm_state *x = dst->xfrm;
    9.58 - 	int err;
    9.59 - 	
    9.60 -+	err = skb_checksum_setup(skb);
    9.61 -+	if (err)
    9.62 -+		goto error_nolock;
    9.63 -+
    9.64 - 	if (skb->ip_summed == CHECKSUM_HW) {
    9.65 - 		err = skb_checksum_help(skb, 0);
    9.66 - 		if (err)
    10.1 --- a/patches/linux-2.6.16.30/net-gso-0-base.patch	Mon Nov 27 13:50:01 2006 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,2835 +0,0 @@
    10.4 -diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
    10.5 ---- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt	2006-09-12 19:02:10.000000000 +0100
    10.6 -+++ ./Documentation/networking/netdevices.txt	2006-09-19 13:59:20.000000000 +0100
    10.7 -@@ -42,9 +42,9 @@ dev->get_stats:
    10.8 - 	Context: nominally process, but don't sleep inside an rwlock
    10.9 - 
   10.10 - dev->hard_start_xmit:
   10.11 --	Synchronization: dev->xmit_lock spinlock.
   10.12 -+	Synchronization: netif_tx_lock spinlock.
   10.13 - 	When the driver sets NETIF_F_LLTX in dev->features this will be
   10.14 --	called without holding xmit_lock. In this case the driver 
   10.15 -+	called without holding netif_tx_lock. In this case the driver
   10.16 - 	has to lock by itself when needed. It is recommended to use a try lock
   10.17 - 	for this and return -1 when the spin lock fails. 
   10.18 - 	The locking there should also properly protect against 
   10.19 -@@ -62,12 +62,12 @@ dev->hard_start_xmit:
   10.20 - 	  Only valid when NETIF_F_LLTX is set.
   10.21 - 
   10.22 - dev->tx_timeout:
   10.23 --	Synchronization: dev->xmit_lock spinlock.
   10.24 -+	Synchronization: netif_tx_lock spinlock.
   10.25 - 	Context: BHs disabled
   10.26 - 	Notes: netif_queue_stopped() is guaranteed true
   10.27 - 
   10.28 - dev->set_multicast_list:
   10.29 --	Synchronization: dev->xmit_lock spinlock.
   10.30 -+	Synchronization: netif_tx_lock spinlock.
   10.31 - 	Context: BHs disabled
   10.32 - 
   10.33 - dev->poll:
   10.34 -diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
   10.35 ---- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c	2006-09-12 19:02:10.000000000 +0100
   10.36 -+++ ./drivers/block/aoe/aoenet.c	2006-09-19 13:59:20.000000000 +0100
   10.37 -@@ -95,9 +95,8 @@ mac_addr(char addr[6])
   10.38 - static struct sk_buff *
   10.39 - skb_check(struct sk_buff *skb)
   10.40 - {
   10.41 --	if (skb_is_nonlinear(skb))
   10.42 - 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
   10.43 --	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
   10.44 -+	if (skb_linearize(skb)) {
   10.45 - 		dev_kfree_skb(skb);
   10.46 - 		return NULL;
   10.47 - 	}
   10.48 -diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
   10.49 ---- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-12 19:02:10.000000000 +0100
   10.50 -+++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 13:59:20.000000000 +0100
   10.51 -@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
   10.52 - 
   10.53 - 	ipoib_mcast_stop_thread(dev, 0);
   10.54 - 
   10.55 --	spin_lock_irqsave(&dev->xmit_lock, flags);
   10.56 -+	local_irq_save(flags);
   10.57 -+	netif_tx_lock(dev);
   10.58 - 	spin_lock(&priv->lock);
   10.59 - 
   10.60 - 	/*
   10.61 -@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
   10.62 - 	}
   10.63 - 
   10.64 - 	spin_unlock(&priv->lock);
   10.65 --	spin_unlock_irqrestore(&dev->xmit_lock, flags);
   10.66 -+	netif_tx_unlock(dev);
   10.67 -+	local_irq_restore(flags);
   10.68 - 
   10.69 - 	/* We have to cancel outside of the spinlock */
   10.70 - 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
   10.71 -diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
   10.72 ---- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c	2006-09-12 19:02:10.000000000 +0100
   10.73 -+++ ./drivers/media/dvb/dvb-core/dvb_net.c	2006-09-19 13:59:20.000000000 +0100
   10.74 -@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
   10.75 - 
   10.76 - 	dvb_net_feed_stop(dev);
   10.77 - 	priv->rx_mode = RX_MODE_UNI;
   10.78 --	spin_lock_bh(&dev->xmit_lock);
   10.79 -+	netif_tx_lock_bh(dev);
   10.80 - 
   10.81 - 	if (dev->flags & IFF_PROMISC) {
   10.82 - 		dprintk("%s: promiscuous mode\n", dev->name);
   10.83 -@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
   10.84 - 		}
   10.85 - 	}
   10.86 - 
   10.87 --	spin_unlock_bh(&dev->xmit_lock);
   10.88 -+	netif_tx_unlock_bh(dev);
   10.89 - 	dvb_net_feed_start(dev);
   10.90 - }
   10.91 - 
   10.92 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
   10.93 ---- ../orig-linux-2.6.16.29/drivers/net/8139cp.c	2006-09-12 19:02:10.000000000 +0100
   10.94 -+++ ./drivers/net/8139cp.c	2006-09-19 13:59:20.000000000 +0100
   10.95 -@@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
   10.96 - 	entry = cp->tx_head;
   10.97 - 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   10.98 - 	if (dev->features & NETIF_F_TSO)
   10.99 --		mss = skb_shinfo(skb)->tso_size;
  10.100 -+		mss = skb_shinfo(skb)->gso_size;
  10.101 - 
  10.102 - 	if (skb_shinfo(skb)->nr_frags == 0) {
  10.103 - 		struct cp_desc *txd = &cp->tx_ring[entry];
  10.104 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
  10.105 ---- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-12 19:02:10.000000000 +0100
  10.106 -+++ ./drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
  10.107 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
  10.108 - 		skb = tx_buf->skb;
  10.109 - #ifdef BCM_TSO 
  10.110 - 		/* partial BD completions possible with TSO packets */
  10.111 --		if (skb_shinfo(skb)->tso_size) {
  10.112 -+		if (skb_shinfo(skb)->gso_size) {
  10.113 - 			u16 last_idx, last_ring_idx;
  10.114 - 
  10.115 - 			last_idx = sw_cons +
  10.116 -@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
  10.117 - 	return 1;
  10.118 - }
  10.119 - 
  10.120 --/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
  10.121 -+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  10.122 -  * from set_multicast.
  10.123 -  */
  10.124 - static void
  10.125 -@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
  10.126 - }
  10.127 - #endif
  10.128 - 
  10.129 --/* Called with dev->xmit_lock.
  10.130 -+/* Called with netif_tx_lock.
  10.131 -  * hard_start_xmit is pseudo-lockless - a lock is only required when
  10.132 -  * the tx queue is full. This way, we get the benefit of lockless
  10.133 -  * operations most of the time without the complexities to handle
  10.134 -@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
  10.135 - 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
  10.136 - 	}
  10.137 - #ifdef BCM_TSO 
  10.138 --	if ((mss = skb_shinfo(skb)->tso_size) &&
  10.139 -+	if ((mss = skb_shinfo(skb)->gso_size) &&
  10.140 - 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
  10.141 - 		u32 tcp_opt_len, ip_tcp_len;
  10.142 - 
  10.143 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
  10.144 ---- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c	2006-09-12 19:02:10.000000000 +0100
  10.145 -+++ ./drivers/net/bonding/bond_main.c	2006-09-19 13:59:20.000000000 +0100
  10.146 -@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
  10.147 - }
  10.148 - 
  10.149 - #define BOND_INTERSECT_FEATURES \
  10.150 --	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
  10.151 --	NETIF_F_TSO|NETIF_F_UFO)
  10.152 -+	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
  10.153 - 
  10.154 - /* 
  10.155 -  * Compute the common dev->feature set available to all slaves.  Some
  10.156 -@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
  10.157 - 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
  10.158 - 
  10.159 - 	if ((features & NETIF_F_SG) && 
  10.160 --	    !(features & (NETIF_F_IP_CSUM |
  10.161 --			  NETIF_F_NO_CSUM |
  10.162 --			  NETIF_F_HW_CSUM)))
  10.163 -+	    !(features & NETIF_F_ALL_CSUM))
  10.164 - 		features &= ~NETIF_F_SG;
  10.165 - 
  10.166 - 	/* 
  10.167 -@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
  10.168 - 	 */
  10.169 - 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
  10.170 - 
  10.171 --	/* don't acquire bond device's xmit_lock when 
  10.172 -+	/* don't acquire bond device's netif_tx_lock when
  10.173 - 	 * transmitting */
  10.174 - 	bond_dev->features |= NETIF_F_LLTX;
  10.175 - 
  10.176 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
  10.177 ---- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-12 19:02:10.000000000 +0100
  10.178 -+++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
  10.179 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  10.180 - 	struct cpl_tx_pkt *cpl;
  10.181 - 
  10.182 - #ifdef NETIF_F_TSO
  10.183 --	if (skb_shinfo(skb)->tso_size) {
  10.184 -+	if (skb_shinfo(skb)->gso_size) {
  10.185 - 		int eth_type;
  10.186 - 		struct cpl_tx_pkt_lso *hdr;
  10.187 - 
  10.188 -@@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  10.189 - 		hdr->ip_hdr_words = skb->nh.iph->ihl;
  10.190 - 		hdr->tcp_hdr_words = skb->h.th->doff;
  10.191 - 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
  10.192 --						skb_shinfo(skb)->tso_size));
  10.193 -+						skb_shinfo(skb)->gso_size));
  10.194 - 		hdr->len = htonl(skb->len - sizeof(*hdr));
  10.195 - 		cpl = (struct cpl_tx_pkt *)hdr;
  10.196 - 		sge->stats.tx_lso_pkts++;
  10.197 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
  10.198 ---- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-12 19:02:10.000000000 +0100
  10.199 -+++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
  10.200 -@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
  10.201 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  10.202 - 	int err;
  10.203 - 
  10.204 --	if (skb_shinfo(skb)->tso_size) {
  10.205 -+	if (skb_shinfo(skb)->gso_size) {
  10.206 - 		if (skb_header_cloned(skb)) {
  10.207 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  10.208 - 			if (err)
  10.209 -@@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
  10.210 - 		}
  10.211 - 
  10.212 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  10.213 --		mss = skb_shinfo(skb)->tso_size;
  10.214 -+		mss = skb_shinfo(skb)->gso_size;
  10.215 - 		if (skb->protocol == ntohs(ETH_P_IP)) {
  10.216 - 			skb->nh.iph->tot_len = 0;
  10.217 - 			skb->nh.iph->check = 0;
  10.218 -@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
  10.219 - 		 * tso gets written back prematurely before the data is fully
  10.220 - 		 * DMAd to the controller */
  10.221 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
  10.222 --				!skb_shinfo(skb)->tso_size) {
  10.223 -+				!skb_shinfo(skb)->gso_size) {
  10.224 - 			tx_ring->last_tx_tso = 0;
  10.225 - 			size -= 4;
  10.226 - 		}
  10.227 -@@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  10.228 - 	}
  10.229 - 
  10.230 - #ifdef NETIF_F_TSO
  10.231 --	mss = skb_shinfo(skb)->tso_size;
  10.232 -+	mss = skb_shinfo(skb)->gso_size;
  10.233 - 	/* The controller does a simple calculation to 
  10.234 - 	 * make sure there is enough room in the FIFO before
  10.235 - 	 * initiating the DMA for each buffer.  The calc is:
  10.236 -@@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  10.237 - #ifdef NETIF_F_TSO
  10.238 - 	/* Controller Erratum workaround */
  10.239 - 	if (!skb->data_len && tx_ring->last_tx_tso &&
  10.240 --		!skb_shinfo(skb)->tso_size)
  10.241 -+		!skb_shinfo(skb)->gso_size)
  10.242 - 		count++;
  10.243 - #endif
  10.244 - 
  10.245 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
  10.246 ---- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-12 19:02:10.000000000 +0100
  10.247 -+++ ./drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
  10.248 -@@ -482,9 +482,9 @@ typedef union _ring_type {
  10.249 -  * critical parts:
  10.250 -  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  10.251 -  *	by the arch code for interrupts.
  10.252 -- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
  10.253 -+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  10.254 -  *	needs dev->priv->lock :-(
  10.255 -- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
  10.256 -+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  10.257 -  */
  10.258 - 
  10.259 - /* in dev: base, irq */
  10.260 -@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
  10.261 - 
  10.262 - /*
  10.263 -  * nv_start_xmit: dev->hard_start_xmit function
  10.264 -- * Called with dev->xmit_lock held.
  10.265 -+ * Called with netif_tx_lock held.
  10.266 -  */
  10.267 - static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  10.268 - {
  10.269 -@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
  10.270 - 	np->tx_skbuff[nr] = skb;
  10.271 - 
  10.272 - #ifdef NETIF_F_TSO
  10.273 --	if (skb_shinfo(skb)->tso_size)
  10.274 --		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  10.275 -+	if (skb_shinfo(skb)->gso_size)
  10.276 -+		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  10.277 - 	else
  10.278 - #endif
  10.279 - 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  10.280 -@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
  10.281 - 
  10.282 - /*
  10.283 -  * nv_tx_timeout: dev->tx_timeout function
  10.284 -- * Called with dev->xmit_lock held.
  10.285 -+ * Called with netif_tx_lock held.
  10.286 -  */
  10.287 - static void nv_tx_timeout(struct net_device *dev)
  10.288 - {
  10.289 -@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
  10.290 - 		 * Changing the MTU is a rare event, it shouldn't matter.
  10.291 - 		 */
  10.292 - 		disable_irq(dev->irq);
  10.293 --		spin_lock_bh(&dev->xmit_lock);
  10.294 -+		netif_tx_lock_bh(dev);
  10.295 - 		spin_lock(&np->lock);
  10.296 - 		/* stop engines */
  10.297 - 		nv_stop_rx(dev);
  10.298 -@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
  10.299 - 		nv_start_rx(dev);
  10.300 - 		nv_start_tx(dev);
  10.301 - 		spin_unlock(&np->lock);
  10.302 --		spin_unlock_bh(&dev->xmit_lock);
  10.303 -+		netif_tx_unlock_bh(dev);
  10.304 - 		enable_irq(dev->irq);
  10.305 - 	}
  10.306 - 	return 0;
  10.307 -@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
  10.308 - 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  10.309 - 
  10.310 - 	if (netif_running(dev)) {
  10.311 --		spin_lock_bh(&dev->xmit_lock);
  10.312 -+		netif_tx_lock_bh(dev);
  10.313 - 		spin_lock_irq(&np->lock);
  10.314 - 
  10.315 - 		/* stop rx engine */
  10.316 -@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
  10.317 - 		/* restart rx engine */
  10.318 - 		nv_start_rx(dev);
  10.319 - 		spin_unlock_irq(&np->lock);
  10.320 --		spin_unlock_bh(&dev->xmit_lock);
  10.321 -+		netif_tx_unlock_bh(dev);
  10.322 - 	} else {
  10.323 - 		nv_copy_mac_to_hw(dev);
  10.324 - 	}
  10.325 -@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
  10.326 - 
  10.327 - /*
  10.328 -  * nv_set_multicast: dev->set_multicast function
  10.329 -- * Called with dev->xmit_lock held.
  10.330 -+ * Called with netif_tx_lock held.
  10.331 -  */
  10.332 - static void nv_set_multicast(struct net_device *dev)
  10.333 - {
  10.334 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
  10.335 ---- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c	2006-09-12 19:02:10.000000000 +0100
  10.336 -+++ ./drivers/net/hamradio/6pack.c	2006-09-19 13:59:20.000000000 +0100
  10.337 -@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
  10.338 - {
  10.339 - 	struct sockaddr_ax25 *sa = addr;
  10.340 - 
  10.341 --	spin_lock_irq(&dev->xmit_lock);
  10.342 -+	netif_tx_lock_bh(dev);
  10.343 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  10.344 --	spin_unlock_irq(&dev->xmit_lock);
  10.345 -+	netif_tx_unlock_bh(dev);
  10.346 - 
  10.347 - 	return 0;
  10.348 - }
  10.349 -@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
  10.350 - 			break;
  10.351 - 		}
  10.352 - 
  10.353 --		spin_lock_irq(&dev->xmit_lock);
  10.354 -+		netif_tx_lock_bh(dev);
  10.355 - 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
  10.356 --		spin_unlock_irq(&dev->xmit_lock);
  10.357 -+		netif_tx_unlock_bh(dev);
  10.358 - 
  10.359 - 		err = 0;
  10.360 - 		break;
  10.361 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
  10.362 ---- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c	2006-09-12 19:02:10.000000000 +0100
  10.363 -+++ ./drivers/net/hamradio/mkiss.c	2006-09-19 13:59:20.000000000 +0100
  10.364 -@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
  10.365 - {
  10.366 - 	struct sockaddr_ax25 *sa = addr;
  10.367 - 
  10.368 --	spin_lock_irq(&dev->xmit_lock);
  10.369 -+	netif_tx_lock_bh(dev);
  10.370 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  10.371 --	spin_unlock_irq(&dev->xmit_lock);
  10.372 -+	netif_tx_unlock_bh(dev);
  10.373 - 
  10.374 - 	return 0;
  10.375 - }
  10.376 -@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
  10.377 - 			break;
  10.378 - 		}
  10.379 - 
  10.380 --		spin_lock_irq(&dev->xmit_lock);
  10.381 -+		netif_tx_lock_bh(dev);
  10.382 - 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
  10.383 --		spin_unlock_irq(&dev->xmit_lock);
  10.384 -+		netif_tx_unlock_bh(dev);
  10.385 - 
  10.386 - 		err = 0;
  10.387 - 		break;
  10.388 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
  10.389 ---- ../orig-linux-2.6.16.29/drivers/net/ifb.c	2006-09-12 19:02:10.000000000 +0100
  10.390 -+++ ./drivers/net/ifb.c	2006-09-19 13:59:20.000000000 +0100
  10.391 -@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
  10.392 - 	dp->st_task_enter++;
  10.393 - 	if ((skb = skb_peek(&dp->tq)) == NULL) {
  10.394 - 		dp->st_txq_refl_try++;
  10.395 --		if (spin_trylock(&_dev->xmit_lock)) {
  10.396 -+		if (netif_tx_trylock(_dev)) {
  10.397 - 			dp->st_rxq_enter++;
  10.398 - 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
  10.399 - 				skb_queue_tail(&dp->tq, skb);
  10.400 - 				dp->st_rx2tx_tran++;
  10.401 - 			}
  10.402 --			spin_unlock(&_dev->xmit_lock);
  10.403 -+			netif_tx_unlock(_dev);
  10.404 - 		} else {
  10.405 - 			/* reschedule */
  10.406 - 			dp->st_rxq_notenter++;
  10.407 -@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
  10.408 - 		}
  10.409 - 	}
  10.410 - 
  10.411 --	if (spin_trylock(&_dev->xmit_lock)) {
  10.412 -+	if (netif_tx_trylock(_dev)) {
  10.413 - 		dp->st_rxq_check++;
  10.414 - 		if ((skb = skb_peek(&dp->rq)) == NULL) {
  10.415 - 			dp->tasklet_pending = 0;
  10.416 -@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
  10.417 - 				netif_wake_queue(_dev);
  10.418 - 		} else {
  10.419 - 			dp->st_rxq_rsch++;
  10.420 --			spin_unlock(&_dev->xmit_lock);
  10.421 -+			netif_tx_unlock(_dev);
  10.422 - 			goto resched;
  10.423 - 		}
  10.424 --		spin_unlock(&_dev->xmit_lock);
  10.425 -+		netif_tx_unlock(_dev);
  10.426 - 	} else {
  10.427 - resched:
  10.428 - 		dp->tasklet_pending = 1;
  10.429 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
  10.430 ---- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c	2006-09-12 19:02:10.000000000 +0100
  10.431 -+++ ./drivers/net/irda/vlsi_ir.c	2006-09-19 13:59:20.000000000 +0100
  10.432 -@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
  10.433 - 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
  10.434 - 			    	break;
  10.435 - 			udelay(100);
  10.436 --			/* must not sleep here - we are called under xmit_lock! */
  10.437 -+			/* must not sleep here - called under netif_tx_lock! */
  10.438 - 		}
  10.439 - 	}
  10.440 - 
  10.441 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
  10.442 ---- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-12 19:02:10.000000000 +0100
  10.443 -+++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
  10.444 -@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  10.445 - 	uint16_t ipcse, tucse, mss;
  10.446 - 	int err;
  10.447 - 
  10.448 --	if(likely(skb_shinfo(skb)->tso_size)) {
  10.449 -+	if(likely(skb_shinfo(skb)->gso_size)) {
  10.450 - 		if (skb_header_cloned(skb)) {
  10.451 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  10.452 - 			if (err)
  10.453 -@@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  10.454 - 		}
  10.455 - 
  10.456 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  10.457 --		mss = skb_shinfo(skb)->tso_size;
  10.458 -+		mss = skb_shinfo(skb)->gso_size;
  10.459 - 		skb->nh.iph->tot_len = 0;
  10.460 - 		skb->nh.iph->check = 0;
  10.461 - 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
  10.462 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
  10.463 ---- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-12 19:02:10.000000000 +0100
  10.464 -+++ ./drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
  10.465 -@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
  10.466 - 	struct iphdr *iph = skb->nh.iph;
  10.467 - 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
  10.468 - 	unsigned int doffset = (iph->ihl + th->doff) * 4;
  10.469 --	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
  10.470 -+	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
  10.471 - 	unsigned int offset = 0;
  10.472 - 	u32 seq = ntohl(th->seq);
  10.473 - 	u16 id  = ntohs(iph->id);
  10.474 -@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
  10.475 - #endif
  10.476 - 
  10.477 - #ifdef LOOPBACK_TSO
  10.478 --	if (skb_shinfo(skb)->tso_size) {
  10.479 -+	if (skb_shinfo(skb)->gso_size) {
  10.480 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
  10.481 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
  10.482 - 
  10.483 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
  10.484 ---- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c	2006-09-12 19:02:10.000000000 +0100
  10.485 -+++ ./drivers/net/mv643xx_eth.c	2006-09-19 13:59:20.000000000 +0100
  10.486 -@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
  10.487 - 
  10.488 - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
  10.489 - 	if (has_tiny_unaligned_frags(skb)) {
  10.490 --		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
  10.491 -+		if (__skb_linearize(skb)) {
  10.492 - 			stats->tx_dropped++;
  10.493 - 			printk(KERN_DEBUG "%s: failed to linearize tiny "
  10.494 - 					"unaligned fragment\n", dev->name);
  10.495 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
  10.496 ---- ../orig-linux-2.6.16.29/drivers/net/natsemi.c	2006-09-12 19:02:10.000000000 +0100
  10.497 -+++ ./drivers/net/natsemi.c	2006-09-19 13:59:20.000000000 +0100
  10.498 -@@ -323,12 +323,12 @@ performance critical codepaths:
  10.499 - The rx process only runs in the interrupt handler. Access from outside
  10.500 - the interrupt handler is only permitted after disable_irq().
  10.501 - 
  10.502 --The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
  10.503 -+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
  10.504 - is set, then access is permitted under spin_lock_irq(&np->lock).
  10.505 - 
  10.506 - Thus configuration functions that want to access everything must call
  10.507 - 	disable_irq(dev->irq);
  10.508 --	spin_lock_bh(dev->xmit_lock);
  10.509 -+	netif_tx_lock_bh(dev);
  10.510 - 	spin_lock_irq(&np->lock);
  10.511 - 
  10.512 - IV. Notes
  10.513 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
  10.514 ---- ../orig-linux-2.6.16.29/drivers/net/r8169.c	2006-09-12 19:02:10.000000000 +0100
  10.515 -+++ ./drivers/net/r8169.c	2006-09-19 13:59:20.000000000 +0100
  10.516 -@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
  10.517 - static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
  10.518 - {
  10.519 - 	if (dev->features & NETIF_F_TSO) {
  10.520 --		u32 mss = skb_shinfo(skb)->tso_size;
  10.521 -+		u32 mss = skb_shinfo(skb)->gso_size;
  10.522 - 
  10.523 - 		if (mss)
  10.524 - 			return LargeSend | ((mss & MSSMask) << MSSShift);
  10.525 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
  10.526 ---- ../orig-linux-2.6.16.29/drivers/net/s2io.c	2006-09-12 19:02:10.000000000 +0100
  10.527 -+++ ./drivers/net/s2io.c	2006-09-19 13:59:20.000000000 +0100
  10.528 -@@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
  10.529 - 	txdp->Control_1 = 0;
  10.530 - 	txdp->Control_2 = 0;
  10.531 - #ifdef NETIF_F_TSO
  10.532 --	mss = skb_shinfo(skb)->tso_size;
  10.533 --	if (mss) {
  10.534 -+	mss = skb_shinfo(skb)->gso_size;
  10.535 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
  10.536 - 		txdp->Control_1 |= TXD_TCP_LSO_EN;
  10.537 - 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
  10.538 - 	}
  10.539 -@@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
  10.540 - 	}
  10.541 - 
  10.542 - 	frg_len = skb->len - skb->data_len;
  10.543 --	if (skb_shinfo(skb)->ufo_size) {
  10.544 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
  10.545 - 		int ufo_size;
  10.546 - 
  10.547 --		ufo_size = skb_shinfo(skb)->ufo_size;
  10.548 -+		ufo_size = skb_shinfo(skb)->gso_size;
  10.549 - 		ufo_size &= ~7;
  10.550 - 		txdp->Control_1 |= TXD_UFO_EN;
  10.551 - 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
  10.552 -@@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
  10.553 - 	txdp->Host_Control = (unsigned long) skb;
  10.554 - 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
  10.555 - 
  10.556 --	if (skb_shinfo(skb)->ufo_size)
  10.557 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  10.558 - 		txdp->Control_1 |= TXD_UFO_EN;
  10.559 - 
  10.560 - 	frg_cnt = skb_shinfo(skb)->nr_frags;
  10.561 -@@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
  10.562 - 		    (sp->pdev, frag->page, frag->page_offset,
  10.563 - 		     frag->size, PCI_DMA_TODEVICE);
  10.564 - 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
  10.565 --		if (skb_shinfo(skb)->ufo_size)
  10.566 -+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  10.567 - 			txdp->Control_1 |= TXD_UFO_EN;
  10.568 - 	}
  10.569 - 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
  10.570 - 
  10.571 --	if (skb_shinfo(skb)->ufo_size)
  10.572 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  10.573 - 		frg_cnt++; /* as Txd0 was used for inband header */
  10.574 - 
  10.575 - 	tx_fifo = mac_control->tx_FIFO_start[queue];
  10.576 -@@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
  10.577 - 	if (mss)
  10.578 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
  10.579 - #endif
  10.580 --	if (skb_shinfo(skb)->ufo_size)
  10.581 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  10.582 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
  10.583 - 	writeq(val64, &tx_fifo->List_Control);
  10.584 - 
  10.585 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
  10.586 ---- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-12 19:02:10.000000000 +0100
  10.587 -+++ ./drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
  10.588 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
  10.589 - 	count = sizeof(dma_addr_t) / sizeof(u32);
  10.590 - 	count += skb_shinfo(skb)->nr_frags * count;
  10.591 - 
  10.592 --	if (skb_shinfo(skb)->tso_size)
  10.593 -+	if (skb_shinfo(skb)->gso_size)
  10.594 - 		++count;
  10.595 - 
  10.596 - 	if (skb->ip_summed == CHECKSUM_HW)
  10.597 -@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
  10.598 - 	}
  10.599 - 
  10.600 - 	/* Check for TCP Segmentation Offload */
  10.601 --	mss = skb_shinfo(skb)->tso_size;
  10.602 -+	mss = skb_shinfo(skb)->gso_size;
  10.603 - 	if (mss != 0) {
  10.604 - 		/* just drop the packet if non-linear expansion fails */
  10.605 - 		if (skb_header_cloned(skb) &&
  10.606 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
  10.607 ---- ../orig-linux-2.6.16.29/drivers/net/tg3.c	2006-09-12 19:02:10.000000000 +0100
  10.608 -+++ ./drivers/net/tg3.c	2006-09-19 13:59:20.000000000 +0100
  10.609 -@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
  10.610 - #if TG3_TSO_SUPPORT != 0
  10.611 - 	mss = 0;
  10.612 - 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
  10.613 --	    (mss = skb_shinfo(skb)->tso_size) != 0) {
  10.614 -+	    (mss = skb_shinfo(skb)->gso_size) != 0) {
  10.615 - 		int tcp_opt_len, ip_tcp_len;
  10.616 - 
  10.617 - 		if (skb_header_cloned(skb) &&
  10.618 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
  10.619 ---- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c	2006-09-12 19:02:10.000000000 +0100
  10.620 -+++ ./drivers/net/tulip/winbond-840.c	2006-09-19 13:59:20.000000000 +0100
  10.621 -@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
  10.622 -  * - get_stats:
  10.623 -  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
  10.624 -  * - hard_start_xmit:
  10.625 -- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
  10.626 -+ * 	synchronize_irq + netif_tx_disable;
  10.627 -  * - tx_timeout:
  10.628 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  10.629 -+ * 	netif_device_detach + netif_tx_disable;
  10.630 -  * - set_multicast_list
  10.631 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  10.632 -+ * 	netif_device_detach + netif_tx_disable;
  10.633 -  * - interrupt handler
  10.634 -  * 	doesn't touch hw if not present, synchronize_irq waits for
  10.635 -  * 	running instances of the interrupt handler.
  10.636 -@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
  10.637 - 		netif_device_detach(dev);
  10.638 - 		update_csr6(dev, 0);
  10.639 - 		iowrite32(0, ioaddr + IntrEnable);
  10.640 --		netif_stop_queue(dev);
  10.641 - 		spin_unlock_irq(&np->lock);
  10.642 - 
  10.643 --		spin_unlock_wait(&dev->xmit_lock);
  10.644 - 		synchronize_irq(dev->irq);
  10.645 -+		netif_tx_disable(dev);
  10.646 - 	
  10.647 - 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
  10.648 - 
  10.649 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  10.650 ---- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-12 19:02:10.000000000 +0100
  10.651 -+++ ./drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  10.652 -@@ -340,7 +340,7 @@ enum state_values {
  10.653 - #endif
  10.654 - 
  10.655 - #if defined(NETIF_F_TSO)
  10.656 --#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
  10.657 -+#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
  10.658 - #define TSO_NUM_DESCRIPTORS	2
  10.659 - #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
  10.660 - #else
  10.661 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
  10.662 ---- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c	2006-09-12 19:02:10.000000000 +0100
  10.663 -+++ ./drivers/net/via-velocity.c	2006-09-19 13:59:20.000000000 +0100
  10.664 -@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
  10.665 - 
  10.666 - 	int pktlen = skb->len;
  10.667 - 
  10.668 -+#ifdef VELOCITY_ZERO_COPY_SUPPORT
  10.669 -+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
  10.670 -+		kfree_skb(skb);
  10.671 -+		return 0;
  10.672 -+	}
  10.673 -+#endif
  10.674 -+
  10.675 - 	spin_lock_irqsave(&vptr->lock, flags);
  10.676 - 
  10.677 - 	index = vptr->td_curr[qnum];
  10.678 -@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
  10.679 - 	 */
  10.680 - 	if (pktlen < ETH_ZLEN) {
  10.681 - 		/* Cannot occur until ZC support */
  10.682 --		if(skb_linearize(skb, GFP_ATOMIC))
  10.683 --			return 0; 
  10.684 - 		pktlen = ETH_ZLEN;
  10.685 - 		memcpy(tdinfo->buf, skb->data, skb->len);
  10.686 - 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
  10.687 -@@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff 
  10.688 - 		int nfrags = skb_shinfo(skb)->nr_frags;
  10.689 - 		tdinfo->skb = skb;
  10.690 - 		if (nfrags > 6) {
  10.691 --			skb_linearize(skb, GFP_ATOMIC);
  10.692 - 			memcpy(tdinfo->buf, skb->data, skb->len);
  10.693 - 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
  10.694 - 			td_ptr->tdesc0.pktsize = 
  10.695 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
  10.696 ---- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c	2006-09-12 19:02:10.000000000 +0100
  10.697 -+++ ./drivers/net/wireless/orinoco.c	2006-09-19 13:59:20.000000000 +0100
  10.698 -@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
  10.699 - 	/* Set promiscuity / multicast*/
  10.700 - 	priv->promiscuous = 0;
  10.701 - 	priv->mc_count = 0;
  10.702 --	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
  10.703 -+
  10.704 -+	/* FIXME: what about netif_tx_lock */
  10.705 -+	__orinoco_set_multicast_list(dev);
  10.706 - 
  10.707 - 	return 0;
  10.708 - }
  10.709 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
  10.710 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c	2006-09-12 19:02:10.000000000 +0100
  10.711 -+++ ./drivers/s390/net/qeth_eddp.c	2006-09-19 13:59:20.000000000 +0100
  10.712 -@@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
  10.713 -        }
  10.714 - 	tcph = eddp->skb->h.th;
  10.715 - 	while (eddp->skb_offset < eddp->skb->len) {
  10.716 --		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
  10.717 -+		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
  10.718 - 			       (int)(eddp->skb->len - eddp->skb_offset));
  10.719 - 		/* prepare qdio hdr */
  10.720 - 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
  10.721 -@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
  10.722 - 	
  10.723 - 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
  10.724 - 	/* can we put multiple skbs in one page? */
  10.725 --	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
  10.726 -+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
  10.727 - 	if (skbs_per_page > 1){
  10.728 --		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
  10.729 -+		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
  10.730 - 				 skbs_per_page + 1;
  10.731 - 		ctx->elements_per_skb = 1;
  10.732 - 	} else {
  10.733 - 		/* no -> how many elements per skb? */
  10.734 --		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
  10.735 -+		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
  10.736 - 				     PAGE_SIZE) >> PAGE_SHIFT;
  10.737 - 		ctx->num_pages = ctx->elements_per_skb *
  10.738 --				 (skb_shinfo(skb)->tso_segs + 1);
  10.739 -+				 (skb_shinfo(skb)->gso_segs + 1);
  10.740 - 	}
  10.741 - 	ctx->num_elements = ctx->elements_per_skb *
  10.742 --			    (skb_shinfo(skb)->tso_segs + 1);
  10.743 -+			    (skb_shinfo(skb)->gso_segs + 1);
  10.744 - }
  10.745 - 
  10.746 - static inline struct qeth_eddp_context *
  10.747 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  10.748 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-12 19:02:10.000000000 +0100
  10.749 -+++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  10.750 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  10.751 - 	queue = card->qdio.out_qs
  10.752 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  10.753 - 
  10.754 --	if (skb_shinfo(skb)->tso_size)
  10.755 -+	if (skb_shinfo(skb)->gso_size)
  10.756 - 		large_send = card->options.large_send;
  10.757 - 
  10.758 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
  10.759 -@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  10.760 - 		card->stats.tx_packets++;
  10.761 - 		card->stats.tx_bytes += skb->len;
  10.762 - #ifdef CONFIG_QETH_PERF_STATS
  10.763 --		if (skb_shinfo(skb)->tso_size &&
  10.764 -+		if (skb_shinfo(skb)->gso_size &&
  10.765 - 		   !(large_send == QETH_LARGE_SEND_NO)) {
  10.766 - 			card->perf_stats.large_send_bytes += skb->len;
  10.767 - 			card->perf_stats.large_send_cnt++;
  10.768 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
  10.769 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h	2006-09-12 19:02:10.000000000 +0100
  10.770 -+++ ./drivers/s390/net/qeth_tso.h	2006-09-19 13:59:20.000000000 +0100
  10.771 -@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
  10.772 - 	hdr->ext.hdr_version = 1;
  10.773 - 	hdr->ext.hdr_len     = 28;
  10.774 - 	/*insert non-fix values */
  10.775 --	hdr->ext.mss = skb_shinfo(skb)->tso_size;
  10.776 -+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
  10.777 - 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
  10.778 - 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
  10.779 - 				       sizeof(struct qeth_hdr_tso));
  10.780 -diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
  10.781 ---- ../orig-linux-2.6.16.29/include/linux/ethtool.h	2006-09-12 19:02:10.000000000 +0100
  10.782 -+++ ./include/linux/ethtool.h	2006-09-19 13:59:20.000000000 +0100
  10.783 -@@ -408,6 +408,8 @@ struct ethtool_ops {
  10.784 - #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
  10.785 - #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
  10.786 - #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
  10.787 -+#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
  10.788 -+#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
  10.789 - 
  10.790 - /* compatibility with older code */
  10.791 - #define SPARC_ETH_GSET		ETHTOOL_GSET
  10.792 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  10.793 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-12 19:02:10.000000000 +0100
  10.794 -+++ ./include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  10.795 -@@ -230,7 +230,8 @@ enum netdev_state_t
  10.796 - 	__LINK_STATE_SCHED,
  10.797 - 	__LINK_STATE_NOCARRIER,
  10.798 - 	__LINK_STATE_RX_SCHED,
  10.799 --	__LINK_STATE_LINKWATCH_PENDING
  10.800 -+	__LINK_STATE_LINKWATCH_PENDING,
  10.801 -+	__LINK_STATE_QDISC_RUNNING,
  10.802 - };
  10.803 - 
  10.804 - 
  10.805 -@@ -306,9 +307,17 @@ struct net_device
  10.806 - #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
  10.807 - #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
  10.808 - #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
  10.809 --#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
  10.810 -+#define NETIF_F_GSO		2048	/* Enable software GSO. */
  10.811 - #define NETIF_F_LLTX		4096	/* LockLess TX */
  10.812 --#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
  10.813 -+
  10.814 -+	/* Segmentation offload features */
  10.815 -+#define NETIF_F_GSO_SHIFT	16
  10.816 -+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  10.817 -+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
  10.818 -+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  10.819 -+
  10.820 -+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  10.821 -+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
  10.822 - 
  10.823 - 	struct net_device	*next_sched;
  10.824 - 
  10.825 -@@ -394,6 +403,9 @@ struct net_device
  10.826 - 	struct list_head	qdisc_list;
  10.827 - 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
  10.828 - 
  10.829 -+	/* Partially transmitted GSO packet. */
  10.830 -+	struct sk_buff		*gso_skb;
  10.831 -+
  10.832 - 	/* ingress path synchronizer */
  10.833 - 	spinlock_t		ingress_lock;
  10.834 - 	struct Qdisc		*qdisc_ingress;
  10.835 -@@ -402,7 +414,7 @@ struct net_device
  10.836 -  * One part is mostly used on xmit path (device)
  10.837 -  */
  10.838 - 	/* hard_start_xmit synchronizer */
  10.839 --	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
  10.840 -+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  10.841 - 	/* cpu id of processor entered to hard_start_xmit or -1,
  10.842 - 	   if nobody entered there.
  10.843 - 	 */
  10.844 -@@ -527,6 +539,8 @@ struct packet_type {
  10.845 - 					 struct net_device *,
  10.846 - 					 struct packet_type *,
  10.847 - 					 struct net_device *);
  10.848 -+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  10.849 -+						int features);
  10.850 - 	void			*af_packet_priv;
  10.851 - 	struct list_head	list;
  10.852 - };
  10.853 -@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
  10.854 - extern int		dev_set_mtu(struct net_device *, int);
  10.855 - extern int		dev_set_mac_address(struct net_device *,
  10.856 - 					    struct sockaddr *);
  10.857 --extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
  10.858 -+extern int		dev_hard_start_xmit(struct sk_buff *skb,
  10.859 -+					    struct net_device *dev);
  10.860 - 
  10.861 - extern void		dev_init(void);
  10.862 - 
  10.863 -@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
  10.864 - 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
  10.865 - }
  10.866 - 
  10.867 -+static inline void netif_tx_lock(struct net_device *dev)
  10.868 -+{
  10.869 -+	spin_lock(&dev->_xmit_lock);
  10.870 -+	dev->xmit_lock_owner = smp_processor_id();
  10.871 -+}
  10.872 -+
  10.873 -+static inline void netif_tx_lock_bh(struct net_device *dev)
  10.874 -+{
  10.875 -+	spin_lock_bh(&dev->_xmit_lock);
  10.876 -+	dev->xmit_lock_owner = smp_processor_id();
  10.877 -+}
  10.878 -+
  10.879 -+static inline int netif_tx_trylock(struct net_device *dev)
  10.880 -+{
  10.881 -+	int err = spin_trylock(&dev->_xmit_lock);
  10.882 -+	if (!err)
  10.883 -+		dev->xmit_lock_owner = smp_processor_id();
  10.884 -+	return err;
  10.885 -+}
  10.886 -+
  10.887 -+static inline void netif_tx_unlock(struct net_device *dev)
  10.888 -+{
  10.889 -+	dev->xmit_lock_owner = -1;
  10.890 -+	spin_unlock(&dev->_xmit_lock);
  10.891 -+}
  10.892 -+
  10.893 -+static inline void netif_tx_unlock_bh(struct net_device *dev)
  10.894 -+{
  10.895 -+	dev->xmit_lock_owner = -1;
  10.896 -+	spin_unlock_bh(&dev->_xmit_lock);
  10.897 -+}
  10.898 -+
  10.899 - static inline void netif_tx_disable(struct net_device *dev)
  10.900 - {
  10.901 --	spin_lock_bh(&dev->xmit_lock);
  10.902 -+	netif_tx_lock_bh(dev);
  10.903 - 	netif_stop_queue(dev);
  10.904 --	spin_unlock_bh(&dev->xmit_lock);
  10.905 -+	netif_tx_unlock_bh(dev);
  10.906 - }
  10.907 - 
  10.908 - /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  10.909 -@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
  10.910 - extern int		weight_p;
  10.911 - extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
  10.912 - extern int skb_checksum_help(struct sk_buff *skb, int inward);
  10.913 -+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  10.914 - #ifdef CONFIG_BUG
  10.915 - extern void netdev_rx_csum_fault(struct net_device *dev);
  10.916 - #else
  10.917 -@@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
  10.918 - 
  10.919 - extern void linkwatch_run_queue(void);
  10.920 - 
  10.921 -+static inline int skb_gso_ok(struct sk_buff *skb, int features)
  10.922 -+{
  10.923 -+	int feature = skb_shinfo(skb)->gso_size ?
  10.924 -+		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  10.925 -+	return (features & feature) == feature;
  10.926 -+}
  10.927 -+
  10.928 -+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  10.929 -+{
  10.930 -+	return !skb_gso_ok(skb, dev->features);
  10.931 -+}
  10.932 -+
  10.933 - #endif /* __KERNEL__ */
  10.934 - 
  10.935 - #endif	/* _LINUX_DEV_H */
  10.936 -diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  10.937 ---- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-12 19:02:10.000000000 +0100
  10.938 -+++ ./include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  10.939 -@@ -134,9 +134,10 @@ struct skb_frag_struct {
  10.940 - struct skb_shared_info {
  10.941 - 	atomic_t	dataref;
  10.942 - 	unsigned short	nr_frags;
  10.943 --	unsigned short	tso_size;
  10.944 --	unsigned short	tso_segs;
  10.945 --	unsigned short  ufo_size;
  10.946 -+	unsigned short	gso_size;
  10.947 -+	/* Warning: this field is not always filled in (UFO)! */
  10.948 -+	unsigned short	gso_segs;
  10.949 -+	unsigned short  gso_type;
  10.950 - 	unsigned int    ip6_frag_id;
  10.951 - 	struct sk_buff	*frag_list;
  10.952 - 	skb_frag_t	frags[MAX_SKB_FRAGS];
  10.953 -@@ -168,6 +169,14 @@ enum {
  10.954 - 	SKB_FCLONE_CLONE,
  10.955 - };
  10.956 - 
  10.957 -+enum {
  10.958 -+	SKB_GSO_TCPV4 = 1 << 0,
  10.959 -+	SKB_GSO_UDPV4 = 1 << 1,
  10.960 -+
  10.961 -+	/* This indicates the skb is from an untrusted source. */
  10.962 -+	SKB_GSO_DODGY = 1 << 2,
  10.963 -+};
  10.964 -+
  10.965 - /** 
  10.966 -  *	struct sk_buff - socket buffer
  10.967 -  *	@next: Next buffer in list
  10.968 -@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  10.969 - 	return 0;
  10.970 - }
  10.971 - 
  10.972 -+static inline int __skb_linearize(struct sk_buff *skb)
  10.973 -+{
  10.974 -+	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  10.975 -+}
  10.976 -+
  10.977 - /**
  10.978 -  *	skb_linearize - convert paged skb to linear one
  10.979 -  *	@skb: buffer to linarize
  10.980 -- *	@gfp: allocation mode
  10.981 -  *
  10.982 -  *	If there is no free memory -ENOMEM is returned, otherwise zero
  10.983 -  *	is returned and the old skb data released.
  10.984 -  */
  10.985 --extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  10.986 --static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  10.987 -+static inline int skb_linearize(struct sk_buff *skb)
  10.988 -+{
  10.989 -+	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  10.990 -+}
  10.991 -+
  10.992 -+/**
  10.993 -+ *	skb_linearize_cow - make sure skb is linear and writable
  10.994 -+ *	@skb: buffer to process
  10.995 -+ *
  10.996 -+ *	If there is no free memory -ENOMEM is returned, otherwise zero
  10.997 -+ *	is returned and the old skb data released.
  10.998 -+ */
  10.999 -+static inline int skb_linearize_cow(struct sk_buff *skb)
 10.1000 - {
 10.1001 --	return __skb_linearize(skb, gfp);
 10.1002 -+	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
 10.1003 -+	       __skb_linearize(skb) : 0;
 10.1004 - }
 10.1005 - 
 10.1006 - /**
 10.1007 -@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
 10.1008 - 				 struct sk_buff *skb1, const u32 len);
 10.1009 - 
 10.1010 - extern void	       skb_release_data(struct sk_buff *skb);
 10.1011 -+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 10.1012 - 
 10.1013 - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 10.1014 - 				       int len, void *buffer)
 10.1015 -diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
 10.1016 ---- ../orig-linux-2.6.16.29/include/net/pkt_sched.h	2006-09-12 19:02:10.000000000 +0100
 10.1017 -+++ ./include/net/pkt_sched.h	2006-09-19 13:59:20.000000000 +0100
 10.1018 -@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
 10.1019 - 		struct rtattr *tab);
 10.1020 - extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 10.1021 - 
 10.1022 --extern int qdisc_restart(struct net_device *dev);
 10.1023 -+extern void __qdisc_run(struct net_device *dev);
 10.1024 - 
 10.1025 - static inline void qdisc_run(struct net_device *dev)
 10.1026 - {
 10.1027 --	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 10.1028 --		/* NOTHING */;
 10.1029 -+	if (!netif_queue_stopped(dev) &&
 10.1030 -+	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 10.1031 -+		__qdisc_run(dev);
 10.1032 - }
 10.1033 - 
 10.1034 - extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 10.1035 -diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
 10.1036 ---- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-12 19:02:10.000000000 +0100
 10.1037 -+++ ./include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
 10.1038 -@@ -37,6 +37,8 @@
 10.1039 - struct net_protocol {
 10.1040 - 	int			(*handler)(struct sk_buff *skb);
 10.1041 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 10.1042 -+	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
 10.1043 -+					       int features);
 10.1044 - 	int			no_policy;
 10.1045 - };
 10.1046 - 
 10.1047 -diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
 10.1048 ---- ../orig-linux-2.6.16.29/include/net/sock.h	2006-09-12 19:02:10.000000000 +0100
 10.1049 -+++ ./include/net/sock.h	2006-09-19 13:59:20.000000000 +0100
 10.1050 -@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
 10.1051 - {
 10.1052 - 	__sk_dst_set(sk, dst);
 10.1053 - 	sk->sk_route_caps = dst->dev->features;
 10.1054 -+	if (sk->sk_route_caps & NETIF_F_GSO)
 10.1055 -+		sk->sk_route_caps |= NETIF_F_TSO;
 10.1056 - 	if (sk->sk_route_caps & NETIF_F_TSO) {
 10.1057 - 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
 10.1058 - 			sk->sk_route_caps &= ~NETIF_F_TSO;
 10.1059 -+		else 
 10.1060 -+			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 10.1061 - 	}
 10.1062 - }
 10.1063 - 
 10.1064 -diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
 10.1065 ---- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-12 19:02:10.000000000 +0100
 10.1066 -+++ ./include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
 10.1067 -@@ -552,13 +552,13 @@ struct tcp_skb_cb {
 10.1068 -  */
 10.1069 - static inline int tcp_skb_pcount(const struct sk_buff *skb)
 10.1070 - {
 10.1071 --	return skb_shinfo(skb)->tso_segs;
 10.1072 -+	return skb_shinfo(skb)->gso_segs;
 10.1073 - }
 10.1074 - 
 10.1075 - /* This is valid iff tcp_skb_pcount() > 1. */
 10.1076 - static inline int tcp_skb_mss(const struct sk_buff *skb)
 10.1077 - {
 10.1078 --	return skb_shinfo(skb)->tso_size;
 10.1079 -+	return skb_shinfo(skb)->gso_size;
 10.1080 - }
 10.1081 - 
 10.1082 - static inline void tcp_dec_pcount_approx(__u32 *count,
 10.1083 -@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
 10.1084 - 
 10.1085 - extern int tcp_v4_destroy_sock(struct sock *sk);
 10.1086 - 
 10.1087 -+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
 10.1088 -+
 10.1089 - #ifdef CONFIG_PROC_FS
 10.1090 - extern int  tcp4_proc_init(void);
 10.1091 - extern void tcp4_proc_exit(void);
 10.1092 -diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
 10.1093 ---- ../orig-linux-2.6.16.29/net/atm/clip.c	2006-09-12 19:02:10.000000000 +0100
 10.1094 -+++ ./net/atm/clip.c	2006-09-19 13:59:20.000000000 +0100
 10.1095 -@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
 10.1096 - 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
 10.1097 - 		return;
 10.1098 - 	}
 10.1099 --	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
 10.1100 -+	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 10.1101 - 	entry->neigh->used = jiffies;
 10.1102 - 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 10.1103 - 		if (*walk == clip_vcc) {
 10.1104 -@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
 10.1105 - 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 10.1106 - 	  "0x%p)\n",entry,clip_vcc);
 10.1107 - out:
 10.1108 --	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
 10.1109 -+	netif_tx_unlock_bh(entry->neigh->dev);
 10.1110 - }
 10.1111 - 
 10.1112 - /* The neighbour entry n->lock is held. */
 10.1113 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
 10.1114 ---- ../orig-linux-2.6.16.29/net/bridge/br_device.c	2006-09-12 19:02:10.000000000 +0100
 10.1115 -+++ ./net/bridge/br_device.c	2006-09-19 13:59:20.000000000 +0100
 10.1116 -@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
 10.1117 - 	struct net_bridge *br = netdev_priv(dev);
 10.1118 - 
 10.1119 - 	if (data)
 10.1120 --		br->feature_mask |= NETIF_F_IP_CSUM;
 10.1121 -+		br->feature_mask |= NETIF_F_NO_CSUM;
 10.1122 - 	else
 10.1123 --		br->feature_mask &= ~NETIF_F_IP_CSUM;
 10.1124 -+		br->feature_mask &= ~NETIF_F_ALL_CSUM;
 10.1125 - 
 10.1126 - 	br_features_recompute(br);
 10.1127 - 	return 0;
 10.1128 -@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
 10.1129 - 	dev->set_mac_address = br_set_mac_address;
 10.1130 - 	dev->priv_flags = IFF_EBRIDGE;
 10.1131 - 
 10.1132 -- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
 10.1133 -- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
 10.1134 -+ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 10.1135 -+ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
 10.1136 - }
 10.1137 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
 10.1138 ---- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-12 19:02:10.000000000 +0100
 10.1139 -+++ ./net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
 10.1140 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
 10.1141 - int br_dev_queue_push_xmit(struct sk_buff *skb)
 10.1142 - {
 10.1143 - 	/* drop mtu oversized packets except tso */
 10.1144 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
 10.1145 -+	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
 10.1146 - 		kfree_skb(skb);
 10.1147 - 	else {
 10.1148 - #ifdef CONFIG_BRIDGE_NETFILTER
 10.1149 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
 10.1150 ---- ../orig-linux-2.6.16.29/net/bridge/br_if.c	2006-09-12 19:02:10.000000000 +0100
 10.1151 -+++ ./net/bridge/br_if.c	2006-09-19 13:59:20.000000000 +0100
 10.1152 -@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
 10.1153 - 	struct net_bridge_port *p;
 10.1154 - 	unsigned long features, checksum;
 10.1155 - 
 10.1156 --	features = br->feature_mask &~ NETIF_F_IP_CSUM;
 10.1157 --	checksum = br->feature_mask & NETIF_F_IP_CSUM;
 10.1158 -+	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
 10.1159 -+	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
 10.1160 - 
 10.1161 - 	list_for_each_entry(p, &br->port_list, list) {
 10.1162 --		if (!(p->dev->features 
 10.1163 --		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
 10.1164 -+		unsigned long feature = p->dev->features;
 10.1165 -+
 10.1166 -+		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
 10.1167 -+			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
 10.1168 -+		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
 10.1169 -+			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
 10.1170 -+		if (!(feature & NETIF_F_IP_CSUM))
 10.1171 - 			checksum = 0;
 10.1172 --		features &= p->dev->features;
 10.1173 -+
 10.1174 -+		if (feature & NETIF_F_GSO)
 10.1175 -+			feature |= NETIF_F_TSO;
 10.1176 -+		feature |= NETIF_F_GSO;
 10.1177 -+
 10.1178 -+		features &= feature;
 10.1179 - 	}
 10.1180 - 
 10.1181 --	br->dev->features = features | checksum | NETIF_F_LLTX;
 10.1182 -+	br->dev->features = features | checksum | NETIF_F_LLTX |
 10.1183 -+			    NETIF_F_GSO_ROBUST;
 10.1184 - }
 10.1185 - 
 10.1186 - /* called with RTNL */
 10.1187 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
 10.1188 ---- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-12 19:02:10.000000000 +0100
 10.1189 -+++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
 10.1190 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
 10.1191 - {
 10.1192 - 	if (skb->protocol == htons(ETH_P_IP) &&
 10.1193 - 	    skb->len > skb->dev->mtu &&
 10.1194 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
 10.1195 -+	    !skb_shinfo(skb)->gso_size)
 10.1196 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
 10.1197 - 	else
 10.1198 - 		return br_dev_queue_push_xmit(skb);
 10.1199 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
 10.1200 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-12 19:02:10.000000000 +0100
 10.1201 -+++ ./net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
 10.1202 -@@ -115,6 +115,7 @@
 10.1203 - #include <net/iw_handler.h>
 10.1204 - #endif	/* CONFIG_NET_RADIO */
 10.1205 - #include <asm/current.h>
 10.1206 -+#include <linux/err.h>
 10.1207 - 
 10.1208 - /*
 10.1209 -  *	The list of packet types we will receive (as opposed to discard)
 10.1210 -@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
 10.1211 -  *	taps currently in use.
 10.1212 -  */
 10.1213 - 
 10.1214 --void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 10.1215 -+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 10.1216 - {
 10.1217 - 	struct packet_type *ptype;
 10.1218 - 
 10.1219 -@@ -1106,6 +1107,45 @@ out:	
 10.1220 - 	return ret;
 10.1221 - }
 10.1222 - 
 10.1223 -+/**
 10.1224 -+ *	skb_gso_segment - Perform segmentation on skb.
 10.1225 -+ *	@skb: buffer to segment
 10.1226 -+ *	@features: features for the output path (see dev->features)
 10.1227 -+ *
 10.1228 -+ *	This function segments the given skb and returns a list of segments.
 10.1229 -+ *
 10.1230 -+ *	It may return NULL if the skb requires no segmentation.  This is
 10.1231 -+ *	only possible when GSO is used for verifying header integrity.
 10.1232 -+ */
 10.1233 -+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 10.1234 -+{
 10.1235 -+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 10.1236 -+	struct packet_type *ptype;
 10.1237 -+	int type = skb->protocol;
 10.1238 -+
 10.1239 -+	BUG_ON(skb_shinfo(skb)->frag_list);
 10.1240 -+	BUG_ON(skb->ip_summed != CHECKSUM_HW);
 10.1241 -+
 10.1242 -+	skb->mac.raw = skb->data;
 10.1243 -+	skb->mac_len = skb->nh.raw - skb->data;
 10.1244 -+	__skb_pull(skb, skb->mac_len);
 10.1245 -+
 10.1246 -+	rcu_read_lock();
 10.1247 -+	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 10.1248 -+		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
 10.1249 -+			segs = ptype->gso_segment(skb, features);
 10.1250 -+			break;
 10.1251 -+		}
 10.1252 -+	}
 10.1253 -+	rcu_read_unlock();
 10.1254 -+
 10.1255 -+	__skb_push(skb, skb->data - skb->mac.raw);
 10.1256 -+
 10.1257 -+	return segs;
 10.1258 -+}
 10.1259 -+
 10.1260 -+EXPORT_SYMBOL(skb_gso_segment);
 10.1261 -+
 10.1262 - /* Take action when hardware reception checksum errors are detected. */
 10.1263 - #ifdef CONFIG_BUG
 10.1264 - void netdev_rx_csum_fault(struct net_device *dev)
 10.1265 -@@ -1142,75 +1182,108 @@ static inline int illegal_highdma(struct
 10.1266 - #define illegal_highdma(dev, skb)	(0)
 10.1267 - #endif
 10.1268 - 
 10.1269 --/* Keep head the same: replace data */
 10.1270 --int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 10.1271 -+struct dev_gso_cb {
 10.1272 -+	void (*destructor)(struct sk_buff *skb);
 10.1273 -+};
 10.1274 -+
 10.1275 -+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
 10.1276 -+
 10.1277 -+static void dev_gso_skb_destructor(struct sk_buff *skb)
 10.1278 -+{
 10.1279 -+	struct dev_gso_cb *cb;
 10.1280 -+
 10.1281 -+	do {
 10.1282 -+		struct sk_buff *nskb = skb->next;
 10.1283 -+
 10.1284 -+		skb->next = nskb->next;
 10.1285 -+		nskb->next = NULL;
 10.1286 -+		kfree_skb(nskb);
 10.1287 -+	} while (skb->next);
 10.1288 -+
 10.1289 -+	cb = DEV_GSO_CB(skb);
 10.1290 -+	if (cb->destructor)
 10.1291 -+		cb->destructor(skb);
 10.1292 -+}
 10.1293 -+
 10.1294 -+/**
 10.1295 -+ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 10.1296 -+ *	@skb: buffer to segment
 10.1297 -+ *
 10.1298 -+ *	This function segments the given skb and stores the list of segments
 10.1299 -+ *	in skb->next.
 10.1300 -+ */
 10.1301 -+static int dev_gso_segment(struct sk_buff *skb)
 10.1302 - {
 10.1303 --	unsigned int size;
 10.1304 --	u8 *data;
 10.1305 --	long offset;
 10.1306 --	struct skb_shared_info *ninfo;
 10.1307 --	int headerlen = skb->data - skb->head;
 10.1308 --	int expand = (skb->tail + skb->data_len) - skb->end;
 10.1309 --
 10.1310 --	if (skb_shared(skb))
 10.1311 --		BUG();
 10.1312 --
 10.1313 --	if (expand <= 0)
 10.1314 --		expand = 0;
 10.1315 --
 10.1316 --	size = skb->end - skb->head + expand;
 10.1317 --	size = SKB_DATA_ALIGN(size);
 10.1318 --	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 10.1319 --	if (!data)
 10.1320 --		return -ENOMEM;
 10.1321 --
 10.1322 --	/* Copy entire thing */
 10.1323 --	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
 10.1324 --		BUG();
 10.1325 --
 10.1326 --	/* Set up shinfo */
 10.1327 --	ninfo = (struct skb_shared_info*)(data + size);
 10.1328 --	atomic_set(&ninfo->dataref, 1);
 10.1329 --	ninfo->tso_size = skb_shinfo(skb)->tso_size;
 10.1330 --	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
 10.1331 --	ninfo->nr_frags = 0;
 10.1332 --	ninfo->frag_list = NULL;
 10.1333 --
 10.1334 --	/* Offset between the two in bytes */
 10.1335 --	offset = data - skb->head;
 10.1336 --
 10.1337 --	/* Free old data. */
 10.1338 --	skb_release_data(skb);
 10.1339 --
 10.1340 --	skb->head = data;
 10.1341 --	skb->end  = data + size;
 10.1342 --
 10.1343 --	/* Set up new pointers */
 10.1344 --	skb->h.raw   += offset;
 10.1345 --	skb->nh.raw  += offset;
 10.1346 --	skb->mac.raw += offset;
 10.1347 --	skb->tail    += offset;
 10.1348 --	skb->data    += offset;
 10.1349 -+	struct net_device *dev = skb->dev;
 10.1350 -+	struct sk_buff *segs;
 10.1351 -+	int features = dev->features & ~(illegal_highdma(dev, skb) ?
 10.1352 -+					 NETIF_F_SG : 0);
 10.1353 -+
 10.1354 -+	segs = skb_gso_segment(skb, features);
 10.1355 -+
 10.1356 -+	/* Verifying header integrity only. */
 10.1357 -+	if (!segs)
 10.1358 -+		return 0;
 10.1359 - 
 10.1360 --	/* We are no longer a clone, even if we were. */
 10.1361 --	skb->cloned    = 0;
 10.1362 -+	if (unlikely(IS_ERR(segs)))
 10.1363 -+		return PTR_ERR(segs);
 10.1364 -+
 10.1365 -+	skb->next = segs;
 10.1366 -+	DEV_GSO_CB(skb)->destructor = skb->destructor;
 10.1367 -+	skb->destructor = dev_gso_skb_destructor;
 10.1368 - 
 10.1369 --	skb->tail     += skb->data_len;
 10.1370 --	skb->data_len  = 0;
 10.1371 -+	return 0;
 10.1372 -+}
 10.1373 -+
 10.1374 -+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 10.1375 -+{
 10.1376 -+	if (likely(!skb->next)) {
 10.1377 -+		if (netdev_nit)
 10.1378 -+			dev_queue_xmit_nit(skb, dev);
 10.1379 -+
 10.1380 -+		if (netif_needs_gso(dev, skb)) {
 10.1381 -+			if (unlikely(dev_gso_segment(skb)))
 10.1382 -+				goto out_kfree_skb;
 10.1383 -+			if (skb->next)
 10.1384 -+				goto gso;
 10.1385 -+		}
 10.1386 -+
 10.1387 -+		return dev->hard_start_xmit(skb, dev);
 10.1388 -+	}
 10.1389 -+
 10.1390 -+gso:
 10.1391 -+	do {
 10.1392 -+		struct sk_buff *nskb = skb->next;
 10.1393 -+		int rc;
 10.1394 -+
 10.1395 -+		skb->next = nskb->next;
 10.1396 -+		nskb->next = NULL;
 10.1397 -+		rc = dev->hard_start_xmit(nskb, dev);
 10.1398 -+		if (unlikely(rc)) {
 10.1399 -+			nskb->next = skb->next;
 10.1400 -+			skb->next = nskb;
 10.1401 -+			return rc;
 10.1402 -+		}
 10.1403 -+		if (unlikely(netif_queue_stopped(dev) && skb->next))
 10.1404 -+			return NETDEV_TX_BUSY;
 10.1405 -+	} while (skb->next);
 10.1406 -+	
 10.1407 -+	skb->destructor = DEV_GSO_CB(skb)->destructor;
 10.1408 -+
 10.1409 -+out_kfree_skb:
 10.1410 -+	kfree_skb(skb);
 10.1411 - 	return 0;
 10.1412 - }
 10.1413 - 
 10.1414 - #define HARD_TX_LOCK(dev, cpu) {			\
 10.1415 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 10.1416 --		spin_lock(&dev->xmit_lock);		\
 10.1417 --		dev->xmit_lock_owner = cpu;		\
 10.1418 -+		netif_tx_lock(dev);			\
 10.1419 - 	}						\
 10.1420 - }
 10.1421 - 
 10.1422 - #define HARD_TX_UNLOCK(dev) {				\
 10.1423 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 10.1424 --		dev->xmit_lock_owner = -1;		\
 10.1425 --		spin_unlock(&dev->xmit_lock);		\
 10.1426 -+		netif_tx_unlock(dev);			\
 10.1427 - 	}						\
 10.1428 - }
 10.1429 - 
 10.1430 -@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
 10.1431 - 	struct Qdisc *q;
 10.1432 - 	int rc = -ENOMEM;
 10.1433 - 
 10.1434 -+	/* GSO will handle the following emulations directly. */
 10.1435 -+	if (netif_needs_gso(dev, skb))
 10.1436 -+		goto gso;
 10.1437 -+
 10.1438 - 	if (skb_shinfo(skb)->frag_list &&
 10.1439 - 	    !(dev->features & NETIF_F_FRAGLIST) &&
 10.1440 --	    __skb_linearize(skb, GFP_ATOMIC))
 10.1441 -+	    __skb_linearize(skb))
 10.1442 - 		goto out_kfree_skb;
 10.1443 - 
 10.1444 - 	/* Fragmented skb is linearized if device does not support SG,
 10.1445 -@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
 10.1446 - 	 */
 10.1447 - 	if (skb_shinfo(skb)->nr_frags &&
 10.1448 - 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
 10.1449 --	    __skb_linearize(skb, GFP_ATOMIC))
 10.1450 -+	    __skb_linearize(skb))
 10.1451 - 		goto out_kfree_skb;
 10.1452 - 
 10.1453 - 	/* If packet is not checksummed and device does not support
 10.1454 - 	 * checksumming for this protocol, complete checksumming here.
 10.1455 - 	 */
 10.1456 - 	if (skb->ip_summed == CHECKSUM_HW &&
 10.1457 --	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
 10.1458 -+	    (!(dev->features & NETIF_F_GEN_CSUM) &&
 10.1459 - 	     (!(dev->features & NETIF_F_IP_CSUM) ||
 10.1460 - 	      skb->protocol != htons(ETH_P_IP))))
 10.1461 - 	      	if (skb_checksum_help(skb, 0))
 10.1462 - 	      		goto out_kfree_skb;
 10.1463 - 
 10.1464 -+gso:
 10.1465 - 	spin_lock_prefetch(&dev->queue_lock);
 10.1466 - 
 10.1467 - 	/* Disable soft irqs for various locks below. Also 
 10.1468 - 	 * stops preemption for RCU. 
 10.1469 - 	 */
 10.1470 --	local_bh_disable(); 
 10.1471 -+	rcu_read_lock_bh(); 
 10.1472 - 
 10.1473 - 	/* Updates of qdisc are serialized by queue_lock. 
 10.1474 - 	 * The struct Qdisc which is pointed to by qdisc is now a 
 10.1475 -@@ -1309,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 10.1476 - 	/* The device has no queue. Common case for software devices:
 10.1477 - 	   loopback, all the sorts of tunnels...
 10.1478 - 
 10.1479 --	   Really, it is unlikely that xmit_lock protection is necessary here.
 10.1480 --	   (f.e. loopback and IP tunnels are clean ignoring statistics
 10.1481 -+	   Really, it is unlikely that netif_tx_lock protection is necessary
 10.1482 -+	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
 10.1483 - 	   counters.)
 10.1484 - 	   However, it is possible, that they rely on protection
 10.1485 - 	   made by us here.
 10.1486 -@@ -1326,11 +1404,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 10.1487 - 			HARD_TX_LOCK(dev, cpu);
 10.1488 - 
 10.1489 - 			if (!netif_queue_stopped(dev)) {
 10.1490 --				if (netdev_nit)
 10.1491 --					dev_queue_xmit_nit(skb, dev);
 10.1492 --
 10.1493 - 				rc = 0;
 10.1494 --				if (!dev->hard_start_xmit(skb, dev)) {
 10.1495 -+				if (!dev_hard_start_xmit(skb, dev)) {
 10.1496 - 					HARD_TX_UNLOCK(dev);
 10.1497 - 					goto out;
 10.1498 - 				}
 10.1499 -@@ -1349,13 +1424,13 @@ int dev_queue_xmit(struct sk_buff *skb)
 10.1500 - 	}
 10.1501 - 
 10.1502 - 	rc = -ENETDOWN;
 10.1503 --	local_bh_enable();
 10.1504 -+	rcu_read_unlock_bh();
 10.1505 - 
 10.1506 - out_kfree_skb:
 10.1507 - 	kfree_skb(skb);
 10.1508 - 	return rc;
 10.1509 - out:
 10.1510 --	local_bh_enable();
 10.1511 -+	rcu_read_unlock_bh();
 10.1512 - 	return rc;
 10.1513 - }
 10.1514 - 
 10.1515 -@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
 10.1516 - 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 10.1517 - 
 10.1518 - 	spin_lock_init(&dev->queue_lock);
 10.1519 --	spin_lock_init(&dev->xmit_lock);
 10.1520 -+	spin_lock_init(&dev->_xmit_lock);
 10.1521 - 	dev->xmit_lock_owner = -1;
 10.1522 - #ifdef CONFIG_NET_CLS_ACT
 10.1523 - 	spin_lock_init(&dev->ingress_lock);
 10.1524 -@@ -2714,9 +2789,7 @@ int register_netdevice(struct net_device
 10.1525 - 
 10.1526 - 	/* Fix illegal SG+CSUM combinations. */
 10.1527 - 	if ((dev->features & NETIF_F_SG) &&
 10.1528 --	    !(dev->features & (NETIF_F_IP_CSUM |
 10.1529 --			       NETIF_F_NO_CSUM |
 10.1530 --			       NETIF_F_HW_CSUM))) {
 10.1531 -+	    !(dev->features & NETIF_F_ALL_CSUM)) {
 10.1532 - 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
 10.1533 - 		       dev->name);
 10.1534 - 		dev->features &= ~NETIF_F_SG;
 10.1535 -@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
 10.1536 - EXPORT_SYMBOL(__dev_get_by_index);
 10.1537 - EXPORT_SYMBOL(__dev_get_by_name);
 10.1538 - EXPORT_SYMBOL(__dev_remove_pack);
 10.1539 --EXPORT_SYMBOL(__skb_linearize);
 10.1540 - EXPORT_SYMBOL(dev_valid_name);
 10.1541 - EXPORT_SYMBOL(dev_add_pack);
 10.1542 - EXPORT_SYMBOL(dev_alloc_name);
 10.1543 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev_mcast.c ./net/core/dev_mcast.c
 10.1544 ---- ../orig-linux-2.6.16.29/net/core/dev_mcast.c	2006-09-12 19:02:10.000000000 +0100
 10.1545 -+++ ./net/core/dev_mcast.c	2006-09-19 13:59:20.000000000 +0100
 10.1546 -@@ -62,7 +62,7 @@
 10.1547 -  *	Device mc lists are changed by bh at least if IPv6 is enabled,
 10.1548 -  *	so that it must be bh protected.
 10.1549 -  *
 10.1550 -- *	We block accesses to device mc filters with dev->xmit_lock.
 10.1551 -+ *	We block accesses to device mc filters with netif_tx_lock.
 10.1552 -  */
 10.1553 - 
 10.1554 - /*
 10.1555 -@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
 10.1556 - 
 10.1557 - void dev_mc_upload(struct net_device *dev)
 10.1558 - {
 10.1559 --	spin_lock_bh(&dev->xmit_lock);
 10.1560 -+	netif_tx_lock_bh(dev);
 10.1561 - 	__dev_mc_upload(dev);
 10.1562 --	spin_unlock_bh(&dev->xmit_lock);
 10.1563 -+	netif_tx_unlock_bh(dev);
 10.1564 - }
 10.1565 - 
 10.1566 - /*
 10.1567 -@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
 10.1568 - 	int err = 0;
 10.1569 - 	struct dev_mc_list *dmi, **dmip;
 10.1570 - 
 10.1571 --	spin_lock_bh(&dev->xmit_lock);
 10.1572 -+	netif_tx_lock_bh(dev);
 10.1573 - 
 10.1574 - 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
 10.1575 - 		/*
 10.1576 -@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
 10.1577 - 			 */
 10.1578 - 			__dev_mc_upload(dev);
 10.1579 - 			
 10.1580 --			spin_unlock_bh(&dev->xmit_lock);
 10.1581 -+			netif_tx_unlock_bh(dev);
 10.1582 - 			return 0;
 10.1583 - 		}
 10.1584 - 	}
 10.1585 - 	err = -ENOENT;
 10.1586 - done:
 10.1587 --	spin_unlock_bh(&dev->xmit_lock);
 10.1588 -+	netif_tx_unlock_bh(dev);
 10.1589 - 	return err;
 10.1590 - }
 10.1591 - 
 10.1592 -@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
 10.1593 - 
 10.1594 - 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 10.1595 - 
 10.1596 --	spin_lock_bh(&dev->xmit_lock);
 10.1597 -+	netif_tx_lock_bh(dev);
 10.1598 - 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
 10.1599 - 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
 10.1600 - 		    dmi->dmi_addrlen == alen) {
 10.1601 -@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
 10.1602 - 	}
 10.1603 - 
 10.1604 - 	if ((dmi = dmi1) == NULL) {
 10.1605 --		spin_unlock_bh(&dev->xmit_lock);
 10.1606 -+		netif_tx_unlock_bh(dev);
 10.1607 - 		return -ENOMEM;
 10.1608 - 	}
 10.1609 - 	memcpy(dmi->dmi_addr, addr, alen);
 10.1610 -@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
 10.1611 - 
 10.1612 - 	__dev_mc_upload(dev);
 10.1613 - 	
 10.1614 --	spin_unlock_bh(&dev->xmit_lock);
 10.1615 -+	netif_tx_unlock_bh(dev);
 10.1616 - 	return 0;
 10.1617 - 
 10.1618 - done:
 10.1619 --	spin_unlock_bh(&dev->xmit_lock);
 10.1620 -+	netif_tx_unlock_bh(dev);
 10.1621 - 	kfree(dmi1);
 10.1622 - 	return err;
 10.1623 - }
 10.1624 -@@ -204,7 +204,7 @@ done:
 10.1625 - 
 10.1626 - void dev_mc_discard(struct net_device *dev)
 10.1627 - {
 10.1628 --	spin_lock_bh(&dev->xmit_lock);
 10.1629 -+	netif_tx_lock_bh(dev);
 10.1630 - 	
 10.1631 - 	while (dev->mc_list != NULL) {
 10.1632 - 		struct dev_mc_list *tmp = dev->mc_list;
 10.1633 -@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
 10.1634 - 	}
 10.1635 - 	dev->mc_count = 0;
 10.1636 - 
 10.1637 --	spin_unlock_bh(&dev->xmit_lock);
 10.1638 -+	netif_tx_unlock_bh(dev);
 10.1639 - }
 10.1640 - 
 10.1641 - #ifdef CONFIG_PROC_FS
 10.1642 -@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
 10.1643 - 	struct dev_mc_list *m;
 10.1644 - 	struct net_device *dev = v;
 10.1645 - 
 10.1646 --	spin_lock_bh(&dev->xmit_lock);
 10.1647 -+	netif_tx_lock_bh(dev);
 10.1648 - 	for (m = dev->mc_list; m; m = m->next) {
 10.1649 - 		int i;
 10.1650 - 
 10.1651 -@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
 10.1652 - 
 10.1653 - 		seq_putc(seq, '\n');
 10.1654 - 	}
 10.1655 --	spin_unlock_bh(&dev->xmit_lock);
 10.1656 -+	netif_tx_unlock_bh(dev);
 10.1657 - 	return 0;
 10.1658 - }
 10.1659 - 
 10.1660 -diff -pruN ../orig-linux-2.6.16.29/net/core/ethtool.c ./net/core/ethtool.c
 10.1661 ---- ../orig-linux-2.6.16.29/net/core/ethtool.c	2006-09-12 19:02:10.000000000 +0100
 10.1662 -+++ ./net/core/ethtool.c	2006-09-19 13:59:20.000000000 +0100
 10.1663 -@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
 10.1664 - 
 10.1665 - u32 ethtool_op_get_tx_csum(struct net_device *dev)
 10.1666 - {
 10.1667 --	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
 10.1668 -+	return (dev->features & NETIF_F_ALL_CSUM) != 0;
 10.1669 - }
 10.1670 - 
 10.1671 - int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 10.1672 -@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev
 10.1673 - 		return -EFAULT;
 10.1674 - 
 10.1675 - 	if (edata.data && 
 10.1676 --	    !(dev->features & (NETIF_F_IP_CSUM |
 10.1677 --			       NETIF_F_NO_CSUM |
 10.1678 --			       NETIF_F_HW_CSUM)))
 10.1679 -+	    !(dev->features & NETIF_F_ALL_CSUM))
 10.1680 - 		return -EINVAL;
 10.1681 - 
 10.1682 - 	return __ethtool_set_sg(dev, edata.data);
 10.1683 -@@ -561,7 +559,7 @@ static int ethtool_set_sg(struct net_dev
 10.1684 - 
 10.1685 - static int ethtool_get_tso(struct net_device *dev, char __user *useraddr)
 10.1686 - {
 10.1687 --	struct ethtool_value edata = { ETHTOOL_GTSO };
 10.1688 -+	struct ethtool_value edata = { ETHTOOL_GUFO };
 10.1689 - 
 10.1690 - 	if (!dev->ethtool_ops->get_tso)
 10.1691 - 		return -EOPNOTSUPP;
 10.1692 -@@ -616,6 +614,29 @@ static int ethtool_set_ufo(struct net_de
 10.1693 - 	return dev->ethtool_ops->set_ufo(dev, edata.data);
 10.1694 - }
 10.1695 - 
 10.1696 -+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
 10.1697 -+{
 10.1698 -+	struct ethtool_value edata = { ETHTOOL_GGSO };
 10.1699 -+
 10.1700 -+	edata.data = dev->features & NETIF_F_GSO;
 10.1701 -+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
 10.1702 -+		 return -EFAULT;
 10.1703 -+	return 0;
 10.1704 -+}
 10.1705 -+
 10.1706 -+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
 10.1707 -+{
 10.1708 -+	struct ethtool_value edata;
 10.1709 -+
 10.1710 -+	if (copy_from_user(&edata, useraddr, sizeof(edata)))
 10.1711 -+		return -EFAULT;
 10.1712 -+	if (edata.data)
 10.1713 -+		dev->features |= NETIF_F_GSO;
 10.1714 -+	else
 10.1715 -+		dev->features &= ~NETIF_F_GSO;
 10.1716 -+	return 0;
 10.1717 -+}
 10.1718 -+
 10.1719 - static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 10.1720 - {
 10.1721 - 	struct ethtool_test test;
 10.1722 -@@ -907,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
 10.1723 - 	case ETHTOOL_SUFO:
 10.1724 - 		rc = ethtool_set_ufo(dev, useraddr);
 10.1725 - 		break;
 10.1726 -+	case ETHTOOL_GGSO:
 10.1727 -+		rc = ethtool_get_gso(dev, useraddr);
 10.1728 -+		break;
 10.1729 -+	case ETHTOOL_SGSO:
 10.1730 -+		rc = ethtool_set_gso(dev, useraddr);
 10.1731 -+		break;
 10.1732 - 	default:
 10.1733 - 		rc =  -EOPNOTSUPP;
 10.1734 - 	}
 10.1735 -diff -pruN ../orig-linux-2.6.16.29/net/core/netpoll.c ./net/core/netpoll.c
 10.1736 ---- ../orig-linux-2.6.16.29/net/core/netpoll.c	2006-09-12 19:02:10.000000000 +0100
 10.1737 -+++ ./net/core/netpoll.c	2006-09-19 13:59:20.000000000 +0100
 10.1738 -@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp
 10.1739 - 
 10.1740 - 	do {
 10.1741 - 		npinfo->tries--;
 10.1742 --		spin_lock(&np->dev->xmit_lock);
 10.1743 --		np->dev->xmit_lock_owner = smp_processor_id();
 10.1744 -+		netif_tx_lock(np->dev);
 10.1745 - 
 10.1746 - 		/*
 10.1747 - 		 * network drivers do not expect to be called if the queue is
 10.1748 - 		 * stopped.
 10.1749 - 		 */
 10.1750 - 		if (netif_queue_stopped(np->dev)) {
 10.1751 --			np->dev->xmit_lock_owner = -1;
 10.1752 --			spin_unlock(&np->dev->xmit_lock);
 10.1753 -+			netif_tx_unlock(np->dev);
 10.1754 - 			netpoll_poll(np);
 10.1755 - 			udelay(50);
 10.1756 - 			continue;
 10.1757 - 		}
 10.1758 - 
 10.1759 - 		status = np->dev->hard_start_xmit(skb, np->dev);
 10.1760 --		np->dev->xmit_lock_owner = -1;
 10.1761 --		spin_unlock(&np->dev->xmit_lock);
 10.1762 -+		netif_tx_unlock(np->dev);
 10.1763 - 
 10.1764 - 		/* success */
 10.1765 - 		if(!status) {
 10.1766 -diff -pruN ../orig-linux-2.6.16.29/net/core/pktgen.c ./net/core/pktgen.c
 10.1767 ---- ../orig-linux-2.6.16.29/net/core/pktgen.c	2006-09-12 19:02:10.000000000 +0100
 10.1768 -+++ ./net/core/pktgen.c	2006-09-19 13:59:20.000000000 +0100
 10.1769 -@@ -2586,7 +2586,7 @@ static __inline__ void pktgen_xmit(struc
 10.1770 - 		}
 10.1771 - 	}
 10.1772 - 	
 10.1773 --	spin_lock_bh(&odev->xmit_lock);
 10.1774 -+	netif_tx_lock_bh(odev);
 10.1775 - 	if (!netif_queue_stopped(odev)) {
 10.1776 - 
 10.1777 - 		atomic_inc(&(pkt_dev->skb->users));
 10.1778 -@@ -2631,7 +2631,7 @@ retry_now:
 10.1779 - 		pkt_dev->next_tx_ns = 0;
 10.1780 -         }
 10.1781 - 
 10.1782 --	spin_unlock_bh(&odev->xmit_lock);
 10.1783 -+	netif_tx_unlock_bh(odev);
 10.1784 - 	
 10.1785 - 	/* If pkt_dev->count is zero, then run forever */
 10.1786 - 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
 10.1787 -diff -pruN ../orig-linux-2.6.16.29/net/core/skbuff.c ./net/core/skbuff.c
 10.1788 ---- ../orig-linux-2.6.16.29/net/core/skbuff.c	2006-09-12 19:02:10.000000000 +0100
 10.1789 -+++ ./net/core/skbuff.c	2006-09-19 13:59:20.000000000 +0100
 10.1790 -@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int
 10.1791 - 	shinfo = skb_shinfo(skb);
 10.1792 - 	atomic_set(&shinfo->dataref, 1);
 10.1793 - 	shinfo->nr_frags  = 0;
 10.1794 --	shinfo->tso_size = 0;
 10.1795 --	shinfo->tso_segs = 0;
 10.1796 --	shinfo->ufo_size = 0;
 10.1797 -+	shinfo->gso_size = 0;
 10.1798 -+	shinfo->gso_segs = 0;
 10.1799 -+	shinfo->gso_type = 0;
 10.1800 - 	shinfo->ip6_frag_id = 0;
 10.1801 - 	shinfo->frag_list = NULL;
 10.1802 - 
 10.1803 -@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme
 10.1804 - 
 10.1805 - 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 10.1806 - 	skb_shinfo(skb)->nr_frags  = 0;
 10.1807 --	skb_shinfo(skb)->tso_size = 0;
 10.1808 --	skb_shinfo(skb)->tso_segs = 0;
 10.1809 -+	skb_shinfo(skb)->gso_size = 0;
 10.1810 -+	skb_shinfo(skb)->gso_segs = 0;
 10.1811 -+	skb_shinfo(skb)->gso_type = 0;
 10.1812 - 	skb_shinfo(skb)->frag_list = NULL;
 10.1813 - out:
 10.1814 - 	return skb;
 10.1815 -@@ -501,8 +502,9 @@ static void copy_skb_header(struct sk_bu
 10.1816 - 	new->tc_index	= old->tc_index;
 10.1817 - #endif
 10.1818 - 	atomic_set(&new->users, 1);
 10.1819 --	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
 10.1820 --	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
 10.1821 -+	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 10.1822 -+	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 10.1823 -+	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 10.1824 - }
 10.1825 - 
 10.1826 - /**
 10.1827 -@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 
 10.1828 - 	return 0;
 10.1829 - }
 10.1830 - 
 10.1831 -+/**
 10.1832 -+ *	skb_segment - Perform protocol segmentation on skb.
 10.1833 -+ *	@skb: buffer to segment
 10.1834 -+ *	@features: features for the output path (see dev->features)
 10.1835 -+ *
 10.1836 -+ *	This function performs segmentation on the given skb.  It returns
 10.1837 -+ *	the segment at the given position.  It returns NULL if there are
 10.1838 -+ *	no more segments to generate, or when an error is encountered.
 10.1839 -+ */
 10.1840 -+struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 10.1841 -+{
 10.1842 -+	struct sk_buff *segs = NULL;
 10.1843 -+	struct sk_buff *tail = NULL;
 10.1844 -+	unsigned int mss = skb_shinfo(skb)->gso_size;
 10.1845 -+	unsigned int doffset = skb->data - skb->mac.raw;
 10.1846 -+	unsigned int offset = doffset;
 10.1847 -+	unsigned int headroom;
 10.1848 -+	unsigned int len;
 10.1849 -+	int sg = features & NETIF_F_SG;
 10.1850 -+	int nfrags = skb_shinfo(skb)->nr_frags;
 10.1851 -+	int err = -ENOMEM;
 10.1852 -+	int i = 0;
 10.1853 -+	int pos;
 10.1854 -+
 10.1855 -+	__skb_push(skb, doffset);
 10.1856 -+	headroom = skb_headroom(skb);
 10.1857 -+	pos = skb_headlen(skb);
 10.1858 -+
 10.1859 -+	do {
 10.1860 -+		struct sk_buff *nskb;
 10.1861 -+		skb_frag_t *frag;
 10.1862 -+		int hsize, nsize;
 10.1863 -+		int k;
 10.1864 -+		int size;
 10.1865 -+
 10.1866 -+		len = skb->len - offset;
 10.1867 -+		if (len > mss)
 10.1868 -+			len = mss;
 10.1869 -+
 10.1870 -+		hsize = skb_headlen(skb) - offset;
 10.1871 -+		if (hsize < 0)
 10.1872 -+			hsize = 0;
 10.1873 -+		nsize = hsize + doffset;
 10.1874 -+		if (nsize > len + doffset || !sg)
 10.1875 -+			nsize = len + doffset;
 10.1876 -+
 10.1877 -+		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
 10.1878 -+		if (unlikely(!nskb))
 10.1879 -+			goto err;
 10.1880 -+
 10.1881 -+		if (segs)
 10.1882 -+			tail->next = nskb;
 10.1883 -+		else
 10.1884 -+			segs = nskb;
 10.1885 -+		tail = nskb;
 10.1886 -+
 10.1887 -+		nskb->dev = skb->dev;
 10.1888 -+		nskb->priority = skb->priority;
 10.1889 -+		nskb->protocol = skb->protocol;
 10.1890 -+		nskb->dst = dst_clone(skb->dst);
 10.1891 -+		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 10.1892 -+		nskb->pkt_type = skb->pkt_type;
 10.1893 -+		nskb->mac_len = skb->mac_len;
 10.1894 -+
 10.1895 -+		skb_reserve(nskb, headroom);
 10.1896 -+		nskb->mac.raw = nskb->data;
 10.1897 -+		nskb->nh.raw = nskb->data + skb->mac_len;
 10.1898 -+		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
 10.1899 -+		memcpy(skb_put(nskb, doffset), skb->data, doffset);
 10.1900 -+
 10.1901 -+		if (!sg) {
 10.1902 -+			nskb->csum = skb_copy_and_csum_bits(skb, offset,
 10.1903 -+							    skb_put(nskb, len),
 10.1904 -+							    len, 0);
 10.1905 -+			continue;
 10.1906 -+		}
 10.1907 -+
 10.1908 -+		frag = skb_shinfo(nskb)->frags;
 10.1909 -+		k = 0;
 10.1910 -+
 10.1911 -+		nskb->ip_summed = CHECKSUM_HW;
 10.1912 -+		nskb->csum = skb->csum;
 10.1913 -+		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
 10.1914 -+
 10.1915 -+		while (pos < offset + len) {
 10.1916 -+			BUG_ON(i >= nfrags);
 10.1917 -+
 10.1918 -+			*frag = skb_shinfo(skb)->frags[i];
 10.1919 -+			get_page(frag->page);
 10.1920 -+			size = frag->size;
 10.1921 -+
 10.1922 -+			if (pos < offset) {
 10.1923 -+				frag->page_offset += offset - pos;
 10.1924 -+				frag->size -= offset - pos;
 10.1925 -+			}
 10.1926 -+
 10.1927 -+			k++;
 10.1928 -+
 10.1929 -+			if (pos + size <= offset + len) {
 10.1930 -+				i++;
 10.1931 -+				pos += size;
 10.1932 -+			} else {
 10.1933 -+				frag->size -= pos + size - (offset + len);
 10.1934 -+				break;
 10.1935 -+			}
 10.1936 -+
 10.1937 -+			frag++;
 10.1938 -+		}
 10.1939 -+
 10.1940 -+		skb_shinfo(nskb)->nr_frags = k;
 10.1941 -+		nskb->data_len = len - hsize;
 10.1942 -+		nskb->len += nskb->data_len;
 10.1943 -+		nskb->truesize += nskb->data_len;
 10.1944 -+	} while ((offset += len) < skb->len);
 10.1945 -+
 10.1946 -+	return segs;
 10.1947 -+
 10.1948 -+err:
 10.1949 -+	while ((skb = segs)) {
 10.1950 -+		segs = skb->next;
 10.1951 -+		kfree(skb);
 10.1952 -+	}
 10.1953 -+	return ERR_PTR(err);
 10.1954 -+}
 10.1955 -+
 10.1956 -+EXPORT_SYMBOL_GPL(skb_segment);
 10.1957 -+
 10.1958 - void __init skb_init(void)
 10.1959 - {
 10.1960 - 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
 10.1961 -diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c ./net/decnet/dn_nsp_in.c
 10.1962 ---- ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c	2006-09-12 19:02:10.000000000 +0100
 10.1963 -+++ ./net/decnet/dn_nsp_in.c	2006-09-19 13:59:20.000000000 +0100
 10.1964 -@@ -801,8 +801,7 @@ got_it:
 10.1965 - 		 * We linearize everything except data segments here.
 10.1966 - 		 */
 10.1967 - 		if (cb->nsp_flags & ~0x60) {
 10.1968 --			if (unlikely(skb_is_nonlinear(skb)) &&
 10.1969 --			    skb_linearize(skb, GFP_ATOMIC) != 0)
 10.1970 -+			if (unlikely(skb_linearize(skb)))
 10.1971 - 				goto free_out;
 10.1972 - 		}
 10.1973 - 
 10.1974 -diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_route.c ./net/decnet/dn_route.c
 10.1975 ---- ../orig-linux-2.6.16.29/net/decnet/dn_route.c	2006-09-12 19:02:10.000000000 +0100
 10.1976 -+++ ./net/decnet/dn_route.c	2006-09-19 13:59:20.000000000 +0100
 10.1977 -@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st
 10.1978 - 			padlen);
 10.1979 - 
 10.1980 -         if (flags & DN_RT_PKT_CNTL) {
 10.1981 --		if (unlikely(skb_is_nonlinear(skb)) &&
 10.1982 --		    skb_linearize(skb, GFP_ATOMIC) != 0)
 10.1983 -+		if (unlikely(skb_linearize(skb)))
 10.1984 - 			goto dump_it;
 10.1985 - 
 10.1986 -                 switch(flags & DN_RT_CNTL_MSK) {
 10.1987 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/af_inet.c ./net/ipv4/af_inet.c
 10.1988 ---- ../orig-linux-2.6.16.29/net/ipv4/af_inet.c	2006-09-12 19:02:10.000000000 +0100
 10.1989 -+++ ./net/ipv4/af_inet.c	2006-09-19 13:59:20.000000000 +0100
 10.1990 -@@ -68,6 +68,7 @@
 10.1991 -  */
 10.1992 - 
 10.1993 - #include <linux/config.h>
 10.1994 -+#include <linux/err.h>
 10.1995 - #include <linux/errno.h>
 10.1996 - #include <linux/types.h>
 10.1997 - #include <linux/socket.h>
 10.1998 -@@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock *
 10.1999 - 
 10.2000 - EXPORT_SYMBOL(inet_sk_rebuild_header);
 10.2001 - 
 10.2002 -+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 10.2003 -+{
 10.2004 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
 10.2005 -+	struct iphdr *iph;
 10.2006 -+	struct net_protocol *ops;
 10.2007 -+	int proto;
 10.2008 -+	int ihl;
 10.2009 -+	int id;
 10.2010 -+
 10.2011 -+	if (!pskb_may_pull(skb, sizeof(*iph)))
 10.2012 -+		goto out;
 10.2013 -+
 10.2014 -+	iph = skb->nh.iph;
 10.2015 -+	ihl = iph->ihl * 4;
 10.2016 -+	if (ihl < sizeof(*iph))
 10.2017 -+		goto out;
 10.2018 -+
 10.2019 -+	if (!pskb_may_pull(skb, ihl))
 10.2020 -+		goto out;
 10.2021 -+
 10.2022 -+	skb->h.raw = __skb_pull(skb, ihl);
 10.2023 -+	iph = skb->nh.iph;
 10.2024 -+	id = ntohs(iph->id);
 10.2025 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
 10.2026 -+	segs = ERR_PTR(-EPROTONOSUPPORT);
 10.2027 -+
 10.2028 -+	rcu_read_lock();
 10.2029 -+	ops = rcu_dereference(inet_protos[proto]);
 10.2030 -+	if (ops && ops->gso_segment)
 10.2031 -+		segs = ops->gso_segment(skb, features);
 10.2032 -+	rcu_read_unlock();
 10.2033 -+
 10.2034 -+	if (!segs || unlikely(IS_ERR(segs)))
 10.2035 -+		goto out;
 10.2036 -+
 10.2037 -+	skb = segs;
 10.2038 -+	do {
 10.2039 -+		iph = skb->nh.iph;
 10.2040 -+		iph->id = htons(id++);
 10.2041 -+		iph->tot_len = htons(skb->len - skb->mac_len);
 10.2042 -+		iph->check = 0;
 10.2043 -+		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
 10.2044 -+	} while ((skb = skb->next));
 10.2045 -+
 10.2046 -+out:
 10.2047 -+	return segs;
 10.2048 -+}
 10.2049 -+
 10.2050 - #ifdef CONFIG_IP_MULTICAST
 10.2051 - static struct net_protocol igmp_protocol = {
 10.2052 - 	.handler =	igmp_rcv,
 10.2053 -@@ -1093,6 +1142,7 @@ static struct net_protocol igmp_protocol
 10.2054 - static struct net_protocol tcp_protocol = {
 10.2055 - 	.handler =	tcp_v4_rcv,
 10.2056 - 	.err_handler =	tcp_v4_err,
 10.2057 -+	.gso_segment =	tcp_tso_segment,
 10.2058 - 	.no_policy =	1,
 10.2059 - };
 10.2060 - 
 10.2061 -@@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void);
 10.2062 - static struct packet_type ip_packet_type = {
 10.2063 - 	.type = __constant_htons(ETH_P_IP),
 10.2064 - 	.func = ip_rcv,
 10.2065 -+	.gso_segment = inet_gso_segment,
 10.2066 - };
 10.2067 - 
 10.2068 - static int __init inet_init(void)
 10.2069 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ip_output.c ./net/ipv4/ip_output.c
 10.2070 ---- ../orig-linux-2.6.16.29/net/ipv4/ip_output.c	2006-09-12 19:02:10.000000000 +0100
 10.2071 -+++ ./net/ipv4/ip_output.c	2006-09-19 13:59:20.000000000 +0100
 10.2072 -@@ -210,8 +210,7 @@ static inline int ip_finish_output(struc
 10.2073 - 		return dst_output(skb);
 10.2074 - 	}
 10.2075 - #endif
 10.2076 --	if (skb->len > dst_mtu(skb->dst) &&
 10.2077 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
 10.2078 -+	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
 10.2079 - 		return ip_fragment(skb, ip_finish_output2);
 10.2080 - 	else
 10.2081 - 		return ip_finish_output2(skb);
 10.2082 -@@ -362,7 +361,7 @@ packet_routed:
 10.2083 - 	}
 10.2084 - 
 10.2085 - 	ip_select_ident_more(iph, &rt->u.dst, sk,
 10.2086 --			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);
 10.2087 -+			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 10.2088 - 
 10.2089 - 	/* Add an IP checksum. */
 10.2090 - 	ip_send_check(iph);
 10.2091 -@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str
 10.2092 - 			       (length - transhdrlen));
 10.2093 - 	if (!err) {
 10.2094 - 		/* specify the length of each IP datagram fragment*/
 10.2095 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
 10.2096 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 10.2097 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
 10.2098 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
 10.2099 - 
 10.2100 - 		return 0;
 10.2101 -@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk,
 10.2102 - 	 */
 10.2103 - 	if (transhdrlen &&
 10.2104 - 	    length + fragheaderlen <= mtu &&
 10.2105 --	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
 10.2106 -+	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
 10.2107 - 	    !exthdrlen)
 10.2108 - 		csummode = CHECKSUM_HW;
 10.2109 - 
 10.2110 -@@ -1086,14 +1086,16 @@ ssize_t	ip_append_page(struct sock *sk, 
 10.2111 - 
 10.2112 - 	inet->cork.length += size;
 10.2113 - 	if ((sk->sk_protocol == IPPROTO_UDP) &&
 10.2114 --	    (rt->u.dst.dev->features & NETIF_F_UFO))
 10.2115 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
 10.2116 -+	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 10.2117 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 10.2118 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
 10.2119 -+	}
 10.2120 - 
 10.2121 - 
 10.2122 - 	while (size > 0) {
 10.2123 - 		int i;
 10.2124 - 
 10.2125 --		if (skb_shinfo(skb)->ufo_size)
 10.2126 -+		if (skb_shinfo(skb)->gso_size)
 10.2127 - 			len = size;
 10.2128 - 		else {
 10.2129 - 
 10.2130 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c ./net/ipv4/ipcomp.c
 10.2131 ---- ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c	2006-09-12 19:02:10.000000000 +0100
 10.2132 -+++ ./net/ipv4/ipcomp.c	2006-09-19 13:59:20.000000000 +0100
 10.2133 -@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat
 10.2134 -                         struct xfrm_decap_state *decap, struct sk_buff *skb)
 10.2135 - {
 10.2136 - 	u8 nexthdr;
 10.2137 --	int err = 0;
 10.2138 -+	int err = -ENOMEM;
 10.2139 - 	struct iphdr *iph;
 10.2140 - 	union {
 10.2141 - 		struct iphdr	iph;
 10.2142 -@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat
 10.2143 - 	} tmp_iph;
 10.2144 - 
 10.2145 - 
 10.2146 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
 10.2147 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
 10.2148 --	    	err = -ENOMEM;
 10.2149 -+	if (skb_linearize_cow(skb))
 10.2150 - 	    	goto out;
 10.2151 --	}
 10.2152 - 
 10.2153 - 	skb->ip_summed = CHECKSUM_NONE;
 10.2154 - 
 10.2155 -@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta
 10.2156 - 		goto out_ok;
 10.2157 - 	}
 10.2158 - 
 10.2159 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
 10.2160 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
 10.2161 -+	if (skb_linearize_cow(skb))
 10.2162 - 		goto out_ok;
 10.2163 --	}
 10.2164 - 	
 10.2165 - 	err = ipcomp_compress(x, skb);
 10.2166 - 	iph = skb->nh.iph;
 10.2167 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp.c ./net/ipv4/tcp.c
 10.2168 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp.c	2006-09-12 19:02:10.000000000 +0100
 10.2169 -+++ ./net/ipv4/tcp.c	2006-09-19 13:59:20.000000000 +0100
 10.2170 -@@ -257,6 +257,7 @@
 10.2171 - #include <linux/fs.h>
 10.2172 - #include <linux/random.h>
 10.2173 - #include <linux/bootmem.h>
 10.2174 -+#include <linux/err.h>
 10.2175 - 
 10.2176 - #include <net/icmp.h>
 10.2177 - #include <net/tcp.h>
 10.2178 -@@ -570,7 +571,7 @@ new_segment:
 10.2179 - 		skb->ip_summed = CHECKSUM_HW;
 10.2180 - 		tp->write_seq += copy;
 10.2181 - 		TCP_SKB_CB(skb)->end_seq += copy;
 10.2182 --		skb_shinfo(skb)->tso_segs = 0;
 10.2183 -+		skb_shinfo(skb)->gso_segs = 0;
 10.2184 - 
 10.2185 - 		if (!copied)
 10.2186 - 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
 10.2187 -@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock
 10.2188 - 	ssize_t res;
 10.2189 - 	struct sock *sk = sock->sk;
 10.2190 - 
 10.2191 --#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 10.2192 --
 10.2193 - 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
 10.2194 --	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
 10.2195 -+	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
 10.2196 - 		return sock_no_sendpage(sock, page, offset, size, flags);
 10.2197 - 
 10.2198 --#undef TCP_ZC_CSUM_FLAGS
 10.2199 --
 10.2200 - 	lock_sock(sk);
 10.2201 - 	TCP_CHECK_TIMER(sk);
 10.2202 - 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
 10.2203 -@@ -725,9 +722,7 @@ new_segment:
 10.2204 - 				/*
 10.2205 - 				 * Check whether we can use HW checksum.
 10.2206 - 				 */
 10.2207 --				if (sk->sk_route_caps &
 10.2208 --				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
 10.2209 --				     NETIF_F_HW_CSUM))
 10.2210 -+				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
 10.2211 - 					skb->ip_summed = CHECKSUM_HW;
 10.2212 - 
 10.2213 - 				skb_entail(sk, tp, skb);
 10.2214 -@@ -823,7 +818,7 @@ new_segment:
 10.2215 - 
 10.2216 - 			tp->write_seq += copy;
 10.2217 - 			TCP_SKB_CB(skb)->end_seq += copy;
 10.2218 --			skb_shinfo(skb)->tso_segs = 0;
 10.2219 -+			skb_shinfo(skb)->gso_segs = 0;
 10.2220 - 
 10.2221 - 			from += copy;
 10.2222 - 			copied += copy;
 10.2223 -@@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int 
 10.2224 - }
 10.2225 - 
 10.2226 - 
 10.2227 -+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 10.2228 -+{
 10.2229 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
 10.2230 -+	struct tcphdr *th;
 10.2231 -+	unsigned thlen;
 10.2232 -+	unsigned int seq;
 10.2233 -+	unsigned int delta;
 10.2234 -+	unsigned int oldlen;
 10.2235 -+	unsigned int len;
 10.2236 -+
 10.2237 -+	if (!pskb_may_pull(skb, sizeof(*th)))
 10.2238 -+		goto out;
 10.2239 -+
 10.2240 -+	th = skb->h.th;
 10.2241 -+	thlen = th->doff * 4;
 10.2242 -+	if (thlen < sizeof(*th))
 10.2243 -+		goto out;
 10.2244 -+
 10.2245 -+	if (!pskb_may_pull(skb, thlen))
 10.2246 -+		goto out;
 10.2247 -+
 10.2248 -+	segs = NULL;
 10.2249 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
 10.2250 -+		goto out;
 10.2251 -+
 10.2252 -+	oldlen = (u16)~skb->len;
 10.2253 -+	__skb_pull(skb, thlen);
 10.2254 -+
 10.2255 -+	segs = skb_segment(skb, features);
 10.2256 -+	if (IS_ERR(segs))
 10.2257 -+		goto out;
 10.2258 -+
 10.2259 -+	len = skb_shinfo(skb)->gso_size;
 10.2260 -+	delta = htonl(oldlen + (thlen + len));
 10.2261 -+
 10.2262 -+	skb = segs;
 10.2263 -+	th = skb->h.th;
 10.2264 -+	seq = ntohl(th->seq);
 10.2265 -+
 10.2266 -+	do {
 10.2267 -+		th->fin = th->psh = 0;
 10.2268 -+
 10.2269 -+		th->check = ~csum_fold(th->check + delta);
 10.2270 -+		if (skb->ip_summed != CHECKSUM_HW)
 10.2271 -+			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 10.2272 -+							   skb->csum));
 10.2273 -+
 10.2274 -+		seq += len;
 10.2275 -+		skb = skb->next;
 10.2276 -+		th = skb->h.th;
 10.2277 -+
 10.2278 -+		th->seq = htonl(seq);
 10.2279 -+		th->cwr = 0;
 10.2280 -+	} while (skb->next);
 10.2281 -+
 10.2282 -+	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
 10.2283 -+	th->check = ~csum_fold(th->check + delta);
 10.2284 -+	if (skb->ip_summed != CHECKSUM_HW)
 10.2285 -+		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 10.2286 -+						   skb->csum));
 10.2287 -+
 10.2288 -+out:
 10.2289 -+	return segs;
 10.2290 -+}
 10.2291 -+
 10.2292 - extern void __skb_cb_too_small_for_tcp(int, int);
 10.2293 - extern struct tcp_congestion_ops tcp_reno;
 10.2294 - 
 10.2295 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c ./net/ipv4/tcp_input.c
 10.2296 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c	2006-09-12 19:02:10.000000000 +0100
 10.2297 -+++ ./net/ipv4/tcp_input.c	2006-09-19 13:59:20.000000000 +0100
 10.2298 -@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk,
 10.2299 - 				else
 10.2300 - 					pkt_len = (end_seq -
 10.2301 - 						   TCP_SKB_CB(skb)->seq);
 10.2302 --				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
 10.2303 -+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
 10.2304 - 					break;
 10.2305 - 				pcount = tcp_skb_pcount(skb);
 10.2306 - 			}
 10.2307 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c ./net/ipv4/tcp_output.c
 10.2308 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c	2006-09-12 19:02:10.000000000 +0100
 10.2309 -+++ ./net/ipv4/tcp_output.c	2006-09-19 13:59:20.000000000 +0100
 10.2310 -@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 
 10.2311 - 		/* Avoid the costly divide in the normal
 10.2312 - 		 * non-TSO case.
 10.2313 - 		 */
 10.2314 --		skb_shinfo(skb)->tso_segs = 1;
 10.2315 --		skb_shinfo(skb)->tso_size = 0;
 10.2316 -+		skb_shinfo(skb)->gso_segs = 1;
 10.2317 -+		skb_shinfo(skb)->gso_size = 0;
 10.2318 -+		skb_shinfo(skb)->gso_type = 0;
 10.2319 - 	} else {
 10.2320 - 		unsigned int factor;
 10.2321 - 
 10.2322 - 		factor = skb->len + (mss_now - 1);
 10.2323 - 		factor /= mss_now;
 10.2324 --		skb_shinfo(skb)->tso_segs = factor;
 10.2325 --		skb_shinfo(skb)->tso_size = mss_now;
 10.2326 -+		skb_shinfo(skb)->gso_segs = factor;
 10.2327 -+		skb_shinfo(skb)->gso_size = mss_now;
 10.2328 -+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 10.2329 - 	}
 10.2330 - }
 10.2331 - 
 10.2332 -@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock
 10.2333 - 
 10.2334 - 	if (!tso_segs ||
 10.2335 - 	    (tso_segs > 1 &&
 10.2336 --	     skb_shinfo(skb)->tso_size != mss_now)) {
 10.2337 -+	     tcp_skb_mss(skb) != mss_now)) {
 10.2338 - 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 10.2339 - 		tso_segs = tcp_skb_pcount(skb);
 10.2340 - 	}
 10.2341 -@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 
 10.2342 - 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 10.2343 - 		if (!pskb_trim(skb, 0)) {
 10.2344 - 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
 10.2345 --			skb_shinfo(skb)->tso_segs = 1;
 10.2346 --			skb_shinfo(skb)->tso_size = 0;
 10.2347 -+			skb_shinfo(skb)->gso_segs = 1;
 10.2348 -+			skb_shinfo(skb)->gso_size = 0;
 10.2349 -+			skb_shinfo(skb)->gso_type = 0;
 10.2350 - 			skb->ip_summed = CHECKSUM_NONE;
 10.2351 - 			skb->csum = 0;
 10.2352 - 		}
 10.2353 -@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk)
 10.2354 - 		skb->csum = 0;
 10.2355 - 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
 10.2356 - 		TCP_SKB_CB(skb)->sacked = 0;
 10.2357 --		skb_shinfo(skb)->tso_segs = 1;
 10.2358 --		skb_shinfo(skb)->tso_size = 0;
 10.2359 -+		skb_shinfo(skb)->gso_segs = 1;
 10.2360 -+		skb_shinfo(skb)->gso_size = 0;
 10.2361 -+		skb_shinfo(skb)->gso_type = 0;
 10.2362 - 
 10.2363 - 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 10.2364 - 		TCP_SKB_CB(skb)->seq = tp->write_seq;
 10.2365 -@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock *
 10.2366 - 	skb->csum = 0;
 10.2367 - 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
 10.2368 - 	TCP_SKB_CB(skb)->sacked = 0;
 10.2369 --	skb_shinfo(skb)->tso_segs = 1;
 10.2370 --	skb_shinfo(skb)->tso_size = 0;
 10.2371 -+	skb_shinfo(skb)->gso_segs = 1;
 10.2372 -+	skb_shinfo(skb)->gso_size = 0;
 10.2373 -+	skb_shinfo(skb)->gso_type = 0;
 10.2374 - 
 10.2375 - 	/* Send it off. */
 10.2376 - 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
 10.2377 -@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 
 10.2378 - 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
 10.2379 - 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
 10.2380 - 	TCP_SKB_CB(skb)->sacked = 0;
 10.2381 --	skb_shinfo(skb)->tso_segs = 1;
 10.2382 --	skb_shinfo(skb)->tso_size = 0;
 10.2383 -+	skb_shinfo(skb)->gso_segs = 1;
 10.2384 -+	skb_shinfo(skb)->gso_size = 0;
 10.2385 -+	skb_shinfo(skb)->gso_type = 0;
 10.2386 - 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
 10.2387 - 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
 10.2388 - 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
 10.2389 -@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk)
 10.2390 - 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
 10.2391 - 	TCP_ECN_send_syn(sk, tp, buff);
 10.2392 - 	TCP_SKB_CB(buff)->sacked = 0;
 10.2393 --	skb_shinfo(buff)->tso_segs = 1;
 10.2394 --	skb_shinfo(buff)->tso_size = 0;
 10.2395 -+	skb_shinfo(buff)->gso_segs = 1;
 10.2396 -+	skb_shinfo(buff)->gso_size = 0;
 10.2397 -+	skb_shinfo(buff)->gso_type = 0;
 10.2398 - 	buff->csum = 0;
 10.2399 - 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 10.2400 - 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
 10.2401 -@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk)
 10.2402 - 		buff->csum = 0;
 10.2403 - 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
 10.2404 - 		TCP_SKB_CB(buff)->sacked = 0;
 10.2405 --		skb_shinfo(buff)->tso_segs = 1;
 10.2406 --		skb_shinfo(buff)->tso_size = 0;
 10.2407 -+		skb_shinfo(buff)->gso_segs = 1;
 10.2408 -+		skb_shinfo(buff)->gso_size = 0;
 10.2409 -+		skb_shinfo(buff)->gso_type = 0;
 10.2410 - 
 10.2411 - 		/* Send it off, this clears delayed acks for us. */
 10.2412 - 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
 10.2413 -@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc
 10.2414 - 	skb->csum = 0;
 10.2415 - 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
 10.2416 - 	TCP_SKB_CB(skb)->sacked = urgent;
 10.2417 --	skb_shinfo(skb)->tso_segs = 1;
 10.2418 --	skb_shinfo(skb)->tso_size = 0;
 10.2419 -+	skb_shinfo(skb)->gso_segs = 1;
 10.2420 -+	skb_shinfo(skb)->gso_size = 0;
 10.2421 -+	skb_shinfo(skb)->gso_type = 0;
 10.2422 - 
 10.2423 - 	/* Use a previous sequence.  This should cause the other
 10.2424 - 	 * end to send an ack.  Don't queue or clone SKB, just
 10.2425 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
 10.2426 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
 10.2427 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:20.000000000 +0100
 10.2428 -@@ -9,6 +9,8 @@
 10.2429 -  */
 10.2430 - 
 10.2431 - #include <linux/compiler.h>
 10.2432 -+#include <linux/if_ether.h>
 10.2433 -+#include <linux/kernel.h>
 10.2434 - #include <linux/skbuff.h>
 10.2435 - #include <linux/spinlock.h>
 10.2436 - #include <linux/netfilter_ipv4.h>
 10.2437 -@@ -158,16 +160,10 @@ error_nolock:
 10.2438 - 	goto out_exit;
 10.2439 - }
 10.2440 - 
 10.2441 --static int xfrm4_output_finish(struct sk_buff *skb)
 10.2442 -+static int xfrm4_output_finish2(struct sk_buff *skb)
 10.2443 - {
 10.2444 - 	int err;
 10.2445 - 
 10.2446 --#ifdef CONFIG_NETFILTER
 10.2447 --	if (!skb->dst->xfrm) {
 10.2448 --		IPCB(skb)->flags |= IPSKB_REROUTED;
 10.2449 --		return dst_output(skb);
 10.2450 --	}
 10.2451 --#endif
 10.2452 - 	while (likely((err = xfrm4_output_one(skb)) == 0)) {
 10.2453 - 		nf_reset(skb);
 10.2454 - 
 10.2455 -@@ -180,7 +176,7 @@ static int xfrm4_output_finish(struct sk
 10.2456 - 			return dst_output(skb);
 10.2457 - 
 10.2458 - 		err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
 10.2459 --			      skb->dst->dev, xfrm4_output_finish);
 10.2460 -+			      skb->dst->dev, xfrm4_output_finish2);
 10.2461 - 		if (unlikely(err != 1))
 10.2462 - 			break;
 10.2463 - 	}
 10.2464 -@@ -188,6 +184,48 @@ static int xfrm4_output_finish(struct sk
 10.2465 - 	return err;
 10.2466 - }
 10.2467 - 
 10.2468 -+static int xfrm4_output_finish(struct sk_buff *skb)
 10.2469 -+{
 10.2470 -+	struct sk_buff *segs;
 10.2471 -+
 10.2472 -+#ifdef CONFIG_NETFILTER
 10.2473 -+	if (!skb->dst->xfrm) {
 10.2474 -+		IPCB(skb)->flags |= IPSKB_REROUTED;
 10.2475 -+		return dst_output(skb);
 10.2476 -+	}
 10.2477 -+#endif
 10.2478 -+
 10.2479 -+	if (!skb_shinfo(skb)->gso_size)
 10.2480 -+		return xfrm4_output_finish2(skb);
 10.2481 -+
 10.2482 -+	skb->protocol = htons(ETH_P_IP);
 10.2483 -+	segs = skb_gso_segment(skb, 0);
 10.2484 -+	kfree_skb(skb);
 10.2485 -+	if (unlikely(IS_ERR(segs)))
 10.2486 -+		return PTR_ERR(segs);
 10.2487 -+
 10.2488 -+	do {
 10.2489 -+		struct sk_buff *nskb = segs->next;
 10.2490 -+		int err;
 10.2491 -+
 10.2492 -+		segs->next = NULL;
 10.2493 -+		err = xfrm4_output_finish2(segs);
 10.2494 -+
 10.2495 -+		if (unlikely(err)) {
 10.2496 -+			while ((segs = nskb)) {
 10.2497 -+				nskb = segs->next;
 10.2498 -+				segs->next = NULL;
 10.2499 -+				kfree_skb(segs);
 10.2500 -+			}
 10.2501 -+			return err;
 10.2502 -+		}
 10.2503 -+
 10.2504 -+		segs = nskb;
 10.2505 -+	} while (segs);
 10.2506 -+
 10.2507 -+	return 0;
 10.2508 -+}
 10.2509 -+
 10.2510 - int xfrm4_output(struct sk_buff *skb)
 10.2511 - {
 10.2512 - 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
 10.2513 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c ./net/ipv6/ip6_output.c
 10.2514 ---- ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c	2006-09-12 19:02:10.000000000 +0100
 10.2515 -+++ ./net/ipv6/ip6_output.c	2006-09-19 13:59:20.000000000 +0100
 10.2516 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
 10.2517 - 
 10.2518 - int ip6_output(struct sk_buff *skb)
 10.2519 - {
 10.2520 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
 10.2521 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
 10.2522 - 				dst_allfrag(skb->dst))
 10.2523 - 		return ip6_fragment(skb, ip6_output2);
 10.2524 - 	else
 10.2525 -@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st
 10.2526 - 		struct frag_hdr fhdr;
 10.2527 - 
 10.2528 - 		/* specify the length of each IP datagram fragment*/
 10.2529 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 
 10.2530 --						sizeof(struct frag_hdr);
 10.2531 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 
 10.2532 -+					    sizeof(struct frag_hdr);
 10.2533 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
 10.2534 - 		ipv6_select_ident(skb, &fhdr);
 10.2535 - 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 10.2536 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
 10.2537 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c ./net/ipv6/ipcomp6.c
 10.2538 ---- ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c	2006-09-12 19:02:10.000000000 +0100
 10.2539 -+++ ./net/ipv6/ipcomp6.c	2006-09-19 13:59:20.000000000 +0100
 10.2540 -@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
 10.2541 - 
 10.2542 - static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
 10.2543 - {
 10.2544 --	int err = 0;
 10.2545 -+	int err = -ENOMEM;
 10.2546 - 	u8 nexthdr = 0;
 10.2547 - 	int hdr_len = skb->h.raw - skb->nh.raw;
 10.2548 - 	unsigned char *tmp_hdr = NULL;
 10.2549 -@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta
 10.2550 - 	struct crypto_tfm *tfm;
 10.2551 - 	int cpu;
 10.2552 - 
 10.2553 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
 10.2554 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
 10.2555 --		err = -ENOMEM;
 10.2556 -+	if (skb_linearize_cow(skb))
 10.2557 - 		goto out;
 10.2558 --	}
 10.2559 - 
 10.2560 - 	skb->ip_summed = CHECKSUM_NONE;
 10.2561 - 
 10.2562 -@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st
 10.2563 - 		goto out_ok;
 10.2564 - 	}
 10.2565 - 
 10.2566 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
 10.2567 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
 10.2568 -+	if (skb_linearize_cow(skb))
 10.2569 - 		goto out_ok;
 10.2570 --	}
 10.2571 - 
 10.2572 - 	/* compression */
 10.2573 - 	plen = skb->len - hdr_len;
 10.2574 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c ./net/ipv6/xfrm6_output.c
 10.2575 ---- ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c	2006-09-12 19:02:10.000000000 +0100
 10.2576 -+++ ./net/ipv6/xfrm6_output.c	2006-09-19 13:59:20.000000000 +0100
 10.2577 -@@ -151,7 +151,7 @@ error_nolock:
 10.2578 - 	goto out_exit;
 10.2579 - }
 10.2580 - 
 10.2581 --static int xfrm6_output_finish(struct sk_buff *skb)
 10.2582 -+static int xfrm6_output_finish2(struct sk_buff *skb)
 10.2583 - {
 10.2584 - 	int err;
 10.2585 - 
 10.2586 -@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk
 10.2587 - 			return dst_output(skb);
 10.2588 - 
 10.2589 - 		err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
 10.2590 --			      skb->dst->dev, xfrm6_output_finish);
 10.2591 -+			      skb->dst->dev, xfrm6_output_finish2);
 10.2592 - 		if (unlikely(err != 1))
 10.2593 - 			break;
 10.2594 - 	}
 10.2595 -@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk
 10.2596 - 	return err;
 10.2597 - }
 10.2598 - 
 10.2599 -+static int xfrm6_output_finish(struct sk_buff *skb)
 10.2600 -+{
 10.2601 -+	struct sk_buff *segs;
 10.2602 -+
 10.2603 -+	if (!skb_shinfo(skb)->gso_size)
 10.2604 -+		return xfrm6_output_finish2(skb);
 10.2605 -+
 10.2606 -+	skb->protocol = htons(ETH_P_IP);
 10.2607 -+	segs = skb_gso_segment(skb, 0);
 10.2608 -+	kfree_skb(skb);
 10.2609 -+	if (unlikely(IS_ERR(segs)))
 10.2610 -+		return PTR_ERR(segs);
 10.2611 -+
 10.2612 -+	do {
 10.2613 -+		struct sk_buff *nskb = segs->next;
 10.2614 -+		int err;
 10.2615 -+
 10.2616 -+		segs->next = NULL;
 10.2617 -+		err = xfrm6_output_finish2(segs);
 10.2618 -+
 10.2619 -+		if (unlikely(err)) {
 10.2620 -+			while ((segs = nskb)) {
 10.2621 -+				nskb = segs->next;
 10.2622 -+				segs->next = NULL;
 10.2623 -+				kfree_skb(segs);
 10.2624 -+			}
 10.2625 -+			return err;
 10.2626 -+		}
 10.2627 -+
 10.2628 -+		segs = nskb;
 10.2629 -+	} while (segs);
 10.2630 -+
 10.2631 -+	return 0;
 10.2632 -+}
 10.2633 -+
 10.2634 - int xfrm6_output(struct sk_buff *skb)
 10.2635 - {
 10.2636 - 	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
 10.2637 -diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_generic.c ./net/sched/sch_generic.c
 10.2638 ---- ../orig-linux-2.6.16.29/net/sched/sch_generic.c	2006-09-12 19:02:10.000000000 +0100
 10.2639 -+++ ./net/sched/sch_generic.c	2006-09-19 13:59:20.000000000 +0100
 10.2640 -@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device
 10.2641 -    dev->queue_lock serializes queue accesses for this device
 10.2642 -    AND dev->qdisc pointer itself.
 10.2643 - 
 10.2644 --   dev->xmit_lock serializes accesses to device driver.
 10.2645 -+   netif_tx_lock serializes accesses to device driver.
 10.2646 - 
 10.2647 --   dev->queue_lock and dev->xmit_lock are mutually exclusive,
 10.2648 -+   dev->queue_lock and netif_tx_lock are mutually exclusive,
 10.2649 -    if one is grabbed, another must be free.
 10.2650 -  */
 10.2651 - 
 10.2652 -@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device
 10.2653 -    NOTE: Called under dev->queue_lock with locally disabled BH.
 10.2654 - */
 10.2655 - 
 10.2656 --int qdisc_restart(struct net_device *dev)
 10.2657 -+static inline int qdisc_restart(struct net_device *dev)
 10.2658 - {
 10.2659 - 	struct Qdisc *q = dev->qdisc;
 10.2660 - 	struct sk_buff *skb;
 10.2661 - 
 10.2662 - 	/* Dequeue packet */
 10.2663 --	if ((skb = q->dequeue(q)) != NULL) {
 10.2664 -+	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
 10.2665 - 		unsigned nolock = (dev->features & NETIF_F_LLTX);
 10.2666 -+
 10.2667 -+		dev->gso_skb = NULL;
 10.2668 -+
 10.2669 - 		/*
 10.2670 - 		 * When the driver has LLTX set it does its own locking
 10.2671 - 		 * in start_xmit. No need to add additional overhead by
 10.2672 -@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev
 10.2673 - 		 * will be requeued.
 10.2674 - 		 */
 10.2675 - 		if (!nolock) {
 10.2676 --			if (!spin_trylock(&dev->xmit_lock)) {
 10.2677 -+			if (!netif_tx_trylock(dev)) {
 10.2678 - 			collision:
 10.2679 - 				/* So, someone grabbed the driver. */
 10.2680 - 				
 10.2681 -@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev
 10.2682 - 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
 10.2683 - 				goto requeue;
 10.2684 - 			}
 10.2685 --			/* Remember that the driver is grabbed by us. */
 10.2686 --			dev->xmit_lock_owner = smp_processor_id();
 10.2687 - 		}
 10.2688 - 		
 10.2689 - 		{
 10.2690 -@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev
 10.2691 - 
 10.2692 - 			if (!netif_queue_stopped(dev)) {
 10.2693 - 				int ret;
 10.2694 --				if (netdev_nit)
 10.2695 --					dev_queue_xmit_nit(skb, dev);
 10.2696 - 
 10.2697 --				ret = dev->hard_start_xmit(skb, dev);
 10.2698 -+				ret = dev_hard_start_xmit(skb, dev);
 10.2699 - 				if (ret == NETDEV_TX_OK) { 
 10.2700 - 					if (!nolock) {
 10.2701 --						dev->xmit_lock_owner = -1;
 10.2702 --						spin_unlock(&dev->xmit_lock);
 10.2703 -+						netif_tx_unlock(dev);
 10.2704 - 					}
 10.2705 - 					spin_lock(&dev->queue_lock);
 10.2706 - 					return -1;
 10.2707 -@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev
 10.2708 - 			/* NETDEV_TX_BUSY - we need to requeue */
 10.2709 - 			/* Release the driver */
 10.2710 - 			if (!nolock) { 
 10.2711 --				dev->xmit_lock_owner = -1;
 10.2712 --				spin_unlock(&dev->xmit_lock);
 10.2713 -+				netif_tx_unlock(dev);
 10.2714 - 			} 
 10.2715 - 			spin_lock(&dev->queue_lock);
 10.2716 - 			q = dev->qdisc;
 10.2717 -@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev
 10.2718 - 		 */
 10.2719 - 
 10.2720 - requeue:
 10.2721 --		q->ops->requeue(skb, q);
 10.2722 -+		if (skb->next)
 10.2723 -+			dev->gso_skb = skb;
 10.2724 -+		else
 10.2725 -+			q->ops->requeue(skb, q);
 10.2726 - 		netif_schedule(dev);
 10.2727 - 		return 1;
 10.2728 - 	}
 10.2729 -@@ -183,11 +183,23 @@ requeue:
 10.2730 - 	return q->q.qlen;
 10.2731 - }
 10.2732 - 
 10.2733 -+void __qdisc_run(struct net_device *dev)
 10.2734 -+{
 10.2735 -+	if (unlikely(dev->qdisc == &noop_qdisc))
 10.2736 -+		goto out;
 10.2737 -+
 10.2738 -+	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
 10.2739 -+		/* NOTHING */;
 10.2740 -+
 10.2741 -+out:
 10.2742 -+	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 10.2743 -+}
 10.2744 -+
 10.2745 - static void dev_watchdog(unsigned long arg)
 10.2746 - {
 10.2747 - 	struct net_device *dev = (struct net_device *)arg;
 10.2748 - 
 10.2749 --	spin_lock(&dev->xmit_lock);
 10.2750 -+	netif_tx_lock(dev);
 10.2751 - 	if (dev->qdisc != &noop_qdisc) {
 10.2752 - 		if (netif_device_present(dev) &&
 10.2753 - 		    netif_running(dev) &&
 10.2754 -@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a
 10.2755 - 				dev_hold(dev);
 10.2756 - 		}
 10.2757 - 	}
 10.2758 --	spin_unlock(&dev->xmit_lock);
 10.2759 -+	netif_tx_unlock(dev);
 10.2760 - 
 10.2761 - 	dev_put(dev);
 10.2762 - }
 10.2763 -@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev
 10.2764 - 
 10.2765 - static void dev_watchdog_up(struct net_device *dev)
 10.2766 - {
 10.2767 --	spin_lock_bh(&dev->xmit_lock);
 10.2768 -+	netif_tx_lock_bh(dev);
 10.2769 - 	__netdev_watchdog_up(dev);
 10.2770 --	spin_unlock_bh(&dev->xmit_lock);
 10.2771 -+	netif_tx_unlock_bh(dev);
 10.2772 - }
 10.2773 - 
 10.2774 - static void dev_watchdog_down(struct net_device *dev)
 10.2775 - {
 10.2776 --	spin_lock_bh(&dev->xmit_lock);
 10.2777 -+	netif_tx_lock_bh(dev);
 10.2778 - 	if (del_timer(&dev->watchdog_timer))
 10.2779 - 		__dev_put(dev);
 10.2780 --	spin_unlock_bh(&dev->xmit_lock);
 10.2781 -+	netif_tx_unlock_bh(dev);
 10.2782 - }
 10.2783 - 
 10.2784 - void netif_carrier_on(struct net_device *dev)
 10.2785 -@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d
 10.2786 - 
 10.2787 - 	dev_watchdog_down(dev);
 10.2788 - 
 10.2789 --	while (test_bit(__LINK_STATE_SCHED, &dev->state))
 10.2790 -+	/* Wait for outstanding dev_queue_xmit calls. */
 10.2791 -+	synchronize_rcu();
 10.2792 -+
 10.2793 -+	/* Wait for outstanding qdisc_run calls. */
 10.2794 -+	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 10.2795 - 		yield();
 10.2796 - 
 10.2797 --	spin_unlock_wait(&dev->xmit_lock);
 10.2798 -+	if (dev->gso_skb) {
 10.2799 -+		kfree_skb(dev->gso_skb);
 10.2800 -+		dev->gso_skb = NULL;
 10.2801 -+	}
 10.2802 - }
 10.2803 - 
 10.2804 - void dev_init_scheduler(struct net_device *dev)
 10.2805 -@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
 10.2806 - EXPORT_SYMBOL(qdisc_alloc);
 10.2807 - EXPORT_SYMBOL(qdisc_destroy);
 10.2808 - EXPORT_SYMBOL(qdisc_reset);
 10.2809 --EXPORT_SYMBOL(qdisc_restart);
 10.2810 - EXPORT_SYMBOL(qdisc_lock_tree);
 10.2811 - EXPORT_SYMBOL(qdisc_unlock_tree);
 10.2812 -diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_teql.c ./net/sched/sch_teql.c
 10.2813 ---- ../orig-linux-2.6.16.29/net/sched/sch_teql.c	2006-09-12 19:02:10.000000000 +0100
 10.2814 -+++ ./net/sched/sch_teql.c	2006-09-19 13:59:20.000000000 +0100
 10.2815 -@@ -302,20 +302,17 @@ restart:
 10.2816 - 
 10.2817 - 		switch (teql_resolve(skb, skb_res, slave)) {
 10.2818 - 		case 0:
 10.2819 --			if (spin_trylock(&slave->xmit_lock)) {
 10.2820 --				slave->xmit_lock_owner = smp_processor_id();
 10.2821 -+			if (netif_tx_trylock(slave)) {
 10.2822 - 				if (!netif_queue_stopped(slave) &&
 10.2823 - 				    slave->hard_start_xmit(skb, slave) == 0) {
 10.2824 --					slave->xmit_lock_owner = -1;
 10.2825 --					spin_unlock(&slave->xmit_lock);
 10.2826 -+					netif_tx_unlock(slave);
 10.2827 - 					master->slaves = NEXT_SLAVE(q);
 10.2828 - 					netif_wake_queue(dev);
 10.2829 - 					master->stats.tx_packets++;
 10.2830 - 					master->stats.tx_bytes += len;
 10.2831 - 					return 0;
 10.2832 - 				}
 10.2833 --				slave->xmit_lock_owner = -1;
 10.2834 --				spin_unlock(&slave->xmit_lock);
 10.2835 -+				netif_tx_unlock(slave);
 10.2836 - 			}
 10.2837 - 			if (netif_queue_stopped(dev))
 10.2838 - 				busy = 1;
    11.1 --- a/patches/linux-2.6.16.30/net-gso-1-check-dodgy.patch	Mon Nov 27 13:50:01 2006 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,27 +0,0 @@
    11.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp.c ./net/ipv4/tcp.c
    11.5 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp.c	2006-09-19 13:59:20.000000000 +0100
    11.6 -+++ ./net/ipv4/tcp.c	2006-09-19 13:59:42.000000000 +0100
    11.7 -@@ -2042,13 +2042,19 @@ struct sk_buff *tcp_tso_segment(struct s
    11.8 - 	if (!pskb_may_pull(skb, thlen))
    11.9 - 		goto out;
   11.10 - 
   11.11 --	segs = NULL;
   11.12 --	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
   11.13 --		goto out;
   11.14 --
   11.15 - 	oldlen = (u16)~skb->len;
   11.16 - 	__skb_pull(skb, thlen);
   11.17 - 
   11.18 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
   11.19 -+		/* Packet is from an untrusted source, reset gso_segs. */
   11.20 -+		int mss = skb_shinfo(skb)->gso_size;
   11.21 -+
   11.22 -+		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
   11.23 -+
   11.24 -+		segs = NULL;
   11.25 -+		goto out;
   11.26 -+	}
   11.27 -+
   11.28 - 	segs = skb_segment(skb, features);
   11.29 - 	if (IS_ERR(segs))
   11.30 - 		goto out;
    12.1 --- a/patches/linux-2.6.16.30/net-gso-2-checksum-fix.patch	Mon Nov 27 13:50:01 2006 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,451 +0,0 @@
    12.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
    12.5 ---- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
    12.6 -+++ ./drivers/net/bnx2.c	2006-09-19 13:59:46.000000000 +0100
    12.7 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
    12.8 - 		skb = tx_buf->skb;
    12.9 - #ifdef BCM_TSO 
   12.10 - 		/* partial BD completions possible with TSO packets */
   12.11 --		if (skb_shinfo(skb)->gso_size) {
   12.12 -+		if (skb_is_gso(skb)) {
   12.13 - 			u16 last_idx, last_ring_idx;
   12.14 - 
   12.15 - 			last_idx = sw_cons +
   12.16 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
   12.17 ---- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
   12.18 -+++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:46.000000000 +0100
   12.19 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   12.20 - 	struct cpl_tx_pkt *cpl;
   12.21 - 
   12.22 - #ifdef NETIF_F_TSO
   12.23 --	if (skb_shinfo(skb)->gso_size) {
   12.24 -+	if (skb_is_gso(skb)) {
   12.25 - 		int eth_type;
   12.26 - 		struct cpl_tx_pkt_lso *hdr;
   12.27 - 
   12.28 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
   12.29 ---- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
   12.30 -+++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:46.000000000 +0100
   12.31 -@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
   12.32 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   12.33 - 	int err;
   12.34 - 
   12.35 --	if (skb_shinfo(skb)->gso_size) {
   12.36 -+	if (skb_is_gso(skb)) {
   12.37 - 		if (skb_header_cloned(skb)) {
   12.38 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   12.39 - 			if (err)
   12.40 -@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
   12.41 - 		 * tso gets written back prematurely before the data is fully
   12.42 - 		 * DMAd to the controller */
   12.43 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
   12.44 --				!skb_shinfo(skb)->gso_size) {
   12.45 -+		    !skb_is_gso(skb)) {
   12.46 - 			tx_ring->last_tx_tso = 0;
   12.47 - 			size -= 4;
   12.48 - 		}
   12.49 -@@ -2934,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   12.50 - 
   12.51 - #ifdef NETIF_F_TSO
   12.52 - 	/* Controller Erratum workaround */
   12.53 --	if (!skb->data_len && tx_ring->last_tx_tso &&
   12.54 --		!skb_shinfo(skb)->gso_size)
   12.55 -+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
   12.56 - 		count++;
   12.57 - #endif
   12.58 - 
   12.59 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
   12.60 ---- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
   12.61 -+++ ./drivers/net/forcedeth.c	2006-09-19 13:59:46.000000000 +0100
   12.62 -@@ -1105,7 +1105,7 @@ static int nv_start_xmit(struct sk_buff 
   12.63 - 	np->tx_skbuff[nr] = skb;
   12.64 - 
   12.65 - #ifdef NETIF_F_TSO
   12.66 --	if (skb_shinfo(skb)->gso_size)
   12.67 -+	if (skb_is_gso(skb))
   12.68 - 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   12.69 - 	else
   12.70 - #endif
   12.71 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
   12.72 ---- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
   12.73 -+++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:46.000000000 +0100
   12.74 -@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
   12.75 - 	uint16_t ipcse, tucse, mss;
   12.76 - 	int err;
   12.77 - 
   12.78 --	if(likely(skb_shinfo(skb)->gso_size)) {
   12.79 -+	if (likely(skb_is_gso(skb))) {
   12.80 - 		if (skb_header_cloned(skb)) {
   12.81 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   12.82 - 			if (err)
   12.83 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
   12.84 ---- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
   12.85 -+++ ./drivers/net/loopback.c	2006-09-19 13:59:46.000000000 +0100
   12.86 -@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
   12.87 - #endif
   12.88 - 
   12.89 - #ifdef LOOPBACK_TSO
   12.90 --	if (skb_shinfo(skb)->gso_size) {
   12.91 -+	if (skb_is_gso(skb)) {
   12.92 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   12.93 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   12.94 - 
   12.95 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
   12.96 ---- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
   12.97 -+++ ./drivers/net/sky2.c	2006-09-19 13:59:46.000000000 +0100
   12.98 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   12.99 - 	count = sizeof(dma_addr_t) / sizeof(u32);
  12.100 - 	count += skb_shinfo(skb)->nr_frags * count;
  12.101 - 
  12.102 --	if (skb_shinfo(skb)->gso_size)
  12.103 -+	if (skb_is_gso(skb))
  12.104 - 		++count;
  12.105 - 
  12.106 - 	if (skb->ip_summed == CHECKSUM_HW)
  12.107 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  12.108 ---- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  12.109 -+++ ./drivers/net/typhoon.c	2006-09-19 13:59:46.000000000 +0100
  12.110 -@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  12.111 - 	 * If problems develop with TSO, check this first.
  12.112 - 	 */
  12.113 - 	numDesc = skb_shinfo(skb)->nr_frags + 1;
  12.114 --	if(skb_tso_size(skb))
  12.115 -+	if (skb_is_gso(skb))
  12.116 - 		numDesc++;
  12.117 - 
  12.118 - 	/* When checking for free space in the ring, we need to also
  12.119 -@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  12.120 - 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
  12.121 - 	}
  12.122 - 
  12.123 --	if(skb_tso_size(skb)) {
  12.124 -+	if (skb_is_gso(skb)) {
  12.125 - 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
  12.126 - 		first_txd->numDesc++;
  12.127 - 
  12.128 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  12.129 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  12.130 -+++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:46.000000000 +0100
  12.131 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  12.132 - 	queue = card->qdio.out_qs
  12.133 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  12.134 - 
  12.135 --	if (skb_shinfo(skb)->gso_size)
  12.136 -+	if (skb_is_gso(skb))
  12.137 - 		large_send = card->options.large_send;
  12.138 - 
  12.139 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
  12.140 -@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  12.141 - 		card->stats.tx_packets++;
  12.142 - 		card->stats.tx_bytes += skb->len;
  12.143 - #ifdef CONFIG_QETH_PERF_STATS
  12.144 --		if (skb_shinfo(skb)->gso_size &&
  12.145 --		   !(large_send == QETH_LARGE_SEND_NO)) {
  12.146 -+		if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) {
  12.147 - 			card->perf_stats.large_send_bytes += skb->len;
  12.148 - 			card->perf_stats.large_send_cnt++;
  12.149 - 		}
  12.150 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  12.151 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  12.152 -+++ ./include/linux/netdevice.h	2006-09-19 13:59:46.000000000 +0100
  12.153 -@@ -541,6 +541,7 @@ struct packet_type {
  12.154 - 					 struct net_device *);
  12.155 - 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  12.156 - 						int features);
  12.157 -+	int			(*gso_send_check)(struct sk_buff *skb);
  12.158 - 	void			*af_packet_priv;
  12.159 - 	struct list_head	list;
  12.160 - };
  12.161 -@@ -1001,14 +1002,15 @@ extern void linkwatch_run_queue(void);
  12.162 - 
  12.163 - static inline int skb_gso_ok(struct sk_buff *skb, int features)
  12.164 - {
  12.165 --	int feature = skb_shinfo(skb)->gso_size ?
  12.166 --		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  12.167 -+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
  12.168 - 	return (features & feature) == feature;
  12.169 - }
  12.170 - 
  12.171 - static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  12.172 - {
  12.173 --	return !skb_gso_ok(skb, dev->features);
  12.174 -+	return skb_is_gso(skb) &&
  12.175 -+	       (!skb_gso_ok(skb, dev->features) ||
  12.176 -+		unlikely(skb->ip_summed != CHECKSUM_HW));
  12.177 - }
  12.178 - 
  12.179 - #endif /* __KERNEL__ */
  12.180 -diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  12.181 ---- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  12.182 -+++ ./include/linux/skbuff.h	2006-09-19 13:59:46.000000000 +0100
  12.183 -@@ -1403,5 +1403,10 @@ static inline void nf_bridge_get(struct 
  12.184 - static inline void nf_reset(struct sk_buff *skb) {}
  12.185 - #endif /* CONFIG_NETFILTER */
  12.186 - 
  12.187 -+static inline int skb_is_gso(const struct sk_buff *skb)
  12.188 -+{
  12.189 -+	return skb_shinfo(skb)->gso_size;
  12.190 -+}
  12.191 -+
  12.192 - #endif	/* __KERNEL__ */
  12.193 - #endif	/* _LINUX_SKBUFF_H */
  12.194 -diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
  12.195 ---- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
  12.196 -+++ ./include/net/protocol.h	2006-09-19 13:59:46.000000000 +0100
  12.197 -@@ -37,6 +37,7 @@
  12.198 - struct net_protocol {
  12.199 - 	int			(*handler)(struct sk_buff *skb);
  12.200 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  12.201 -+	int			(*gso_send_check)(struct sk_buff *skb);
  12.202 - 	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  12.203 - 					       int features);
  12.204 - 	int			no_policy;
  12.205 -diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
  12.206 ---- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
  12.207 -+++ ./include/net/tcp.h	2006-09-19 13:59:46.000000000 +0100
  12.208 -@@ -1063,6 +1063,7 @@ extern struct request_sock_ops tcp_reque
  12.209 - 
  12.210 - extern int tcp_v4_destroy_sock(struct sock *sk);
  12.211 - 
  12.212 -+extern int tcp_v4_gso_send_check(struct sk_buff *skb);
  12.213 - extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  12.214 - 
  12.215 - #ifdef CONFIG_PROC_FS
  12.216 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
  12.217 ---- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
  12.218 -+++ ./net/bridge/br_forward.c	2006-09-19 13:59:46.000000000 +0100
  12.219 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  12.220 - int br_dev_queue_push_xmit(struct sk_buff *skb)
  12.221 - {
  12.222 - 	/* drop mtu oversized packets except tso */
  12.223 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  12.224 -+	if (skb->len > skb->dev->mtu && !skb_is_gso(skb))
  12.225 - 		kfree_skb(skb);
  12.226 - 	else {
  12.227 - #ifdef CONFIG_BRIDGE_NETFILTER
  12.228 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
  12.229 ---- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
  12.230 -+++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:46.000000000 +0100
  12.231 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  12.232 - {
  12.233 - 	if (skb->protocol == htons(ETH_P_IP) &&
  12.234 - 	    skb->len > skb->dev->mtu &&
  12.235 --	    !skb_shinfo(skb)->gso_size)
  12.236 -+	    !skb_is_gso(skb))
  12.237 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
  12.238 - 	else
  12.239 - 		return br_dev_queue_push_xmit(skb);
  12.240 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
  12.241 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
  12.242 -+++ ./net/core/dev.c	2006-09-19 13:59:46.000000000 +0100
  12.243 -@@ -1083,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk
  12.244 - 	unsigned int csum;
  12.245 - 	int ret = 0, offset = skb->h.raw - skb->data;
  12.246 - 
  12.247 --	if (inward) {
  12.248 --		skb->ip_summed = CHECKSUM_NONE;
  12.249 --		goto out;
  12.250 -+	if (inward)
  12.251 -+		goto out_set_summed;
  12.252 -+
  12.253 -+	if (unlikely(skb_shinfo(skb)->gso_size)) {
  12.254 -+		static int warned;
  12.255 -+
  12.256 -+		WARN_ON(!warned);
  12.257 -+		warned = 1;
  12.258 -+
  12.259 -+		/* Let GSO fix up the checksum. */
  12.260 -+		goto out_set_summed;
  12.261 - 	}
  12.262 - 
  12.263 - 	if (skb_cloned(skb)) {
  12.264 -@@ -1102,6 +1110,8 @@ int skb_checksum_help(struct sk_buff *sk
  12.265 - 	BUG_ON(skb->csum + 2 > offset);
  12.266 - 
  12.267 - 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
  12.268 -+
  12.269 -+out_set_summed:
  12.270 - 	skb->ip_summed = CHECKSUM_NONE;
  12.271 - out:	
  12.272 - 	return ret;
  12.273 -@@ -1122,17 +1132,35 @@ struct sk_buff *skb_gso_segment(struct s
  12.274 - 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  12.275 - 	struct packet_type *ptype;
  12.276 - 	int type = skb->protocol;
  12.277 -+	int err;
  12.278 - 
  12.279 - 	BUG_ON(skb_shinfo(skb)->frag_list);
  12.280 --	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  12.281 - 
  12.282 - 	skb->mac.raw = skb->data;
  12.283 - 	skb->mac_len = skb->nh.raw - skb->data;
  12.284 - 	__skb_pull(skb, skb->mac_len);
  12.285 - 
  12.286 -+	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  12.287 -+		static int warned;
  12.288 -+
  12.289 -+		WARN_ON(!warned);
  12.290 -+		warned = 1;
  12.291 -+
  12.292 -+		if (skb_header_cloned(skb) &&
  12.293 -+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
  12.294 -+			return ERR_PTR(err);
  12.295 -+	}
  12.296 -+
  12.297 - 	rcu_read_lock();
  12.298 - 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  12.299 - 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  12.300 -+			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  12.301 -+				err = ptype->gso_send_check(skb);
  12.302 -+				segs = ERR_PTR(err);
  12.303 -+				if (err || skb_gso_ok(skb, features))
  12.304 -+					break;
  12.305 -+				__skb_push(skb, skb->data - skb->nh.raw);
  12.306 -+			}
  12.307 - 			segs = ptype->gso_segment(skb, features);
  12.308 - 			break;
  12.309 - 		}
  12.310 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/af_inet.c ./net/ipv4/af_inet.c
  12.311 ---- ../orig-linux-2.6.16.29/net/ipv4/af_inet.c	2006-09-19 13:59:20.000000000 +0100
  12.312 -+++ ./net/ipv4/af_inet.c	2006-09-19 13:59:46.000000000 +0100
  12.313 -@@ -1085,6 +1085,40 @@ int inet_sk_rebuild_header(struct sock *
  12.314 - 
  12.315 - EXPORT_SYMBOL(inet_sk_rebuild_header);
  12.316 - 
  12.317 -+static int inet_gso_send_check(struct sk_buff *skb)
  12.318 -+{
  12.319 -+	struct iphdr *iph;
  12.320 -+	struct net_protocol *ops;
  12.321 -+	int proto;
  12.322 -+	int ihl;
  12.323 -+	int err = -EINVAL;
  12.324 -+
  12.325 -+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
  12.326 -+		goto out;
  12.327 -+
  12.328 -+	iph = skb->nh.iph;
  12.329 -+	ihl = iph->ihl * 4;
  12.330 -+	if (ihl < sizeof(*iph))
  12.331 -+		goto out;
  12.332 -+
  12.333 -+	if (unlikely(!pskb_may_pull(skb, ihl)))
  12.334 -+		goto out;
  12.335 -+
  12.336 -+	skb->h.raw = __skb_pull(skb, ihl);
  12.337 -+	iph = skb->nh.iph;
  12.338 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  12.339 -+	err = -EPROTONOSUPPORT;
  12.340 -+
  12.341 -+	rcu_read_lock();
  12.342 -+	ops = rcu_dereference(inet_protos[proto]);
  12.343 -+	if (likely(ops && ops->gso_send_check))
  12.344 -+		err = ops->gso_send_check(skb);
  12.345 -+	rcu_read_unlock();
  12.346 -+
  12.347 -+out:
  12.348 -+	return err;
  12.349 -+}
  12.350 -+
  12.351 - static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  12.352 - {
  12.353 - 	struct sk_buff *segs = ERR_PTR(-EINVAL);
  12.354 -@@ -1142,6 +1176,7 @@ static struct net_protocol igmp_protocol
  12.355 - static struct net_protocol tcp_protocol = {
  12.356 - 	.handler =	tcp_v4_rcv,
  12.357 - 	.err_handler =	tcp_v4_err,
  12.358 -+	.gso_send_check = tcp_v4_gso_send_check,
  12.359 - 	.gso_segment =	tcp_tso_segment,
  12.360 - 	.no_policy =	1,
  12.361 - };
  12.362 -@@ -1188,6 +1223,7 @@ static int ipv4_proc_init(void);
  12.363 - static struct packet_type ip_packet_type = {
  12.364 - 	.type = __constant_htons(ETH_P_IP),
  12.365 - 	.func = ip_rcv,
  12.366 -+	.gso_send_check = inet_gso_send_check,
  12.367 - 	.gso_segment = inet_gso_segment,
  12.368 - };
  12.369 - 
  12.370 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ip_output.c ./net/ipv4/ip_output.c
  12.371 ---- ../orig-linux-2.6.16.29/net/ipv4/ip_output.c	2006-09-19 13:59:20.000000000 +0100
  12.372 -+++ ./net/ipv4/ip_output.c	2006-09-19 13:59:46.000000000 +0100
  12.373 -@@ -210,7 +210,7 @@ static inline int ip_finish_output(struc
  12.374 - 		return dst_output(skb);
  12.375 - 	}
  12.376 - #endif
  12.377 --	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  12.378 -+	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
  12.379 - 		return ip_fragment(skb, ip_finish_output2);
  12.380 - 	else
  12.381 - 		return ip_finish_output2(skb);
  12.382 -@@ -1095,7 +1095,7 @@ ssize_t	ip_append_page(struct sock *sk, 
  12.383 - 	while (size > 0) {
  12.384 - 		int i;
  12.385 - 
  12.386 --		if (skb_shinfo(skb)->gso_size)
  12.387 -+		if (skb_is_gso(skb))
  12.388 - 			len = size;
  12.389 - 		else {
  12.390 - 
  12.391 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_ipv4.c ./net/ipv4/tcp_ipv4.c
  12.392 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_ipv4.c	2006-09-12 19:02:10.000000000 +0100
  12.393 -+++ ./net/ipv4/tcp_ipv4.c	2006-09-19 13:59:46.000000000 +0100
  12.394 -@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 
  12.395 - 	}
  12.396 - }
  12.397 - 
  12.398 -+int tcp_v4_gso_send_check(struct sk_buff *skb)
  12.399 -+{
  12.400 -+	struct iphdr *iph;
  12.401 -+	struct tcphdr *th;
  12.402 -+
  12.403 -+	if (!pskb_may_pull(skb, sizeof(*th)))
  12.404 -+		return -EINVAL;
  12.405 -+
  12.406 -+	iph = skb->nh.iph;
  12.407 -+	th = skb->h.th;
  12.408 -+
  12.409 -+	th->check = 0;
  12.410 -+	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
  12.411 -+	skb->csum = offsetof(struct tcphdr, check);
  12.412 -+	skb->ip_summed = CHECKSUM_HW;
  12.413 -+	return 0;
  12.414 -+}
  12.415 -+
  12.416 - /*
  12.417 -  *	This routine will send an RST to the other tcp.
  12.418 -  *
  12.419 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
  12.420 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-19 13:59:20.000000000 +0100
  12.421 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:46.000000000 +0100
  12.422 -@@ -195,7 +195,7 @@ static int xfrm4_output_finish(struct sk
  12.423 - 	}
  12.424 - #endif
  12.425 - 
  12.426 --	if (!skb_shinfo(skb)->gso_size)
  12.427 -+	if (!skb_is_gso(skb))
  12.428 - 		return xfrm4_output_finish2(skb);
  12.429 - 
  12.430 - 	skb->protocol = htons(ETH_P_IP);
  12.431 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c ./net/ipv6/ip6_output.c
  12.432 ---- ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c	2006-09-19 13:59:20.000000000 +0100
  12.433 -+++ ./net/ipv6/ip6_output.c	2006-09-19 13:59:46.000000000 +0100
  12.434 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  12.435 - 
  12.436 - int ip6_output(struct sk_buff *skb)
  12.437 - {
  12.438 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  12.439 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
  12.440 - 				dst_allfrag(skb->dst))
  12.441 - 		return ip6_fragment(skb, ip6_output2);
  12.442 - 	else
  12.443 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c ./net/ipv6/xfrm6_output.c
  12.444 ---- ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c	2006-09-19 13:59:20.000000000 +0100
  12.445 -+++ ./net/ipv6/xfrm6_output.c	2006-09-19 13:59:46.000000000 +0100
  12.446 -@@ -179,7 +179,7 @@ static int xfrm6_output_finish(struct sk
  12.447 - {
  12.448 - 	struct sk_buff *segs;
  12.449 - 
  12.450 --	if (!skb_shinfo(skb)->gso_size)
  12.451 -+	if (!skb_is_gso(skb))
  12.452 - 		return xfrm6_output_finish2(skb);
  12.453 - 
  12.454 - 	skb->protocol = htons(ETH_P_IP);
    13.1 --- a/patches/linux-2.6.16.30/net-gso-3-fix-errorcheck.patch	Mon Nov 27 13:50:01 2006 +0000
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,17 +0,0 @@
    13.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
    13.5 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-19 13:59:46.000000000 +0100
    13.6 -+++ ./include/linux/netdevice.h	2006-09-19 14:05:28.000000000 +0100
    13.7 -@@ -930,10 +930,10 @@ static inline void netif_tx_lock_bh(stru
    13.8 - 
    13.9 - static inline int netif_tx_trylock(struct net_device *dev)
   13.10 - {
   13.11 --	int err = spin_trylock(&dev->_xmit_lock);
   13.12 --	if (!err)
   13.13 -+	int ok = spin_trylock(&dev->_xmit_lock);
   13.14 -+	if (likely(ok))
   13.15 - 		dev->xmit_lock_owner = smp_processor_id();
   13.16 --	return err;
   13.17 -+	return ok;
   13.18 - }
   13.19 - 
   13.20 - static inline void netif_tx_unlock(struct net_device *dev)
    14.1 --- a/patches/linux-2.6.16.30/net-gso-4-kill-warnon.patch	Mon Nov 27 13:50:01 2006 +0000
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,27 +0,0 @@
    14.4 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
    14.5 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-19 13:59:46.000000000 +0100
    14.6 -+++ ./net/core/dev.c	2006-09-19 14:05:32.000000000 +0100
    14.7 -@@ -1087,11 +1087,6 @@ int skb_checksum_help(struct sk_buff *sk
    14.8 - 		goto out_set_summed;
    14.9 - 
   14.10 - 	if (unlikely(skb_shinfo(skb)->gso_size)) {
   14.11 --		static int warned;
   14.12 --
   14.13 --		WARN_ON(!warned);
   14.14 --		warned = 1;
   14.15 --
   14.16 - 		/* Let GSO fix up the checksum. */
   14.17 - 		goto out_set_summed;
   14.18 - 	}
   14.19 -@@ -1141,11 +1136,6 @@ struct sk_buff *skb_gso_segment(struct s
   14.20 - 	__skb_pull(skb, skb->mac_len);
   14.21 - 
   14.22 - 	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
   14.23 --		static int warned;
   14.24 --
   14.25 --		WARN_ON(!warned);
   14.26 --		warned = 1;
   14.27 --
   14.28 - 		if (skb_header_cloned(skb) &&
   14.29 - 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
   14.30 - 			return ERR_PTR(err);
    15.1 --- a/patches/linux-2.6.16.30/net-gso-5-rcv-mss.patch	Mon Nov 27 13:50:01 2006 +0000
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,13 +0,0 @@
    15.4 -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
    15.5 -index 104af5d..1fa1536 100644
    15.6 ---- a/net/ipv4/tcp_input.c
    15.7 -+++ b/net/ipv4/tcp_input.c
    15.8 -@@ -127,7 +127,7 @@ static void tcp_measure_rcv_mss(struct s
    15.9 - 	/* skb->len may jitter because of SACKs, even if peer
   15.10 - 	 * sends good full-sized frames.
   15.11 - 	 */
   15.12 --	len = skb->len;
   15.13 -+	len = skb_shinfo(skb)->gso_size ?: skb->len;
   15.14 - 	if (len >= icsk->icsk_ack.rcv_mss) {
   15.15 - 		icsk->icsk_ack.rcv_mss = len;
   15.16 - 	} else {
    16.1 --- a/patches/linux-2.6.16.30/pci-mmconfig-fix-from-2.6.17.patch	Mon Nov 27 13:50:01 2006 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,292 +0,0 @@
    16.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/pci/mmconfig.c ./arch/i386/pci/mmconfig.c
    16.5 ---- ../orig-linux-2.6.16.29/arch/i386/pci/mmconfig.c	2006-09-12 19:02:10.000000000 +0100
    16.6 -+++ ./arch/i386/pci/mmconfig.c	2006-09-21 09:35:27.000000000 +0100
    16.7 -@@ -12,14 +12,22 @@
    16.8 - #include <linux/pci.h>
    16.9 - #include <linux/init.h>
   16.10 - #include <linux/acpi.h>
   16.11 -+#include <asm/e820.h>
   16.12 - #include "pci.h"
   16.13 - 
   16.14 -+/* aperture is up to 256MB but BIOS may reserve less */
   16.15 -+#define MMCONFIG_APER_MIN	(2 * 1024*1024)
   16.16 -+#define MMCONFIG_APER_MAX	(256 * 1024*1024)
   16.17 -+
   16.18 -+/* Assume systems with more busses have correct MCFG */
   16.19 -+#define MAX_CHECK_BUS 16
   16.20 -+
   16.21 - #define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))
   16.22 - 
   16.23 - /* The base address of the last MMCONFIG device accessed */
   16.24 - static u32 mmcfg_last_accessed_device;
   16.25 - 
   16.26 --static DECLARE_BITMAP(fallback_slots, 32);
   16.27 -+static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
   16.28 - 
   16.29 - /*
   16.30 -  * Functions for accessing PCI configuration space with MMCONFIG accesses
   16.31 -@@ -29,8 +37,8 @@ static u32 get_base_addr(unsigned int se
   16.32 - 	int cfg_num = -1;
   16.33 - 	struct acpi_table_mcfg_config *cfg;
   16.34 - 
   16.35 --	if (seg == 0 && bus == 0 &&
   16.36 --	    test_bit(PCI_SLOT(devfn), fallback_slots))
   16.37 -+	if (seg == 0 && bus < MAX_CHECK_BUS &&
   16.38 -+	    test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
   16.39 - 		return 0;
   16.40 - 
   16.41 - 	while (1) {
   16.42 -@@ -74,8 +82,10 @@ static int pci_mmcfg_read(unsigned int s
   16.43 - 	unsigned long flags;
   16.44 - 	u32 base;
   16.45 - 
   16.46 --	if (!value || (bus > 255) || (devfn > 255) || (reg > 4095))
   16.47 -+	if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
   16.48 -+		*value = -1;
   16.49 - 		return -EINVAL;
   16.50 -+	}
   16.51 - 
   16.52 - 	base = get_base_addr(seg, bus, devfn);
   16.53 - 	if (!base)
   16.54 -@@ -146,30 +156,66 @@ static struct pci_raw_ops pci_mmcfg = {
   16.55 -    Normally this can be expressed in the MCFG by not listing them
   16.56 -    and assigning suitable _SEGs, but this isn't implemented in some BIOS.
   16.57 -    Instead try to discover all devices on bus 0 that are unreachable using MM
   16.58 --   and fallback for them.
   16.59 --   We only do this for bus 0/seg 0 */
   16.60 -+   and fallback for them. */
   16.61 - static __init void unreachable_devices(void)
   16.62 - {
   16.63 --	int i;
   16.64 -+	int i, k;
   16.65 - 	unsigned long flags;
   16.66 - 
   16.67 --	for (i = 0; i < 32; i++) {
   16.68 --		u32 val1;
   16.69 --		u32 addr;
   16.70 -+	for (k = 0; k < MAX_CHECK_BUS; k++) {
   16.71 -+		for (i = 0; i < 32; i++) {
   16.72 -+			u32 val1;
   16.73 -+			u32 addr;
   16.74 -+
   16.75 -+			pci_conf1_read(0, k, PCI_DEVFN(i, 0), 0, 4, &val1);
   16.76 -+			if (val1 == 0xffffffff)
   16.77 -+				continue;
   16.78 -+
   16.79 -+			/* Locking probably not needed, but safer */
   16.80 -+			spin_lock_irqsave(&pci_config_lock, flags);
   16.81 -+			addr = get_base_addr(0, k, PCI_DEVFN(i, 0));
   16.82 -+			if (addr != 0)
   16.83 -+				pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0));
   16.84 -+			if (addr == 0 ||
   16.85 -+			    readl((u32 __iomem *)mmcfg_virt_addr) != val1) {
   16.86 -+				set_bit(i, fallback_slots);
   16.87 -+				printk(KERN_NOTICE
   16.88 -+			"PCI: No mmconfig possible on %x:%x\n", k, i);
   16.89 -+			}
   16.90 -+			spin_unlock_irqrestore(&pci_config_lock, flags);
   16.91 -+		}
   16.92 -+	}
   16.93 -+}
   16.94 - 
   16.95 --		pci_conf1_read(0, 0, PCI_DEVFN(i, 0), 0, 4, &val1);
   16.96 --		if (val1 == 0xffffffff)
   16.97 -+/* NB. Ripped from arch/i386/kernel/setup.c for this Xen bugfix patch. */
   16.98 -+#ifdef CONFIG_XEN
   16.99 -+extern struct e820map machine_e820;
  16.100 -+#define e820 machine_e820
  16.101 -+#endif
  16.102 -+static int __init
  16.103 -+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
  16.104 -+{
  16.105 -+	u64 start = s;
  16.106 -+	u64 end = e;
  16.107 -+	int i;
  16.108 -+	for (i = 0; i < e820.nr_map; i++) {
  16.109 -+		struct e820entry *ei = &e820.map[i];
  16.110 -+		if (type && ei->type != type)
  16.111 - 			continue;
  16.112 --
  16.113 --		/* Locking probably not needed, but safer */
  16.114 --		spin_lock_irqsave(&pci_config_lock, flags);
  16.115 --		addr = get_base_addr(0, 0, PCI_DEVFN(i, 0));
  16.116 --		if (addr != 0)
  16.117 --			pci_exp_set_dev_base(addr, 0, PCI_DEVFN(i, 0));
  16.118 --		if (addr == 0 || readl((u32 __iomem *)mmcfg_virt_addr) != val1)
  16.119 --			set_bit(i, fallback_slots);
  16.120 --		spin_unlock_irqrestore(&pci_config_lock, flags);
  16.121 -+		/* is the region (part) in overlap with the current region ?*/
  16.122 -+		if (ei->addr >= end || ei->addr + ei->size <= start)
  16.123 -+			continue;
  16.124 -+		/* if the region is at the beginning of <start,end> we move
  16.125 -+		 * start to the end of the region since it's ok until there
  16.126 -+		 */
  16.127 -+		if (ei->addr <= start)
  16.128 -+			start = ei->addr + ei->size;
  16.129 -+		/* if start is now at or beyond end, we're done, full
  16.130 -+		 * coverage */
  16.131 -+		if (start >= end)
  16.132 -+			return 1; /* we're done */
  16.133 - 	}
  16.134 -+	return 0;
  16.135 - }
  16.136 - 
  16.137 - static int __init pci_mmcfg_init(void)
  16.138 -@@ -183,6 +229,15 @@ static int __init pci_mmcfg_init(void)
  16.139 - 	    (pci_mmcfg_config[0].base_address == 0))
  16.140 - 		goto out;
  16.141 - 
  16.142 -+	if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
  16.143 -+			pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
  16.144 -+			E820_RESERVED)) {
  16.145 -+		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
  16.146 -+				pci_mmcfg_config[0].base_address);
  16.147 -+		printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
  16.148 -+		goto out;
  16.149 -+	}
  16.150 -+
  16.151 - 	printk(KERN_INFO "PCI: Using MMCONFIG\n");
  16.152 - 	raw_pci_ops = &pci_mmcfg;
  16.153 - 	pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
  16.154 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/pci/mmconfig.c ./arch/x86_64/pci/mmconfig.c
  16.155 ---- ../orig-linux-2.6.16.29/arch/x86_64/pci/mmconfig.c	2006-09-12 19:02:10.000000000 +0100
  16.156 -+++ ./arch/x86_64/pci/mmconfig.c	2006-09-21 09:35:40.000000000 +0100
  16.157 -@@ -9,11 +9,19 @@
  16.158 - #include <linux/init.h>
  16.159 - #include <linux/acpi.h>
  16.160 - #include <linux/bitmap.h>
  16.161 -+#include <asm/e820.h>
  16.162 -+
  16.163 - #include "pci.h"
  16.164 - 
  16.165 --#define MMCONFIG_APER_SIZE (256*1024*1024)
  16.166 -+/* aperture is up to 256MB but BIOS may reserve less */
  16.167 -+#define MMCONFIG_APER_MIN	(2 * 1024*1024)
  16.168 -+#define MMCONFIG_APER_MAX	(256 * 1024*1024)
  16.169 -+
  16.170 -+/* Verify the first 16 busses. We assume that systems with more busses
  16.171 -+   get MCFG right. */
  16.172 -+#define MAX_CHECK_BUS 16
  16.173 - 
  16.174 --static DECLARE_BITMAP(fallback_slots, 32);
  16.175 -+static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
  16.176 - 
  16.177 - /* Static virtual mapping of the MMCONFIG aperture */
  16.178 - struct mmcfg_virt {
  16.179 -@@ -55,7 +63,8 @@ static char __iomem *get_virt(unsigned i
  16.180 - static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
  16.181 - {
  16.182 - 	char __iomem *addr;
  16.183 --	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
  16.184 -+	if (seg == 0 && bus < MAX_CHECK_BUS &&
  16.185 -+		test_bit(32*bus + PCI_SLOT(devfn), fallback_slots))
  16.186 - 		return NULL;
  16.187 - 	addr = get_virt(seg, bus);
  16.188 - 	if (!addr)
  16.189 -@@ -69,8 +78,10 @@ static int pci_mmcfg_read(unsigned int s
  16.190 - 	char __iomem *addr;
  16.191 - 
  16.192 - 	/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
  16.193 --	if (unlikely(!value || (bus > 255) || (devfn > 255) || (reg > 4095)))
  16.194 -+	if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
  16.195 -+		*value = -1;
  16.196 - 		return -EINVAL;
  16.197 -+	}
  16.198 - 
  16.199 - 	addr = pci_dev_base(seg, bus, devfn);
  16.200 - 	if (!addr)
  16.201 -@@ -129,23 +140,56 @@ static struct pci_raw_ops pci_mmcfg = {
  16.202 -    Normally this can be expressed in the MCFG by not listing them
  16.203 -    and assigning suitable _SEGs, but this isn't implemented in some BIOS.
  16.204 -    Instead try to discover all devices on bus 0 that are unreachable using MM
  16.205 --   and fallback for them.
  16.206 --   We only do this for bus 0/seg 0 */
  16.207 -+   and fallback for them. */
  16.208 - static __init void unreachable_devices(void)
  16.209 - {
  16.210 --	int i;
  16.211 --	for (i = 0; i < 32; i++) {
  16.212 --		u32 val1;
  16.213 --		char __iomem *addr;
  16.214 -+	int i, k;
  16.215 -+	/* Use the max bus number from ACPI here? */
  16.216 -+	for (k = 0; k < MAX_CHECK_BUS; k++) {
  16.217 -+		for (i = 0; i < 32; i++) {
  16.218 -+			u32 val1;
  16.219 -+			char __iomem *addr;
  16.220 -+
  16.221 -+			pci_conf1_read(0, k, PCI_DEVFN(i,0), 0, 4, &val1);
  16.222 -+			if (val1 == 0xffffffff)
  16.223 -+				continue;
  16.224 -+			addr = pci_dev_base(0, k, PCI_DEVFN(i, 0));
  16.225 -+			if (addr == NULL|| readl(addr) != val1) {
  16.226 -+				set_bit(i + 32*k, fallback_slots);
  16.227 -+				printk(KERN_NOTICE
  16.228 -+				"PCI: No mmconfig possible on device %x:%x\n",
  16.229 -+					k, i);
  16.230 -+			}
  16.231 -+		}
  16.232 -+	}
  16.233 -+}
  16.234 - 
  16.235 --		pci_conf1_read(0, 0, PCI_DEVFN(i,0), 0, 4, &val1);
  16.236 --		if (val1 == 0xffffffff)
  16.237 -+/* NB. Ripped from arch/x86_64/kernel/e820.c for this Xen bugfix patch. */
  16.238 -+#ifdef CONFIG_XEN
  16.239 -+extern struct e820map machine_e820;
  16.240 -+#define e820 machine_e820
  16.241 -+#endif
  16.242 -+static int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
  16.243 -+{
  16.244 -+	int i;
  16.245 -+	for (i = 0; i < e820.nr_map; i++) {
  16.246 -+		struct e820entry *ei = &e820.map[i];
  16.247 -+		if (type && ei->type != type)
  16.248 - 			continue;
  16.249 --		addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
  16.250 --		if (addr == NULL|| readl(addr) != val1) {
  16.251 --			set_bit(i, &fallback_slots);
  16.252 --		}
  16.253 -+		/* is the region (part) in overlap with the current region ?*/
  16.254 -+		if (ei->addr >= end || ei->addr + ei->size <= start)
  16.255 -+			continue;
  16.256 -+
  16.257 -+		/* if the region is at the beginning of <start,end> we move
  16.258 -+		 * start to the end of the region since it's ok until there
  16.259 -+		 */
  16.260 -+		if (ei->addr <= start)
  16.261 -+			start = ei->addr + ei->size;
  16.262 -+		/* if start is now at or beyond end, we're done, full coverage */
  16.263 -+		if (start >= end)
  16.264 -+			return 1; /* we're done */
  16.265 - 	}
  16.266 -+	return 0;
  16.267 - }
  16.268 - 
  16.269 - static int __init pci_mmcfg_init(void)
  16.270 -@@ -161,6 +205,15 @@ static int __init pci_mmcfg_init(void)
  16.271 - 	    (pci_mmcfg_config[0].base_address == 0))
  16.272 - 		return 0;
  16.273 - 
  16.274 -+	if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
  16.275 -+			pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
  16.276 -+			E820_RESERVED)) {
  16.277 -+		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
  16.278 -+				pci_mmcfg_config[0].base_address);
  16.279 -+		printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
  16.280 -+		return 0;
  16.281 -+	}
  16.282 -+
  16.283 - 	/* RED-PEN i386 doesn't do _nocache right now */
  16.284 - 	pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
  16.285 - 	if (pci_mmcfg_virt == NULL) {
  16.286 -@@ -169,7 +222,8 @@ static int __init pci_mmcfg_init(void)
  16.287 - 	}
  16.288 - 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
  16.289 - 		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
  16.290 --		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE);
  16.291 -+		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
  16.292 -+							 MMCONFIG_APER_MAX);
  16.293 - 		if (!pci_mmcfg_virt[i].virt) {
  16.294 - 			printk("PCI: Cannot map mmconfig aperture for segment %d\n",
  16.295 - 			       pci_mmcfg_config[i].pci_segment_group_number);
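
Both halves of pci-mmconfig-fix-from-2.6.17.patch above add the same e820_all_mapped() test: MMCONFIG is only trusted when at least the first MMCONFIG_APER_MIN bytes of the MCFG aperture are covered by E820_RESERVED entries. A self-contained sketch of that coverage walk; struct region, all_mapped() and the sample map are invented for illustration, whereas the kernel iterates the real e820 (or machine_e820 under Xen) table:

    #include <stdio.h>

    struct region {
        unsigned long addr;
        unsigned long size;
        int type;
    };

    /* Advance `start` past every matching region that overlaps [start, end);
     * the range is fully covered iff start reaches end. */
    static int all_mapped(const struct region *map, int n,
                          unsigned long start, unsigned long end, int type)
    {
        int i;

        for (i = 0; i < n; i++) {
            const struct region *r = &map[i];

            if (type && r->type != type)
                continue;
            if (r->addr >= end || r->addr + r->size <= start)
                continue;               /* no overlap with [start, end) */
            if (r->addr <= start)
                start = r->addr + r->size;
            if (start >= end)
                return 1;               /* full coverage */
        }
        return 0;
    }

    int main(void)
    {
        /* made-up map: one "reserved" region covering the whole aperture */
        const struct region map[] = {
            { 0xe0000000UL, 0x10000000UL, 2 },
        };

        printf("%d\n", all_mapped(map, 1, 0xe0000000UL,
                                  0xe0000000UL + 2 * 1024 * 1024, 2));
        return 0;
    }
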
    17.1 --- a/patches/linux-2.6.16.30/pmd-shared.patch	Mon Nov 27 13:50:01 2006 +0000
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,111 +0,0 @@
    17.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/mm/pageattr.c ./arch/i386/mm/pageattr.c
    17.5 ---- ../orig-linux-2.6.16.29/arch/i386/mm/pageattr.c	2006-09-12 19:02:10.000000000 +0100
    17.6 -+++ ./arch/i386/mm/pageattr.c	2006-09-19 14:05:35.000000000 +0100
    17.7 -@@ -78,7 +78,7 @@ static void set_pmd_pte(pte_t *kpte, uns
    17.8 - 	unsigned long flags;
    17.9 - 
   17.10 - 	set_pte_atomic(kpte, pte); 	/* change init_mm */
   17.11 --	if (PTRS_PER_PMD > 1)
   17.12 -+	if (HAVE_SHARED_KERNEL_PMD)
   17.13 - 		return;
   17.14 - 
   17.15 - 	spin_lock_irqsave(&pgd_lock, flags);
   17.16 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/mm/pgtable.c ./arch/i386/mm/pgtable.c
   17.17 ---- ../orig-linux-2.6.16.29/arch/i386/mm/pgtable.c	2006-09-12 19:02:10.000000000 +0100
   17.18 -+++ ./arch/i386/mm/pgtable.c	2006-09-19 14:05:35.000000000 +0100
   17.19 -@@ -215,9 +215,10 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
   17.20 - 		spin_lock_irqsave(&pgd_lock, flags);
   17.21 - 	}
   17.22 - 
   17.23 --	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   17.24 --			swapper_pg_dir + USER_PTRS_PER_PGD,
   17.25 --			KERNEL_PGD_PTRS);
   17.26 -+	if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
   17.27 -+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   17.28 -+				swapper_pg_dir + USER_PTRS_PER_PGD,
   17.29 -+				KERNEL_PGD_PTRS);
   17.30 - 	if (PTRS_PER_PMD > 1)
   17.31 - 		return;
   17.32 - 
   17.33 -@@ -249,6 +250,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
   17.34 - 			goto out_oom;
   17.35 - 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
   17.36 - 	}
   17.37 -+
   17.38 -+	if (!HAVE_SHARED_KERNEL_PMD) {
   17.39 -+		unsigned long flags;
   17.40 -+
   17.41 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   17.42 -+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
   17.43 -+			if (!pmd)
   17.44 -+				goto out_oom;
   17.45 -+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
   17.46 -+		}
   17.47 -+
   17.48 -+		spin_lock_irqsave(&pgd_lock, flags);
   17.49 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   17.50 -+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
   17.51 -+			pgd_t *kpgd = pgd_offset_k(v);
   17.52 -+			pud_t *kpud = pud_offset(kpgd, v);
   17.53 -+			pmd_t *kpmd = pmd_offset(kpud, v);
   17.54 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   17.55 -+			memcpy(pmd, kpmd, PAGE_SIZE);
   17.56 -+		}
   17.57 -+		pgd_list_add(pgd);
   17.58 -+		spin_unlock_irqrestore(&pgd_lock, flags);
   17.59 -+	}
   17.60 -+
   17.61 - 	return pgd;
   17.62 - 
   17.63 - out_oom:
   17.64 -@@ -263,9 +288,23 @@ void pgd_free(pgd_t *pgd)
   17.65 - 	int i;
   17.66 - 
   17.67 - 	/* in the PAE case user pgd entries are overwritten before usage */
   17.68 --	if (PTRS_PER_PMD > 1)
   17.69 --		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
   17.70 --			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
   17.71 -+	if (PTRS_PER_PMD > 1) {
   17.72 -+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
   17.73 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   17.74 -+			kmem_cache_free(pmd_cache, pmd);
   17.75 -+		}
   17.76 -+		if (!HAVE_SHARED_KERNEL_PMD) {
   17.77 -+			unsigned long flags;
   17.78 -+			spin_lock_irqsave(&pgd_lock, flags);
   17.79 -+			pgd_list_del(pgd);
   17.80 -+			spin_unlock_irqrestore(&pgd_lock, flags);
   17.81 -+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   17.82 -+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   17.83 -+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   17.84 -+				kmem_cache_free(pmd_cache, pmd);
   17.85 -+			}
   17.86 -+		}
   17.87 -+	}
   17.88 - 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
   17.89 - 	kmem_cache_free(pgd_cache, pgd);
   17.90 - }
   17.91 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/pgtable-2level-defs.h ./include/asm-i386/pgtable-2level-defs.h
   17.92 ---- ../orig-linux-2.6.16.29/include/asm-i386/pgtable-2level-defs.h	2006-09-12 19:02:10.000000000 +0100
   17.93 -+++ ./include/asm-i386/pgtable-2level-defs.h	2006-09-19 14:05:35.000000000 +0100
   17.94 -@@ -1,6 +1,8 @@
   17.95 - #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
   17.96 - #define _I386_PGTABLE_2LEVEL_DEFS_H
   17.97 - 
   17.98 -+#define HAVE_SHARED_KERNEL_PMD 0
   17.99 -+
  17.100 - /*
  17.101 -  * traditional i386 two-level paging structure:
  17.102 -  */
  17.103 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/pgtable-3level-defs.h ./include/asm-i386/pgtable-3level-defs.h
  17.104 ---- ../orig-linux-2.6.16.29/include/asm-i386/pgtable-3level-defs.h	2006-09-12 19:02:10.000000000 +0100
  17.105 -+++ ./include/asm-i386/pgtable-3level-defs.h	2006-09-19 14:05:35.000000000 +0100
  17.106 -@@ -1,6 +1,8 @@
  17.107 - #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
  17.108 - #define _I386_PGTABLE_3LEVEL_DEFS_H
  17.109 - 
  17.110 -+#define HAVE_SHARED_KERNEL_PMD 1
  17.111 -+
  17.112 - /*
  17.113 -  * PGDIR_SHIFT determines what a top-level page table entry can map
  17.114 -  */
    18.1 --- a/patches/linux-2.6.16.30/rcu_needs_cpu.patch	Mon Nov 27 13:50:01 2006 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,35 +0,0 @@
    18.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/rcupdate.h ./include/linux/rcupdate.h
    18.5 ---- ../orig-linux-2.6.16.29/include/linux/rcupdate.h	2006-09-12 19:02:10.000000000 +0100
    18.6 -+++ ./include/linux/rcupdate.h	2006-09-19 14:05:39.000000000 +0100
    18.7 -@@ -134,6 +134,7 @@ static inline void rcu_bh_qsctr_inc(int 
    18.8 - }
    18.9 - 
   18.10 - extern int rcu_pending(int cpu);
   18.11 -+extern int rcu_needs_cpu(int cpu);
   18.12 - 
   18.13 - /**
   18.14 -  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
   18.15 -diff -pruN ../orig-linux-2.6.16.29/kernel/rcupdate.c ./kernel/rcupdate.c
   18.16 ---- ../orig-linux-2.6.16.29/kernel/rcupdate.c	2006-09-12 19:02:10.000000000 +0100
   18.17 -+++ ./kernel/rcupdate.c	2006-09-19 14:05:39.000000000 +0100
   18.18 -@@ -485,6 +485,20 @@ int rcu_pending(int cpu)
   18.19 - 		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
   18.20 - }
   18.21 - 
   18.22 -+/*
   18.23 -+ * Check to see if any future RCU-related work will need to be done
   18.24 -+ * by the current CPU, even if none need be done immediately, returning
   18.25 -+ * 1 if so.  This function is part of the RCU implementation; it is -not-
   18.26 -+ * an exported member of the RCU API.
   18.27 -+ */
   18.28 -+int rcu_needs_cpu(int cpu)
   18.29 -+{
   18.30 -+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
   18.31 -+	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
   18.32 -+
   18.33 -+	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
   18.34 -+}
   18.35 -+
   18.36 - void rcu_check_callbacks(int cpu, int user)
   18.37 - {
   18.38 - 	if (user || 
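
rcu_needs_cpu.patch exports a predicate that reports whether a CPU still has deferred RCU work, i.e. callbacks queued on either per-CPU list, even when rcu_pending() says nothing is due immediately. A userspace sketch of the same test; the *_stub struct and helpers stand in for the kernel's per-CPU rcu_data and rcu_pending() and are invented for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    /* trimmed-down stand-in for struct rcu_data: only the callback list head */
    struct rcu_data_stub {
        void *curlist;
    };

    static bool rcu_pending_stub(int cpu)
    {
        (void)cpu;
        return false;       /* pretend no grace-period work is due right now */
    }

    /* mirror of the predicate above: the CPU still matters to RCU if either
     * callback list is non-empty or immediate work is pending */
    static bool rcu_needs_cpu_stub(const struct rcu_data_stub *rdp,
                                   const struct rcu_data_stub *rdp_bh, int cpu)
    {
        return rdp->curlist || rdp_bh->curlist || rcu_pending_stub(cpu);
    }

    int main(void)
    {
        struct rcu_data_stub rdp = { .curlist = (void *)&rdp };  /* queued callback */
        struct rcu_data_stub rdp_bh = { .curlist = NULL };

        printf("rcu_needs_cpu: %d\n", rcu_needs_cpu_stub(&rdp, &rdp_bh, 0));
        return 0;
    }
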
    19.1 --- a/patches/linux-2.6.16.30/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch	Mon Nov 27 13:50:01 2006 +0000
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,30 +0,0 @@
    19.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    19.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S	2006-09-12 19:02:10.000000000 +0100
    19.6 -+++ ./arch/i386/kernel/entry.S	2006-09-19 14:05:44.000000000 +0100
    19.7 -@@ -177,7 +177,7 @@ need_resched:
    19.8 - 
    19.9 - 	# sysenter call handler stub
   19.10 - ENTRY(sysenter_entry)
   19.11 --	movl TSS_sysenter_esp0(%esp),%esp
   19.12 -+	movl SYSENTER_stack_esp0(%esp),%esp
   19.13 - sysenter_past_esp:
   19.14 - 	sti
   19.15 - 	pushl $(__USER_DS)
   19.16 -@@ -492,7 +492,7 @@ device_not_available_emulate:
   19.17 -  * that sets up the real kernel stack. Check here, since we can't
   19.18 -  * allow the wrong stack to be used.
   19.19 -  *
   19.20 -- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
   19.21 -+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
   19.22 -  * already pushed 3 words if it hits on the sysenter instruction:
   19.23 -  * eflags, cs and eip.
   19.24 -  *
   19.25 -@@ -504,7 +504,7 @@ device_not_available_emulate:
   19.26 - 	cmpw $__KERNEL_CS,4(%esp);		\
   19.27 - 	jne ok;					\
   19.28 - label:						\
   19.29 --	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
   19.30 -+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
   19.31 - 	pushfl;					\
   19.32 - 	pushl $__KERNEL_CS;			\
   19.33 - 	pushl $sysenter_past_esp
    20.1 --- a/patches/linux-2.6.16.30/series	Mon Nov 27 13:50:01 2006 +0000
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,25 +0,0 @@
    20.4 -blktap-aio-16_03_06.patch
    20.5 -device_bind.patch
    20.6 -fix-hz-suspend.patch
    20.7 -fix-ide-cd-pio-mode.patch
    20.8 -i386-mach-io-check-nmi.patch
    20.9 -ipv6-no-autoconf.patch
   20.10 -net-csum.patch
   20.11 -net-gso-0-base.patch
   20.12 -net-gso-1-check-dodgy.patch
   20.13 -net-gso-2-checksum-fix.patch
   20.14 -net-gso-3-fix-errorcheck.patch
   20.15 -net-gso-4-kill-warnon.patch
   20.16 -net-gso-5-rcv-mss.patch
   20.17 -pci-mmconfig-fix-from-2.6.17.patch
   20.18 -pmd-shared.patch
   20.19 -rcu_needs_cpu.patch
   20.20 -rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
   20.21 -smp-alts.patch
   20.22 -tpm_plugin_2.6.17.patch
   20.23 -x86-increase-interrupt-vector-range.patch
   20.24 -xen-hotplug.patch
   20.25 -xenoprof-generic.patch
   20.26 -x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   20.27 -x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   20.28 -x86-elfnote-as-preprocessor-macro.patch
    21.1 --- a/patches/linux-2.6.16.30/smp-alts.patch	Mon Nov 27 13:50:01 2006 +0000
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,591 +0,0 @@
    21.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/Kconfig ./arch/i386/Kconfig
    21.5 ---- ../orig-linux-2.6.16.29/arch/i386/Kconfig	2006-09-12 19:02:10.000000000 +0100
    21.6 -+++ ./arch/i386/Kconfig	2006-09-19 14:05:48.000000000 +0100
    21.7 -@@ -202,6 +202,19 @@ config SMP
    21.8 - 
    21.9 - 	  If you don't know what to do here, say N.
   21.10 - 
   21.11 -+config SMP_ALTERNATIVES
   21.12 -+	bool "SMP alternatives support (EXPERIMENTAL)"
   21.13 -+	depends on SMP && EXPERIMENTAL
   21.14 -+	help
   21.15 -+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
   21.16 -+	  host slightly by replacing certain key instruction sequences
   21.17 -+	  according to whether we currently have more than one CPU available.
   21.18 -+	  This should provide a noticeable boost to performance when
   21.19 -+	  running SMP kernels on UP machines, and have negligible impact
   21.20 -+	  when running on an true SMP host.
   21.21 -+
   21.22 -+          If unsure, say N.
   21.23 -+	  
   21.24 - config NR_CPUS
   21.25 - 	int "Maximum number of CPUs (2-255)"
   21.26 - 	range 2 255
   21.27 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile ./arch/i386/kernel/Makefile
   21.28 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile	2006-09-12 19:02:10.000000000 +0100
   21.29 -+++ ./arch/i386/kernel/Makefile	2006-09-19 14:05:48.000000000 +0100
   21.30 -@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
   21.31 - obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault.o
   21.32 - obj-$(CONFIG_VM86)		+= vm86.o
   21.33 - obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
   21.34 -+obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
   21.35 - 
   21.36 - EXTRA_AFLAGS   := -traditional
   21.37 - 
   21.38 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c ./arch/i386/kernel/smpalts.c
   21.39 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
   21.40 -+++ ./arch/i386/kernel/smpalts.c	2006-09-19 14:05:48.000000000 +0100
   21.41 -@@ -0,0 +1,85 @@
   21.42 -+#include <linux/kernel.h>
   21.43 -+#include <asm/system.h>
   21.44 -+#include <asm/smp_alt.h>
   21.45 -+#include <asm/processor.h>
   21.46 -+#include <asm/string.h>
   21.47 -+
   21.48 -+struct smp_replacement_record {
   21.49 -+	unsigned char targ_size;
   21.50 -+	unsigned char smp1_size;
   21.51 -+	unsigned char smp2_size;
   21.52 -+	unsigned char up_size;
   21.53 -+	unsigned char feature;
   21.54 -+	unsigned char data[0];
   21.55 -+};
   21.56 -+
   21.57 -+struct smp_alternative_record {
   21.58 -+	void *targ_start;
   21.59 -+	struct smp_replacement_record *repl;
   21.60 -+};
   21.61 -+
   21.62 -+extern struct smp_alternative_record __start_smp_alternatives_table,
   21.63 -+  __stop_smp_alternatives_table;
   21.64 -+extern unsigned long __init_begin, __init_end;
   21.65 -+
   21.66 -+void prepare_for_smp(void)
   21.67 -+{
   21.68 -+	struct smp_alternative_record *r;
   21.69 -+	printk(KERN_INFO "Enabling SMP...\n");
   21.70 -+	for (r = &__start_smp_alternatives_table;
   21.71 -+	     r != &__stop_smp_alternatives_table;
   21.72 -+	     r++) {
   21.73 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
   21.74 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
   21.75 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
   21.76 -+               if (system_state == SYSTEM_RUNNING &&
   21.77 -+                   r->targ_start >= (void *)&__init_begin &&
   21.78 -+                   r->targ_start < (void *)&__init_end)
   21.79 -+                       continue;
   21.80 -+		if (r->repl->feature != (unsigned char)-1 &&
   21.81 -+		    boot_cpu_has(r->repl->feature)) {
   21.82 -+			memcpy(r->targ_start,
   21.83 -+			       r->repl->data + r->repl->smp1_size,
   21.84 -+			       r->repl->smp2_size);
   21.85 -+			memset(r->targ_start + r->repl->smp2_size,
   21.86 -+			       0x90,
   21.87 -+			       r->repl->targ_size - r->repl->smp2_size);
   21.88 -+		} else {
   21.89 -+			memcpy(r->targ_start,
   21.90 -+			       r->repl->data,
   21.91 -+			       r->repl->smp1_size);
   21.92 -+			memset(r->targ_start + r->repl->smp1_size,
   21.93 -+			       0x90,
   21.94 -+			       r->repl->targ_size - r->repl->smp1_size);
   21.95 -+		}
   21.96 -+	}
   21.97 -+	/* Paranoia */
   21.98 -+	asm volatile ("jmp 1f\n1:");
   21.99 -+	mb();
  21.100 -+}
  21.101 -+
  21.102 -+void unprepare_for_smp(void)
  21.103 -+{
  21.104 -+	struct smp_alternative_record *r;
  21.105 -+	printk(KERN_INFO "Disabling SMP...\n");
  21.106 -+	for (r = &__start_smp_alternatives_table;
  21.107 -+	     r != &__stop_smp_alternatives_table;
  21.108 -+	     r++) {
  21.109 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
  21.110 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
  21.111 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
  21.112 -+               if (system_state == SYSTEM_RUNNING &&
  21.113 -+                   r->targ_start >= (void *)&__init_begin &&
  21.114 -+                   r->targ_start < (void *)&__init_end)
  21.115 -+                       continue;
  21.116 -+		memcpy(r->targ_start,
  21.117 -+		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
  21.118 -+		       r->repl->up_size);
  21.119 -+		memset(r->targ_start + r->repl->up_size,
  21.120 -+		       0x90,
  21.121 -+		       r->repl->targ_size - r->repl->up_size);
  21.122 -+	}
  21.123 -+	/* Paranoia */
  21.124 -+	asm volatile ("jmp 1f\n1:");
  21.125 -+	mb();
  21.126 -+}
  21.127 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c ./arch/i386/kernel/smpboot.c
  21.128 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c	2006-09-12 19:02:10.000000000 +0100
  21.129 -+++ ./arch/i386/kernel/smpboot.c	2006-09-19 14:05:48.000000000 +0100
  21.130 -@@ -1218,6 +1218,11 @@ static void __init smp_boot_cpus(unsigne
  21.131 - 		if (max_cpus <= cpucount+1)
  21.132 - 			continue;
  21.133 - 
  21.134 -+#ifdef CONFIG_SMP_ALTERNATIVES
  21.135 -+		if (kicked == 1)
  21.136 -+			prepare_for_smp();
  21.137 -+#endif
  21.138 -+
  21.139 - 		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
  21.140 - 			printk("CPU #%d not responding - cannot use it.\n",
  21.141 - 								apicid);
  21.142 -@@ -1396,6 +1401,11 @@ int __devinit __cpu_up(unsigned int cpu)
  21.143 - 		return -EIO;
  21.144 - 	}
  21.145 - 
  21.146 -+#ifdef CONFIG_SMP_ALTERNATIVES
  21.147 -+	if (num_online_cpus() == 1)
  21.148 -+		prepare_for_smp();
  21.149 -+#endif
  21.150 -+
  21.151 - 	local_irq_enable();
  21.152 - 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
  21.153 - 	/* Unleash the CPU! */
  21.154 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
  21.155 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S	2006-09-12 19:02:10.000000000 +0100
  21.156 -+++ ./arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:05:48.000000000 +0100
  21.157 -@@ -34,6 +34,13 @@ SECTIONS
  21.158 -   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  21.159 -   __stop___ex_table = .;
  21.160 - 
  21.161 -+  . = ALIGN(16);
  21.162 -+  __start_smp_alternatives_table = .;
  21.163 -+  __smp_alternatives : { *(__smp_alternatives) }
  21.164 -+  __stop_smp_alternatives_table = .;
  21.165 -+
  21.166 -+  __smp_replacements : { *(__smp_replacements) }
  21.167 -+
  21.168 -   RODATA
  21.169 - 
  21.170 -   /* writeable */
  21.171 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/atomic.h ./include/asm-i386/atomic.h
  21.172 ---- ../orig-linux-2.6.16.29/include/asm-i386/atomic.h	2006-09-12 19:02:10.000000000 +0100
  21.173 -+++ ./include/asm-i386/atomic.h	2006-09-19 14:05:48.000000000 +0100
  21.174 -@@ -4,18 +4,13 @@
  21.175 - #include <linux/config.h>
  21.176 - #include <linux/compiler.h>
  21.177 - #include <asm/processor.h>
  21.178 -+#include <asm/smp_alt.h>
  21.179 - 
  21.180 - /*
  21.181 -  * Atomic operations that C can't guarantee us.  Useful for
  21.182 -  * resource counting etc..
  21.183 -  */
  21.184 - 
  21.185 --#ifdef CONFIG_SMP
  21.186 --#define LOCK "lock ; "
  21.187 --#else
  21.188 --#define LOCK ""
  21.189 --#endif
  21.190 --
  21.191 - /*
  21.192 -  * Make sure gcc doesn't try to be clever and move things around
  21.193 -  * on us. We need to use _exactly_ the address the user gave us,
  21.194 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/bitops.h ./include/asm-i386/bitops.h
  21.195 ---- ../orig-linux-2.6.16.29/include/asm-i386/bitops.h	2006-09-12 19:02:10.000000000 +0100
  21.196 -+++ ./include/asm-i386/bitops.h	2006-09-19 14:05:48.000000000 +0100
  21.197 -@@ -7,6 +7,7 @@
  21.198 - 
  21.199 - #include <linux/config.h>
  21.200 - #include <linux/compiler.h>
  21.201 -+#include <asm/smp_alt.h>
  21.202 - 
  21.203 - /*
  21.204 -  * These have to be done with inline assembly: that way the bit-setting
  21.205 -@@ -16,12 +17,6 @@
  21.206 -  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  21.207 -  */
  21.208 - 
  21.209 --#ifdef CONFIG_SMP
  21.210 --#define LOCK_PREFIX "lock ; "
  21.211 --#else
  21.212 --#define LOCK_PREFIX ""
  21.213 --#endif
  21.214 --
  21.215 - #define ADDR (*(volatile long *) addr)
  21.216 - 
  21.217 - /**
  21.218 -@@ -41,7 +36,7 @@
  21.219 -  */
  21.220 - static inline void set_bit(int nr, volatile unsigned long * addr)
  21.221 - {
  21.222 --	__asm__ __volatile__( LOCK_PREFIX
  21.223 -+	__asm__ __volatile__( LOCK
  21.224 - 		"btsl %1,%0"
  21.225 - 		:"+m" (ADDR)
  21.226 - 		:"Ir" (nr));
  21.227 -@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
  21.228 -  */
  21.229 - static inline void clear_bit(int nr, volatile unsigned long * addr)
  21.230 - {
  21.231 --	__asm__ __volatile__( LOCK_PREFIX
  21.232 -+	__asm__ __volatile__( LOCK
  21.233 - 		"btrl %1,%0"
  21.234 - 		:"+m" (ADDR)
  21.235 - 		:"Ir" (nr));
  21.236 -@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
  21.237 -  */
  21.238 - static inline void change_bit(int nr, volatile unsigned long * addr)
  21.239 - {
  21.240 --	__asm__ __volatile__( LOCK_PREFIX
  21.241 -+	__asm__ __volatile__( LOCK
  21.242 - 		"btcl %1,%0"
  21.243 - 		:"+m" (ADDR)
  21.244 - 		:"Ir" (nr));
  21.245 -@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
  21.246 - {
  21.247 - 	int oldbit;
  21.248 - 
  21.249 --	__asm__ __volatile__( LOCK_PREFIX
  21.250 -+	__asm__ __volatile__( LOCK
  21.251 - 		"btsl %2,%1\n\tsbbl %0,%0"
  21.252 - 		:"=r" (oldbit),"+m" (ADDR)
  21.253 - 		:"Ir" (nr) : "memory");
  21.254 -@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
  21.255 - {
  21.256 - 	int oldbit;
  21.257 - 
  21.258 --	__asm__ __volatile__( LOCK_PREFIX
  21.259 -+	__asm__ __volatile__( LOCK
  21.260 - 		"btrl %2,%1\n\tsbbl %0,%0"
  21.261 - 		:"=r" (oldbit),"+m" (ADDR)
  21.262 - 		:"Ir" (nr) : "memory");
  21.263 -@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
  21.264 - {
  21.265 - 	int oldbit;
  21.266 - 
  21.267 --	__asm__ __volatile__( LOCK_PREFIX
  21.268 -+	__asm__ __volatile__( LOCK
  21.269 - 		"btcl %2,%1\n\tsbbl %0,%0"
  21.270 - 		:"=r" (oldbit),"+m" (ADDR)
  21.271 - 		:"Ir" (nr) : "memory");
  21.272 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/futex.h ./include/asm-i386/futex.h
  21.273 ---- ../orig-linux-2.6.16.29/include/asm-i386/futex.h	2006-09-12 19:02:10.000000000 +0100
  21.274 -+++ ./include/asm-i386/futex.h	2006-09-19 14:05:48.000000000 +0100
  21.275 -@@ -28,7 +28,7 @@
  21.276 - "1:	movl	%2, %0\n\
  21.277 - 	movl	%0, %3\n"					\
  21.278 - 	insn "\n"						\
  21.279 --"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
  21.280 -+"2:	" LOCK "cmpxchgl %3, %2\n\
  21.281 - 	jnz	1b\n\
  21.282 - 3:	.section .fixup,\"ax\"\n\
  21.283 - 4:	mov	%5, %1\n\
  21.284 -@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, 
  21.285 - #endif
  21.286 - 		switch (op) {
  21.287 - 		case FUTEX_OP_ADD:
  21.288 --			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
  21.289 -+			__futex_atomic_op1(LOCK "xaddl %0, %2", ret,
  21.290 - 					   oldval, uaddr, oparg);
  21.291 - 			break;
  21.292 - 		case FUTEX_OP_OR:
  21.293 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h ./include/asm-i386/rwsem.h
  21.294 ---- ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h	2006-09-12 19:02:10.000000000 +0100
  21.295 -+++ ./include/asm-i386/rwsem.h	2006-09-19 14:05:48.000000000 +0100
  21.296 -@@ -40,6 +40,7 @@
  21.297 - 
  21.298 - #include <linux/list.h>
  21.299 - #include <linux/spinlock.h>
  21.300 -+#include <asm/smp_alt.h>
  21.301 - 
  21.302 - struct rwsem_waiter;
  21.303 - 
  21.304 -@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
  21.305 - {
  21.306 - 	__asm__ __volatile__(
  21.307 - 		"# beginning down_read\n\t"
  21.308 --LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  21.309 -+LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  21.310 - 		"  js        2f\n\t" /* jump if we weren't granted the lock */
  21.311 - 		"1:\n\t"
  21.312 - 		LOCK_SECTION_START("")
  21.313 -@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
  21.314 - 		"  movl	     %1,%2\n\t"
  21.315 - 		"  addl      %3,%2\n\t"
  21.316 - 		"  jle	     2f\n\t"
  21.317 --LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
  21.318 -+LOCK	        "  cmpxchgl  %2,%0\n\t"
  21.319 - 		"  jnz	     1b\n\t"
  21.320 - 		"2:\n\t"
  21.321 - 		"# ending __down_read_trylock\n\t"
  21.322 -@@ -150,7 +151,7 @@ static inline void __down_write(struct r
  21.323 - 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
  21.324 - 	__asm__ __volatile__(
  21.325 - 		"# beginning down_write\n\t"
  21.326 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  21.327 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  21.328 - 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
  21.329 - 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
  21.330 - 		"1:\n\t"
  21.331 -@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
  21.332 - 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
  21.333 - 	__asm__ __volatile__(
  21.334 - 		"# beginning __up_read\n\t"
  21.335 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  21.336 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  21.337 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  21.338 - 		"1:\n\t"
  21.339 - 		LOCK_SECTION_START("")
  21.340 -@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
  21.341 - 	__asm__ __volatile__(
  21.342 - 		"# beginning __up_write\n\t"
  21.343 - 		"  movl      %2,%%edx\n\t"
  21.344 --LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  21.345 -+LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  21.346 - 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
  21.347 - 		"1:\n\t"
  21.348 - 		LOCK_SECTION_START("")
  21.349 -@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
  21.350 - {
  21.351 - 	__asm__ __volatile__(
  21.352 - 		"# beginning __downgrade_write\n\t"
  21.353 --LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  21.354 -+LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  21.355 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  21.356 - 		"1:\n\t"
  21.357 - 		LOCK_SECTION_START("")
  21.358 -@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
  21.359 - static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  21.360 - {
  21.361 - 	__asm__ __volatile__(
  21.362 --LOCK_PREFIX	"addl %1,%0"
  21.363 -+LOCK	          "addl %1,%0"
  21.364 - 		: "=m"(sem->count)
  21.365 - 		: "ir"(delta), "m"(sem->count));
  21.366 - }
  21.367 -@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
  21.368 - 	int tmp = delta;
  21.369 - 
  21.370 - 	__asm__ __volatile__(
  21.371 --LOCK_PREFIX	"xadd %0,(%2)"
  21.372 -+LOCK  	          "xadd %0,(%2)"
  21.373 - 		: "+r"(tmp), "=m"(sem->count)
  21.374 - 		: "r"(sem), "m"(sem->count)
  21.375 - 		: "memory");
  21.376 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h ./include/asm-i386/smp_alt.h
  21.377 ---- ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
  21.378 -+++ ./include/asm-i386/smp_alt.h	2006-09-19 14:05:48.000000000 +0100
  21.379 -@@ -0,0 +1,32 @@
  21.380 -+#ifndef __ASM_SMP_ALT_H__
  21.381 -+#define __ASM_SMP_ALT_H__
  21.382 -+
  21.383 -+#include <linux/config.h>
  21.384 -+
  21.385 -+#ifdef CONFIG_SMP
  21.386 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  21.387 -+#define LOCK \
  21.388 -+        "6677: nop\n" \
  21.389 -+	".section __smp_alternatives,\"a\"\n" \
  21.390 -+	".long 6677b\n" \
  21.391 -+	".long 6678f\n" \
  21.392 -+	".previous\n" \
  21.393 -+	".section __smp_replacements,\"a\"\n" \
  21.394 -+	"6678: .byte 1\n" \
  21.395 -+	".byte 1\n" \
  21.396 -+	".byte 0\n" \
  21.397 -+        ".byte 1\n" \
  21.398 -+	".byte -1\n" \
  21.399 -+	"lock\n" \
  21.400 -+	"nop\n" \
  21.401 -+	".previous\n"
  21.402 -+void prepare_for_smp(void);
  21.403 -+void unprepare_for_smp(void);
  21.404 -+#else
  21.405 -+#define LOCK "lock ; "
  21.406 -+#endif
  21.407 -+#else
  21.408 -+#define LOCK ""
  21.409 -+#endif
  21.410 -+
  21.411 -+#endif /* __ASM_SMP_ALT_H__ */
  21.412 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h ./include/asm-i386/spinlock.h
  21.413 ---- ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h	2006-09-12 19:02:10.000000000 +0100
  21.414 -+++ ./include/asm-i386/spinlock.h	2006-09-19 14:05:48.000000000 +0100
  21.415 -@@ -6,6 +6,7 @@
  21.416 - #include <asm/page.h>
  21.417 - #include <linux/config.h>
  21.418 - #include <linux/compiler.h>
  21.419 -+#include <asm/smp_alt.h>
  21.420 - 
  21.421 - /*
  21.422 -  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  21.423 -@@ -23,7 +24,8 @@
  21.424 - 
  21.425 - #define __raw_spin_lock_string \
  21.426 - 	"\n1:\t" \
  21.427 --	"lock ; decb %0\n\t" \
  21.428 -+	LOCK \
  21.429 -+	"decb %0\n\t" \
  21.430 - 	"jns 3f\n" \
  21.431 - 	"2:\t" \
  21.432 - 	"rep;nop\n\t" \
  21.433 -@@ -34,7 +36,8 @@
  21.434 - 
  21.435 - #define __raw_spin_lock_string_flags \
  21.436 - 	"\n1:\t" \
  21.437 --	"lock ; decb %0\n\t" \
  21.438 -+	LOCK \
  21.439 -+	"decb %0\n\t" \
  21.440 - 	"jns 4f\n\t" \
  21.441 - 	"2:\t" \
  21.442 - 	"testl $0x200, %1\n\t" \
  21.443 -@@ -65,10 +68,34 @@ static inline void __raw_spin_lock_flags
  21.444 - static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  21.445 - {
  21.446 - 	char oldval;
  21.447 -+#ifdef CONFIG_SMP_ALTERNATIVES
  21.448 - 	__asm__ __volatile__(
  21.449 --		"xchgb %b0,%1"
  21.450 -+		"1:movb %1,%b0\n"
  21.451 -+		"movb $0,%1\n"
  21.452 -+		"2:"
  21.453 -+		".section __smp_alternatives,\"a\"\n"
  21.454 -+		".long 1b\n"
  21.455 -+		".long 3f\n"
  21.456 -+		".previous\n"
  21.457 -+		".section __smp_replacements,\"a\"\n"
  21.458 -+		"3: .byte 2b - 1b\n"
  21.459 -+		".byte 5f-4f\n"
  21.460 -+		".byte 0\n"
  21.461 -+		".byte 6f-5f\n"
  21.462 -+		".byte -1\n"
  21.463 -+		"4: xchgb %b0,%1\n"
  21.464 -+		"5: movb %1,%b0\n"
  21.465 -+		"movb $0,%1\n"
  21.466 -+		"6:\n"
  21.467 -+		".previous\n"
  21.468 - 		:"=q" (oldval), "=m" (lock->slock)
  21.469 - 		:"0" (0) : "memory");
  21.470 -+#else
  21.471 -+	__asm__ __volatile__(
  21.472 -+		"xchgb %b0,%1\n"
  21.473 -+		:"=q" (oldval), "=m" (lock->slock)
  21.474 -+		:"0" (0) : "memory");
  21.475 -+#endif
  21.476 - 	return oldval > 0;
  21.477 - }
  21.478 - 
  21.479 -@@ -178,12 +205,12 @@ static inline int __raw_write_trylock(ra
  21.480 - 
  21.481 - static inline void __raw_read_unlock(raw_rwlock_t *rw)
  21.482 - {
  21.483 --	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
  21.484 -+	asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory");
  21.485 - }
  21.486 - 
  21.487 - static inline void __raw_write_unlock(raw_rwlock_t *rw)
  21.488 - {
  21.489 --	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
  21.490 -+	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0"
  21.491 - 				 : "=m" (rw->lock) : : "memory");
  21.492 - }
  21.493 - 
  21.494 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/system.h ./include/asm-i386/system.h
  21.495 ---- ../orig-linux-2.6.16.29/include/asm-i386/system.h	2006-09-12 19:02:10.000000000 +0100
  21.496 -+++ ./include/asm-i386/system.h	2006-09-19 14:05:48.000000000 +0100
  21.497 -@@ -5,7 +5,7 @@
  21.498 - #include <linux/kernel.h>
  21.499 - #include <asm/segment.h>
  21.500 - #include <asm/cpufeature.h>
  21.501 --#include <linux/bitops.h> /* for LOCK_PREFIX */
  21.502 -+#include <asm/smp_alt.h>
  21.503 - 
  21.504 - #ifdef __KERNEL__
  21.505 - 
  21.506 -@@ -271,19 +271,19 @@ static inline unsigned long __cmpxchg(vo
  21.507 - 	unsigned long prev;
  21.508 - 	switch (size) {
  21.509 - 	case 1:
  21.510 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
  21.511 -+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
  21.512 - 				     : "=a"(prev)
  21.513 - 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
  21.514 - 				     : "memory");
  21.515 - 		return prev;
  21.516 - 	case 2:
  21.517 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
  21.518 -+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
  21.519 - 				     : "=a"(prev)
  21.520 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  21.521 - 				     : "memory");
  21.522 - 		return prev;
  21.523 - 	case 4:
  21.524 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
  21.525 -+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
  21.526 - 				     : "=a"(prev)
  21.527 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  21.528 - 				     : "memory");
  21.529 -@@ -336,7 +336,7 @@ static inline unsigned long long __cmpxc
  21.530 - 				      unsigned long long new)
  21.531 - {
  21.532 - 	unsigned long long prev;
  21.533 --	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
  21.534 -+	__asm__ __volatile__(LOCK "cmpxchg8b %3"
  21.535 - 			     : "=A"(prev)
  21.536 - 			     : "b"((unsigned long)new),
  21.537 - 			       "c"((unsigned long)(new >> 32)),
  21.538 -@@ -503,11 +503,55 @@ struct alt_instr { 
  21.539 - #endif
  21.540 - 
  21.541 - #ifdef CONFIG_SMP
  21.542 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  21.543 -+#define smp_alt_mb(instr)                                           \
  21.544 -+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
  21.545 -+		     ".section __smp_alternatives,\"a\"\n"          \
  21.546 -+		     ".long 6667b\n"                                \
  21.547 -+                     ".long 6673f\n"                                \
  21.548 -+		     ".previous\n"                                  \
  21.549 -+		     ".section __smp_replacements,\"a\"\n"          \
  21.550 -+		     "6673:.byte 6668b-6667b\n"                     \
  21.551 -+		     ".byte 6670f-6669f\n"                          \
  21.552 -+		     ".byte 6671f-6670f\n"                          \
  21.553 -+                     ".byte 0\n"                                    \
  21.554 -+		     ".byte %c0\n"                                  \
  21.555 -+		     "6669:lock;addl $0,0(%%esp)\n"                 \
  21.556 -+		     "6670:" instr "\n"                             \
  21.557 -+		     "6671:\n"                                      \
  21.558 -+		     ".previous\n"                                  \
  21.559 -+		     :                                              \
  21.560 -+		     : "i" (X86_FEATURE_XMM2)                       \
  21.561 -+		     : "memory")
  21.562 -+#define smp_rmb() smp_alt_mb("lfence")
  21.563 -+#define smp_mb()  smp_alt_mb("mfence")
  21.564 -+#define set_mb(var, value) do {                                     \
  21.565 -+unsigned long __set_mb_temp;                                        \
  21.566 -+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
  21.567 -+		     ".section __smp_alternatives,\"a\"\n"          \
  21.568 -+		     ".long 6667b\n"                                \
  21.569 -+		     ".long 6673f\n"                                \
  21.570 -+		     ".previous\n"                                  \
  21.571 -+		     ".section __smp_replacements,\"a\"\n"          \
  21.572 -+		     "6673: .byte 6668b-6667b\n"                    \
  21.573 -+		     ".byte 6670f-6669f\n"                          \
  21.574 -+		     ".byte 0\n"                                    \
  21.575 -+		     ".byte 6671f-6670f\n"                          \
  21.576 -+		     ".byte -1\n"                                   \
  21.577 -+		     "6669: xchg %1, %0\n"                          \
  21.578 -+		     "6670:movl %1, %0\n"                           \
  21.579 -+		     "6671:\n"                                      \
  21.580 -+		     ".previous\n"                                  \
  21.581 -+		     : "=m" (var), "=r" (__set_mb_temp)             \
  21.582 -+		     : "1" (value)                                  \
  21.583 -+		     : "memory"); } while (0)
  21.584 -+#else
  21.585 - #define smp_mb()	mb()
  21.586 - #define smp_rmb()	rmb()
  21.587 -+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  21.588 -+#endif
  21.589 - #define smp_wmb()	wmb()
  21.590 - #define smp_read_barrier_depends()	read_barrier_depends()
  21.591 --#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  21.592 - #else
  21.593 - #define smp_mb()	barrier()
  21.594 - #define smp_rmb()	barrier()
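
smp-alts.patch replaces the compile-time LOCK/LOCK_PREFIX selection in the i386 headers with records in an __smp_alternatives section, so the "lock" prefix (and the stronger SMP memory barriers) can be patched in or out as CPUs come and go. For contrast, a sketch of the compile-time scheme it removes; gcc on x86 is assumed, and CONFIG_SMP here is just a -D compile flag for the sketch, not the kernel configuration machinery:

    #include <stdio.h>

    /* SMP builds always pay for the locked bus cycle, UP builds never emit it;
     * the smp-alternatives records let the running kernel choose instead. */
    #ifdef CONFIG_SMP
    #define LOCK "lock ; "
    #else
    #define LOCK ""
    #endif

    int main(void)
    {
        int counter = 0;

        /* same shape as the atomic-add asm in the i386 headers */
        __asm__ __volatile__(LOCK "addl %1,%0"
                             : "+m" (counter)
                             : "ir" (1)
                             : "memory");
        printf("counter = %d\n", counter);
        return 0;
    }
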
    22.1 --- a/patches/linux-2.6.16.30/tpm_plugin_2.6.17.patch	Mon Nov 27 13:50:01 2006 +0000
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,1545 +0,0 @@
    22.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.c ./drivers/char/tpm/tpm_atmel.c
    22.5 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.c	2006-09-12 19:02:10.000000000 +0100
    22.6 -+++ ./drivers/char/tpm/tpm_atmel.c	2006-09-19 14:05:52.000000000 +0100
    22.7 -@@ -47,12 +47,12 @@ static int tpm_atml_recv(struct tpm_chip
    22.8 - 		return -EIO;
    22.9 - 
   22.10 - 	for (i = 0; i < 6; i++) {
   22.11 --		status = ioread8(chip->vendor->iobase + 1);
   22.12 -+		status = ioread8(chip->vendor.iobase + 1);
   22.13 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   22.14 - 			dev_err(chip->dev, "error reading header\n");
   22.15 - 			return -EIO;
   22.16 - 		}
   22.17 --		*buf++ = ioread8(chip->vendor->iobase);
   22.18 -+		*buf++ = ioread8(chip->vendor.iobase);
   22.19 - 	}
   22.20 - 
   22.21 - 	/* size of the data received */
   22.22 -@@ -63,7 +63,7 @@ static int tpm_atml_recv(struct tpm_chip
   22.23 - 		dev_err(chip->dev,
   22.24 - 			"Recv size(%d) less than available space\n", size);
   22.25 - 		for (; i < size; i++) {	/* clear the waiting data anyway */
   22.26 --			status = ioread8(chip->vendor->iobase + 1);
   22.27 -+			status = ioread8(chip->vendor.iobase + 1);
   22.28 - 			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   22.29 - 				dev_err(chip->dev, "error reading data\n");
   22.30 - 				return -EIO;
   22.31 -@@ -74,16 +74,16 @@ static int tpm_atml_recv(struct tpm_chip
   22.32 - 
   22.33 - 	/* read all the data available */
   22.34 - 	for (; i < size; i++) {
   22.35 --		status = ioread8(chip->vendor->iobase + 1);
   22.36 -+		status = ioread8(chip->vendor.iobase + 1);
   22.37 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   22.38 - 			dev_err(chip->dev, "error reading data\n");
   22.39 - 			return -EIO;
   22.40 - 		}
   22.41 --		*buf++ = ioread8(chip->vendor->iobase);
   22.42 -+		*buf++ = ioread8(chip->vendor.iobase);
   22.43 - 	}
   22.44 - 
   22.45 - 	/* make sure data available is gone */
   22.46 --	status = ioread8(chip->vendor->iobase + 1);
   22.47 -+	status = ioread8(chip->vendor.iobase + 1);
   22.48 - 
   22.49 - 	if (status & ATML_STATUS_DATA_AVAIL) {
   22.50 - 		dev_err(chip->dev, "data available is stuck\n");
   22.51 -@@ -100,7 +100,7 @@ static int tpm_atml_send(struct tpm_chip
   22.52 - 	dev_dbg(chip->dev, "tpm_atml_send:\n");
   22.53 - 	for (i = 0; i < count; i++) {
   22.54 - 		dev_dbg(chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
   22.55 -- 		iowrite8(buf[i], chip->vendor->iobase);
   22.56 -+ 		iowrite8(buf[i], chip->vendor.iobase);
   22.57 - 	}
   22.58 - 
   22.59 - 	return count;
   22.60 -@@ -108,12 +108,12 @@ static int tpm_atml_send(struct tpm_chip
   22.61 - 
   22.62 - static void tpm_atml_cancel(struct tpm_chip *chip)
   22.63 - {
   22.64 --	iowrite8(ATML_STATUS_ABORT, chip->vendor->iobase + 1);
   22.65 -+	iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1);
   22.66 - }
   22.67 - 
   22.68 - static u8 tpm_atml_status(struct tpm_chip *chip)
   22.69 - {
   22.70 --	return ioread8(chip->vendor->iobase + 1);
   22.71 -+	return ioread8(chip->vendor.iobase + 1);
   22.72 - }
   22.73 - 
   22.74 - static struct file_operations atmel_ops = {
   22.75 -@@ -140,7 +140,7 @@ static struct attribute* atmel_attrs[] =
   22.76 - 
   22.77 - static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
   22.78 - 
   22.79 --static struct tpm_vendor_specific tpm_atmel = {
   22.80 -+static const struct tpm_vendor_specific tpm_atmel = {
   22.81 - 	.recv = tpm_atml_recv,
   22.82 - 	.send = tpm_atml_send,
   22.83 - 	.cancel = tpm_atml_cancel,
   22.84 -@@ -159,10 +159,10 @@ static void atml_plat_remove(void)
   22.85 - 	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
   22.86 - 
   22.87 - 	if (chip) {
   22.88 --		if (chip->vendor->have_region)
   22.89 --			atmel_release_region(chip->vendor->base,
   22.90 --					     chip->vendor->region_size);
   22.91 --		atmel_put_base_addr(chip->vendor);
   22.92 -+		if (chip->vendor.have_region)
   22.93 -+			atmel_release_region(chip->vendor.base,
   22.94 -+					     chip->vendor.region_size);
   22.95 -+		atmel_put_base_addr(chip->vendor.iobase);
   22.96 - 		tpm_remove_hardware(chip->dev);
   22.97 - 		platform_device_unregister(pdev);
   22.98 - 	}
   22.99 -@@ -179,18 +179,22 @@ static struct device_driver atml_drv = {
  22.100 - static int __init init_atmel(void)
  22.101 - {
  22.102 - 	int rc = 0;
  22.103 -+	void __iomem *iobase = NULL;
  22.104 -+	int have_region, region_size;
  22.105 -+	unsigned long base;
  22.106 -+	struct  tpm_chip *chip;
  22.107 - 
  22.108 - 	driver_register(&atml_drv);
  22.109 - 
  22.110 --	if ((tpm_atmel.iobase = atmel_get_base_addr(&tpm_atmel)) == NULL) {
  22.111 -+	if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
  22.112 - 		rc = -ENODEV;
  22.113 - 		goto err_unreg_drv;
  22.114 - 	}
  22.115 - 
  22.116 --	tpm_atmel.have_region =
  22.117 -+	have_region =
  22.118 - 	    (atmel_request_region
  22.119 --	     (tpm_atmel.base, tpm_atmel.region_size,
  22.120 --	      "tpm_atmel0") == NULL) ? 0 : 1;
  22.121 -+	     (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
  22.122 -+
  22.123 - 
  22.124 - 	if (IS_ERR
  22.125 - 	    (pdev =
  22.126 -@@ -199,17 +203,25 @@ static int __init init_atmel(void)
  22.127 - 		goto err_rel_reg;
  22.128 - 	}
  22.129 - 
  22.130 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_atmel)) < 0)
  22.131 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) {
  22.132 -+		rc = -ENODEV;
  22.133 - 		goto err_unreg_dev;
  22.134 -+	}
  22.135 -+
  22.136 -+	chip->vendor.iobase = iobase;
  22.137 -+	chip->vendor.base = base;
  22.138 -+	chip->vendor.have_region = have_region;
  22.139 -+	chip->vendor.region_size = region_size;
  22.140 -+
  22.141 - 	return 0;
  22.142 - 
  22.143 - err_unreg_dev:
  22.144 - 	platform_device_unregister(pdev);
  22.145 - err_rel_reg:
  22.146 --	atmel_put_base_addr(&tpm_atmel);
  22.147 --	if (tpm_atmel.have_region)
  22.148 --		atmel_release_region(tpm_atmel.base,
  22.149 --				     tpm_atmel.region_size);
  22.150 -+	atmel_put_base_addr(iobase);
  22.151 -+	if (have_region)
  22.152 -+		atmel_release_region(base,
  22.153 -+				     region_size);
  22.154 - err_unreg_drv:
  22.155 - 	driver_unregister(&atml_drv);
  22.156 - 	return rc;
  22.157 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.h ./drivers/char/tpm/tpm_atmel.h
  22.158 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.h	2006-09-12 19:02:10.000000000 +0100
  22.159 -+++ ./drivers/char/tpm/tpm_atmel.h	2006-09-19 14:05:52.000000000 +0100
  22.160 -@@ -28,13 +28,12 @@
  22.161 - #define atmel_request_region request_mem_region
  22.162 - #define atmel_release_region release_mem_region
  22.163 - 
  22.164 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  22.165 --					 *vendor)
  22.166 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  22.167 - {
  22.168 --	iounmap(vendor->iobase);
  22.169 -+	iounmap(iobase);
  22.170 - }
  22.171 - 
  22.172 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific *vendor)
  22.173 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  22.174 - {
  22.175 - 	struct device_node *dn;
  22.176 - 	unsigned long address, size;
  22.177 -@@ -71,9 +70,9 @@ static void __iomem * atmel_get_base_add
  22.178 - 	else
  22.179 - 		size = reg[naddrc];
  22.180 - 
  22.181 --	vendor->base = address;
  22.182 --	vendor->region_size = size;
  22.183 --	return ioremap(vendor->base, vendor->region_size);
  22.184 -+	*base = address;
  22.185 -+	*region_size = size;
  22.186 -+	return ioremap(*base, *region_size);
  22.187 - }
  22.188 - #else
  22.189 - #define atmel_getb(chip, offset) inb(chip->vendor->base + offset)
  22.190 -@@ -106,14 +105,12 @@ static int atmel_verify_tpm11(void)
  22.191 - 	return 0;
  22.192 - }
  22.193 - 
  22.194 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  22.195 --					 *vendor)
  22.196 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  22.197 - {
  22.198 - }
  22.199 - 
  22.200 - /* Determine where to talk to device */
  22.201 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific
  22.202 --					 *vendor)
  22.203 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  22.204 - {
  22.205 - 	int lo, hi;
  22.206 - 
  22.207 -@@ -123,9 +120,9 @@ static void __iomem * atmel_get_base_add
  22.208 - 	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
  22.209 - 	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
  22.210 - 
  22.211 --	vendor->base = (hi << 8) | lo;
  22.212 --	vendor->region_size = 2;
  22.213 -+	*base = (hi << 8) | lo;
  22.214 -+	*region_size = 2;
  22.215 - 
  22.216 --	return ioport_map(vendor->base, vendor->region_size);
  22.217 -+	return ioport_map(*base, *region_size);
  22.218 - }
  22.219 - #endif
  22.220 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_bios.c ./drivers/char/tpm/tpm_bios.c
  22.221 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_bios.c	2006-09-12 19:02:10.000000000 +0100
  22.222 -+++ ./drivers/char/tpm/tpm_bios.c	2006-09-19 14:05:52.000000000 +0100
  22.223 -@@ -29,6 +29,11 @@
  22.224 - #define MAX_TEXT_EVENT		1000	/* Max event string length */
  22.225 - #define ACPI_TCPA_SIG		"TCPA"	/* 0x41504354 /'TCPA' */
  22.226 - 
  22.227 -+enum bios_platform_class {
  22.228 -+	BIOS_CLIENT = 0x00,
  22.229 -+	BIOS_SERVER = 0x01,
  22.230 -+};
  22.231 -+
  22.232 - struct tpm_bios_log {
  22.233 - 	void *bios_event_log;
  22.234 - 	void *bios_event_log_end;
  22.235 -@@ -36,9 +41,18 @@ struct tpm_bios_log {
  22.236 - 
  22.237 - struct acpi_tcpa {
  22.238 - 	struct acpi_table_header hdr;
  22.239 --	u16 reserved;
  22.240 --	u32 log_max_len __attribute__ ((packed));
  22.241 --	u32 log_start_addr __attribute__ ((packed));
  22.242 -+	u16 platform_class;
  22.243 -+	union {
  22.244 -+		struct client_hdr {
  22.245 -+			u32 log_max_len __attribute__ ((packed));
  22.246 -+			u64 log_start_addr __attribute__ ((packed));
  22.247 -+		} client;
  22.248 -+		struct server_hdr {
  22.249 -+			u16 reserved;
  22.250 -+			u64 log_max_len __attribute__ ((packed));
  22.251 -+			u64 log_start_addr __attribute__ ((packed));
  22.252 -+		} server;
  22.253 -+	};
  22.254 - };
  22.255 - 
  22.256 - struct tcpa_event {
  22.257 -@@ -91,6 +105,12 @@ static const char* tcpa_event_type_strin
  22.258 - 	"Non-Host Info"
  22.259 - };
  22.260 - 
  22.261 -+struct tcpa_pc_event {
  22.262 -+	u32 event_id;
  22.263 -+	u32 event_size;
  22.264 -+	u8 event_data[0];
  22.265 -+};
  22.266 -+
  22.267 - enum tcpa_pc_event_ids {
  22.268 - 	SMBIOS = 1,
  22.269 - 	BIS_CERT,
  22.270 -@@ -100,14 +120,15 @@ enum tcpa_pc_event_ids {
  22.271 - 	NVRAM,
  22.272 - 	OPTION_ROM_EXEC,
  22.273 - 	OPTION_ROM_CONFIG,
  22.274 --	OPTION_ROM_MICROCODE,
  22.275 -+	OPTION_ROM_MICROCODE = 10,
  22.276 - 	S_CRTM_VERSION,
  22.277 - 	S_CRTM_CONTENTS,
  22.278 - 	POST_CONTENTS,
  22.279 -+	HOST_TABLE_OF_DEVICES,
  22.280 - };
  22.281 - 
  22.282 - static const char* tcpa_pc_event_id_strings[] = {
  22.283 --	""
  22.284 -+	"",
  22.285 - 	"SMBIOS",
  22.286 - 	"BIS Certificate",
  22.287 - 	"POST BIOS ",
  22.288 -@@ -116,10 +137,12 @@ static const char* tcpa_pc_event_id_stri
  22.289 - 	"NVRAM",
  22.290 - 	"Option ROM",
  22.291 - 	"Option ROM config",
  22.292 --	"Option ROM microcode",
  22.293 -+	"",
  22.294 -+	"Option ROM microcode ",
  22.295 - 	"S-CRTM Version",
  22.296 --	"S-CRTM Contents",
  22.297 --	"S-CRTM POST Contents",
  22.298 -+	"S-CRTM Contents ",
  22.299 -+	"POST Contents ",
  22.300 -+	"Table of Devices",
  22.301 - };
  22.302 - 
  22.303 - /* returns pointer to start of pos. entry of tcg log */
  22.304 -@@ -191,7 +214,7 @@ static int get_event_name(char *dest, st
  22.305 - 	const char *name = "";
  22.306 - 	char data[40] = "";
  22.307 - 	int i, n_len = 0, d_len = 0;
  22.308 --	u32 event_id;
  22.309 -+	struct tcpa_pc_event *pc_event;
  22.310 - 
  22.311 - 	switch(event->event_type) {
  22.312 - 	case PREBOOT:
  22.313 -@@ -220,31 +243,32 @@ static int get_event_name(char *dest, st
  22.314 - 		}
  22.315 - 		break;
  22.316 - 	case EVENT_TAG:
  22.317 --		event_id = be32_to_cpu(*((u32 *)event_entry));
  22.318 -+		pc_event = (struct tcpa_pc_event *)event_entry;
  22.319 - 
  22.320 - 		/* ToDo Row data -> Base64 */
  22.321 - 
  22.322 --		switch (event_id) {
  22.323 -+		switch (pc_event->event_id) {
  22.324 - 		case SMBIOS:
  22.325 - 		case BIS_CERT:
  22.326 - 		case CMOS:
  22.327 - 		case NVRAM:
  22.328 - 		case OPTION_ROM_EXEC:
  22.329 - 		case OPTION_ROM_CONFIG:
  22.330 --		case OPTION_ROM_MICROCODE:
  22.331 - 		case S_CRTM_VERSION:
  22.332 --		case S_CRTM_CONTENTS:
  22.333 --		case POST_CONTENTS:
  22.334 --			name = tcpa_pc_event_id_strings[event_id];
  22.335 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  22.336 - 			n_len = strlen(name);
  22.337 - 			break;
  22.338 -+		/* hash data */
  22.339 - 		case POST_BIOS_ROM:
  22.340 - 		case ESCD:
  22.341 --			name = tcpa_pc_event_id_strings[event_id];
  22.342 -+		case OPTION_ROM_MICROCODE:
  22.343 -+		case S_CRTM_CONTENTS:
  22.344 -+		case POST_CONTENTS:
  22.345 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  22.346 - 			n_len = strlen(name);
  22.347 - 			for (i = 0; i < 20; i++)
  22.348 --				d_len += sprintf(data, "%02x",
  22.349 --						event_entry[8 + i]);
  22.350 -+				d_len += sprintf(&data[2*i], "%02x",
  22.351 -+						pc_event->event_data[i]);
  22.352 - 			break;
  22.353 - 		default:
  22.354 - 			break;
  22.355 -@@ -260,52 +284,13 @@ static int get_event_name(char *dest, st
  22.356 - 
  22.357 - static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
  22.358 - {
  22.359 -+	struct tcpa_event *event = v;
  22.360 -+	char *data = v;
  22.361 -+	int i;
  22.362 - 
  22.363 --	char *eventname;
  22.364 --	char data[4];
  22.365 --	u32 help;
  22.366 --	int i, len;
  22.367 --	struct tcpa_event *event = (struct tcpa_event *) v;
  22.368 --	unsigned char *event_entry =
  22.369 --	    (unsigned char *) (v + sizeof(struct tcpa_event));
  22.370 --
  22.371 --	eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
  22.372 --	if (!eventname) {
  22.373 --		printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
  22.374 --		       __func__);
  22.375 --		return -ENOMEM;
  22.376 --	}
  22.377 --
  22.378 --	/* 1st: PCR used is in little-endian format (4 bytes) */
  22.379 --	help = le32_to_cpu(event->pcr_index);
  22.380 --	memcpy(data, &help, 4);
  22.381 --	for (i = 0; i < 4; i++)
  22.382 --		seq_putc(m, data[i]);
  22.383 --
  22.384 --	/* 2nd: SHA1 (20 bytes) */
  22.385 --	for (i = 0; i < 20; i++)
  22.386 --		seq_putc(m, event->pcr_value[i]);
  22.387 --
  22.388 --	/* 3rd: event type identifier (4 bytes) */
  22.389 --	help = le32_to_cpu(event->event_type);
  22.390 --	memcpy(data, &help, 4);
  22.391 --	for (i = 0; i < 4; i++)
  22.392 -+	for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
  22.393 - 		seq_putc(m, data[i]);
  22.394 - 
  22.395 --	len = 0;
  22.396 --
  22.397 --	len += get_event_name(eventname, event, event_entry);
  22.398 --
  22.399 --	/* 4th:  filename <= 255 + \'0' delimiter */
  22.400 --	if (len > TCG_EVENT_NAME_LEN_MAX)
  22.401 --		len = TCG_EVENT_NAME_LEN_MAX;
  22.402 --
  22.403 --	for (i = 0; i < len; i++)
  22.404 --		seq_putc(m, eventname[i]);
  22.405 --
  22.406 --	/* 5th: delimiter */
  22.407 --	seq_putc(m, '\0');
  22.408 --
  22.409 - 	return 0;
  22.410 - }
  22.411 - 
  22.412 -@@ -353,6 +338,7 @@ static int tpm_ascii_bios_measurements_s
  22.413 - 	/* 4th: eventname <= max + \'0' delimiter */
  22.414 - 	seq_printf(m, " %s\n", eventname);
  22.415 - 
  22.416 -+	kfree(eventname);
  22.417 - 	return 0;
  22.418 - }
  22.419 - 
  22.420 -@@ -376,6 +362,7 @@ static int read_log(struct tpm_bios_log 
  22.421 - 	struct acpi_tcpa *buff;
  22.422 - 	acpi_status status;
  22.423 - 	struct acpi_table_header *virt;
  22.424 -+	u64 len, start;
  22.425 - 
  22.426 - 	if (log->bios_event_log != NULL) {
  22.427 - 		printk(KERN_ERR
  22.428 -@@ -396,27 +383,37 @@ static int read_log(struct tpm_bios_log 
  22.429 - 		return -EIO;
  22.430 - 	}
  22.431 - 
  22.432 --	if (buff->log_max_len == 0) {
  22.433 -+	switch(buff->platform_class) {
  22.434 -+	case BIOS_SERVER:
  22.435 -+		len = buff->server.log_max_len;
  22.436 -+		start = buff->server.log_start_addr;
  22.437 -+		break;
  22.438 -+	case BIOS_CLIENT:
  22.439 -+	default:
  22.440 -+		len = buff->client.log_max_len;
  22.441 -+		start = buff->client.log_start_addr;
  22.442 -+		break;
  22.443 -+	}
  22.444 -+	if (!len) {
  22.445 - 		printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
  22.446 - 		return -EIO;
  22.447 - 	}
  22.448 - 
  22.449 - 	/* malloc EventLog space */
  22.450 --	log->bios_event_log = kmalloc(buff->log_max_len, GFP_KERNEL);
  22.451 -+	log->bios_event_log = kmalloc(len, GFP_KERNEL);
  22.452 - 	if (!log->bios_event_log) {
  22.453 --		printk
  22.454 --		    ("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  22.455 --		     __func__);
  22.456 -+		printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  22.457 -+			__func__);
  22.458 - 		return -ENOMEM;
  22.459 - 	}
  22.460 - 
  22.461 --	log->bios_event_log_end = log->bios_event_log + buff->log_max_len;
  22.462 -+	log->bios_event_log_end = log->bios_event_log + len;
  22.463 - 
  22.464 --	acpi_os_map_memory(buff->log_start_addr, buff->log_max_len, (void *) &virt);
  22.465 -+	acpi_os_map_memory(start, len, (void *) &virt);
  22.466 - 
  22.467 --	memcpy(log->bios_event_log, virt, buff->log_max_len);
  22.468 -+	memcpy(log->bios_event_log, virt, len);
  22.469 - 
  22.470 --	acpi_os_unmap_memory(virt, buff->log_max_len);
  22.471 -+	acpi_os_unmap_memory(virt, len);
  22.472 - 	return 0;
  22.473 - }
  22.474 - 
  22.475 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_infineon.c ./drivers/char/tpm/tpm_infineon.c
  22.476 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_infineon.c	2006-09-12 19:02:10.000000000 +0100
  22.477 -+++ ./drivers/char/tpm/tpm_infineon.c	2006-09-19 14:05:52.000000000 +0100
  22.478 -@@ -15,6 +15,7 @@
  22.479 -  * License.
  22.480 -  */
  22.481 - 
  22.482 -+#include <linux/init.h>
  22.483 - #include <linux/pnp.h>
  22.484 - #include "tpm.h"
  22.485 - 
  22.486 -@@ -104,7 +105,7 @@ static int empty_fifo(struct tpm_chip *c
  22.487 - 
  22.488 - 	if (clear_wrfifo) {
  22.489 - 		for (i = 0; i < 4096; i++) {
  22.490 --			status = inb(chip->vendor->base + WRFIFO);
  22.491 -+			status = inb(chip->vendor.base + WRFIFO);
  22.492 - 			if (status == 0xff) {
  22.493 - 				if (check == 5)
  22.494 - 					break;
  22.495 -@@ -124,8 +125,8 @@ static int empty_fifo(struct tpm_chip *c
  22.496 - 	 */
  22.497 - 	i = 0;
  22.498 - 	do {
  22.499 --		status = inb(chip->vendor->base + RDFIFO);
  22.500 --		status = inb(chip->vendor->base + STAT);
  22.501 -+		status = inb(chip->vendor.base + RDFIFO);
  22.502 -+		status = inb(chip->vendor.base + STAT);
  22.503 - 		i++;
  22.504 - 		if (i == TPM_MAX_TRIES)
  22.505 - 			return -EIO;
  22.506 -@@ -138,7 +139,7 @@ static int wait(struct tpm_chip *chip, i
  22.507 - 	int status;
  22.508 - 	int i;
  22.509 - 	for (i = 0; i < TPM_MAX_TRIES; i++) {
  22.510 --		status = inb(chip->vendor->base + STAT);
  22.511 -+		status = inb(chip->vendor.base + STAT);
  22.512 - 		/* check the status-register if wait_for_bit is set */
  22.513 - 		if (status & 1 << wait_for_bit)
  22.514 - 			break;
  22.515 -@@ -157,7 +158,7 @@ static int wait(struct tpm_chip *chip, i
  22.516 - static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
  22.517 - {
  22.518 - 	wait(chip, STAT_XFE);
  22.519 --	outb(sendbyte, chip->vendor->base + WRFIFO);
  22.520 -+	outb(sendbyte, chip->vendor.base + WRFIFO);
  22.521 - }
  22.522 - 
  22.523 -     /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
  22.524 -@@ -204,7 +205,7 @@ recv_begin:
  22.525 - 		ret = wait(chip, STAT_RDA);
  22.526 - 		if (ret)
  22.527 - 			return -EIO;
  22.528 --		buf[i] = inb(chip->vendor->base + RDFIFO);
  22.529 -+		buf[i] = inb(chip->vendor.base + RDFIFO);
  22.530 - 	}
  22.531 - 
  22.532 - 	if (buf[0] != TPM_VL_VER) {
  22.533 -@@ -219,7 +220,7 @@ recv_begin:
  22.534 - 
  22.535 - 		for (i = 0; i < size; i++) {
  22.536 - 			wait(chip, STAT_RDA);
  22.537 --			buf[i] = inb(chip->vendor->base + RDFIFO);
  22.538 -+			buf[i] = inb(chip->vendor.base + RDFIFO);
  22.539 - 		}
  22.540 - 
  22.541 - 		if ((size == 0x6D00) && (buf[1] == 0x80)) {
  22.542 -@@ -268,7 +269,7 @@ static int tpm_inf_send(struct tpm_chip 
  22.543 - 	u8 count_high, count_low, count_4, count_3, count_2, count_1;
  22.544 - 
  22.545 - 	/* Disabling Reset, LP and IRQC */
  22.546 --	outb(RESET_LP_IRQC_DISABLE, chip->vendor->base + CMD);
  22.547 -+	outb(RESET_LP_IRQC_DISABLE, chip->vendor.base + CMD);
  22.548 - 
  22.549 - 	ret = empty_fifo(chip, 1);
  22.550 - 	if (ret) {
  22.551 -@@ -319,7 +320,7 @@ static void tpm_inf_cancel(struct tpm_ch
  22.552 - 
  22.553 - static u8 tpm_inf_status(struct tpm_chip *chip)
  22.554 - {
  22.555 --	return inb(chip->vendor->base + STAT);
  22.556 -+	return inb(chip->vendor.base + STAT);
  22.557 - }
  22.558 - 
  22.559 - static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
  22.560 -@@ -346,7 +347,7 @@ static struct file_operations inf_ops = 
  22.561 - 	.release = tpm_release,
  22.562 - };
  22.563 - 
  22.564 --static struct tpm_vendor_specific tpm_inf = {
  22.565 -+static const struct tpm_vendor_specific tpm_inf = {
  22.566 - 	.recv = tpm_inf_recv,
  22.567 - 	.send = tpm_inf_send,
  22.568 - 	.cancel = tpm_inf_cancel,
  22.569 -@@ -375,6 +376,7 @@ static int __devinit tpm_inf_pnp_probe(s
  22.570 - 	int version[2];
  22.571 - 	int productid[2];
  22.572 - 	char chipname[20];
  22.573 -+	struct tpm_chip *chip;
  22.574 - 
  22.575 - 	/* read IO-ports through PnP */
  22.576 - 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
  22.577 -@@ -395,14 +397,13 @@ static int __devinit tpm_inf_pnp_probe(s
  22.578 - 			goto err_last;
  22.579 - 		}
  22.580 - 		/* publish my base address and request region */
  22.581 --		tpm_inf.base = TPM_INF_BASE;
  22.582 - 		if (request_region
  22.583 --		    (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  22.584 -+		    (TPM_INF_BASE, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  22.585 - 			rc = -EINVAL;
  22.586 - 			goto err_last;
  22.587 - 		}
  22.588 --		if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN,
  22.589 --				"tpm_infineon0") == NULL) {
  22.590 -+		if (request_region
  22.591 -+		    (TPM_INF_ADDR, TPM_INF_ADDR_LEN, "tpm_infineon0") == NULL) {
  22.592 - 			rc = -EINVAL;
  22.593 - 			goto err_last;
  22.594 - 		}
  22.595 -@@ -442,9 +443,9 @@ static int __devinit tpm_inf_pnp_probe(s
  22.596 - 
  22.597 - 		/* configure TPM with IO-ports */
  22.598 - 		outb(IOLIMH, TPM_INF_ADDR);
  22.599 --		outb(((tpm_inf.base >> 8) & 0xff), TPM_INF_DATA);
  22.600 -+		outb(((TPM_INF_BASE >> 8) & 0xff), TPM_INF_DATA);
  22.601 - 		outb(IOLIML, TPM_INF_ADDR);
  22.602 --		outb((tpm_inf.base & 0xff), TPM_INF_DATA);
  22.603 -+		outb((TPM_INF_BASE & 0xff), TPM_INF_DATA);
  22.604 - 
  22.605 - 		/* control if IO-ports are set correctly */
  22.606 - 		outb(IOLIMH, TPM_INF_ADDR);
  22.607 -@@ -452,10 +453,10 @@ static int __devinit tpm_inf_pnp_probe(s
  22.608 - 		outb(IOLIML, TPM_INF_ADDR);
  22.609 - 		iol = inb(TPM_INF_DATA);
  22.610 - 
  22.611 --		if ((ioh << 8 | iol) != tpm_inf.base) {
  22.612 -+		if ((ioh << 8 | iol) != TPM_INF_BASE) {
  22.613 - 			dev_err(&dev->dev,
  22.614 --				"Could not set IO-ports to 0x%lx\n",
  22.615 --				tpm_inf.base);
  22.616 -+				"Could not set IO-ports to 0x%x\n",
  22.617 -+				TPM_INF_BASE);
  22.618 - 			rc = -EIO;
  22.619 - 			goto err_release_region;
  22.620 - 		}
  22.621 -@@ -466,15 +467,15 @@ static int __devinit tpm_inf_pnp_probe(s
  22.622 - 		outb(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
  22.623 - 
  22.624 - 		/* disable RESET, LP and IRQC */
  22.625 --		outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD);
  22.626 -+		outb(RESET_LP_IRQC_DISABLE, TPM_INF_BASE + CMD);
  22.627 - 
  22.628 - 		/* Finally, we're done, print some infos */
  22.629 - 		dev_info(&dev->dev, "TPM found: "
  22.630 - 			 "config base 0x%x, "
  22.631 - 			 "io base 0x%x, "
  22.632 --			 "chip version %02x%02x, "
  22.633 --			 "vendor id %x%x (Infineon), "
  22.634 --			 "product id %02x%02x"
  22.635 -+			 "chip version 0x%02x%02x, "
  22.636 -+			 "vendor id 0x%x%x (Infineon), "
  22.637 -+			 "product id 0x%02x%02x"
  22.638 - 			 "%s\n",
  22.639 - 			 TPM_INF_ADDR,
  22.640 - 			 TPM_INF_BASE,
  22.641 -@@ -482,11 +483,10 @@ static int __devinit tpm_inf_pnp_probe(s
  22.642 - 			 vendorid[0], vendorid[1],
  22.643 - 			 productid[0], productid[1], chipname);
  22.644 - 
  22.645 --		rc = tpm_register_hardware(&dev->dev, &tpm_inf);
  22.646 --		if (rc < 0) {
  22.647 --			rc = -ENODEV;
  22.648 -+		if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf))) {
  22.649 - 			goto err_release_region;
  22.650 - 		}
  22.651 -+		chip->vendor.base = TPM_INF_BASE;
  22.652 - 		return 0;
  22.653 - 	} else {
  22.654 - 		rc = -ENODEV;
  22.655 -@@ -494,7 +494,7 @@ static int __devinit tpm_inf_pnp_probe(s
  22.656 - 	}
  22.657 - 
  22.658 - err_release_region:
  22.659 --	release_region(tpm_inf.base, TPM_INF_PORT_LEN);
  22.660 -+	release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  22.661 - 	release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  22.662 - 
  22.663 - err_last:
  22.664 -@@ -506,7 +506,8 @@ static __devexit void tpm_inf_pnp_remove
  22.665 - 	struct tpm_chip *chip = pnp_get_drvdata(dev);
  22.666 - 
  22.667 - 	if (chip) {
  22.668 --		release_region(chip->vendor->base, TPM_INF_PORT_LEN);
  22.669 -+		release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  22.670 -+		release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  22.671 - 		tpm_remove_hardware(chip->dev);
  22.672 - 	}
  22.673 - }
  22.674 -@@ -520,7 +521,7 @@ static struct pnp_driver tpm_inf_pnp = {
  22.675 - 	},
  22.676 - 	.id_table = tpm_pnp_tbl,
  22.677 - 	.probe = tpm_inf_pnp_probe,
  22.678 --	.remove = tpm_inf_pnp_remove,
  22.679 -+	.remove = __devexit_p(tpm_inf_pnp_remove),
  22.680 - };
  22.681 - 
  22.682 - static int __init init_inf(void)
  22.683 -@@ -538,5 +539,5 @@ module_exit(cleanup_inf);
  22.684 - 
  22.685 - MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
  22.686 - MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
  22.687 --MODULE_VERSION("1.7");
  22.688 -+MODULE_VERSION("1.8");
  22.689 - MODULE_LICENSE("GPL");
  22.690 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_nsc.c ./drivers/char/tpm/tpm_nsc.c
  22.691 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_nsc.c	2006-09-12 19:02:10.000000000 +0100
  22.692 -+++ ./drivers/char/tpm/tpm_nsc.c	2006-09-19 14:05:52.000000000 +0100
  22.693 -@@ -71,7 +71,7 @@ static int wait_for_stat(struct tpm_chip
  22.694 - 	unsigned long stop;
  22.695 - 
  22.696 - 	/* status immediately available check */
  22.697 --	*data = inb(chip->vendor->base + NSC_STATUS);
  22.698 -+	*data = inb(chip->vendor.base + NSC_STATUS);
  22.699 - 	if ((*data & mask) == val)
  22.700 - 		return 0;
  22.701 - 
  22.702 -@@ -79,7 +79,7 @@ static int wait_for_stat(struct tpm_chip
  22.703 - 	stop = jiffies + 10 * HZ;
  22.704 - 	do {
  22.705 - 		msleep(TPM_TIMEOUT);
  22.706 --		*data = inb(chip->vendor->base + 1);
  22.707 -+		*data = inb(chip->vendor.base + 1);
  22.708 - 		if ((*data & mask) == val)
  22.709 - 			return 0;
  22.710 - 	}
  22.711 -@@ -94,9 +94,9 @@ static int nsc_wait_for_ready(struct tpm
  22.712 - 	unsigned long stop;
  22.713 - 
  22.714 - 	/* status immediately available check */
  22.715 --	status = inb(chip->vendor->base + NSC_STATUS);
  22.716 -+	status = inb(chip->vendor.base + NSC_STATUS);
  22.717 - 	if (status & NSC_STATUS_OBF)
  22.718 --		status = inb(chip->vendor->base + NSC_DATA);
  22.719 -+		status = inb(chip->vendor.base + NSC_DATA);
  22.720 - 	if (status & NSC_STATUS_RDY)
  22.721 - 		return 0;
  22.722 - 
  22.723 -@@ -104,9 +104,9 @@ static int nsc_wait_for_ready(struct tpm
  22.724 - 	stop = jiffies + 100;
  22.725 - 	do {
  22.726 - 		msleep(TPM_TIMEOUT);
  22.727 --		status = inb(chip->vendor->base + NSC_STATUS);
  22.728 -+		status = inb(chip->vendor.base + NSC_STATUS);
  22.729 - 		if (status & NSC_STATUS_OBF)
  22.730 --			status = inb(chip->vendor->base + NSC_DATA);
  22.731 -+			status = inb(chip->vendor.base + NSC_DATA);
  22.732 - 		if (status & NSC_STATUS_RDY)
  22.733 - 			return 0;
  22.734 - 	}
  22.735 -@@ -132,7 +132,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  22.736 - 		return -EIO;
  22.737 - 	}
  22.738 - 	if ((data =
  22.739 --	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  22.740 -+	     inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  22.741 - 		dev_err(chip->dev, "not in normal mode (0x%x)\n",
  22.742 - 			data);
  22.743 - 		return -EIO;
  22.744 -@@ -148,7 +148,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  22.745 - 		}
  22.746 - 		if (data & NSC_STATUS_F0)
  22.747 - 			break;
  22.748 --		*p = inb(chip->vendor->base + NSC_DATA);
  22.749 -+		*p = inb(chip->vendor.base + NSC_DATA);
  22.750 - 	}
  22.751 - 
  22.752 - 	if ((data & NSC_STATUS_F0) == 0 &&
  22.753 -@@ -156,7 +156,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  22.754 - 		dev_err(chip->dev, "F0 not set\n");
  22.755 - 		return -EIO;
  22.756 - 	}
  22.757 --	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
  22.758 -+	if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
  22.759 - 		dev_err(chip->dev,
  22.760 - 			"expected end of command(0x%x)\n", data);
  22.761 - 		return -EIO;
  22.762 -@@ -182,7 +182,7 @@ static int tpm_nsc_send(struct tpm_chip 
  22.763 - 	 * fix it. Not sure why this is needed, we followed the flow
  22.764 - 	 * chart in the manual to the letter.
  22.765 - 	 */
  22.766 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  22.767 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  22.768 - 
  22.769 - 	if (nsc_wait_for_ready(chip) != 0)
  22.770 - 		return -EIO;
  22.771 -@@ -192,7 +192,7 @@ static int tpm_nsc_send(struct tpm_chip 
  22.772 - 		return -EIO;
  22.773 - 	}
  22.774 - 
  22.775 --	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
  22.776 -+	outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
  22.777 - 	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
  22.778 - 		dev_err(chip->dev, "IBR timeout\n");
  22.779 - 		return -EIO;
  22.780 -@@ -204,26 +204,26 @@ static int tpm_nsc_send(struct tpm_chip 
  22.781 - 				"IBF timeout (while writing data)\n");
  22.782 - 			return -EIO;
  22.783 - 		}
  22.784 --		outb(buf[i], chip->vendor->base + NSC_DATA);
  22.785 -+		outb(buf[i], chip->vendor.base + NSC_DATA);
  22.786 - 	}
  22.787 - 
  22.788 - 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  22.789 - 		dev_err(chip->dev, "IBF timeout\n");
  22.790 - 		return -EIO;
  22.791 - 	}
  22.792 --	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
  22.793 -+	outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
  22.794 - 
  22.795 - 	return count;
  22.796 - }
  22.797 - 
  22.798 - static void tpm_nsc_cancel(struct tpm_chip *chip)
  22.799 - {
  22.800 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  22.801 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  22.802 - }
  22.803 - 
  22.804 - static u8 tpm_nsc_status(struct tpm_chip *chip)
  22.805 - {
  22.806 --	return inb(chip->vendor->base + NSC_STATUS);
  22.807 -+	return inb(chip->vendor.base + NSC_STATUS);
  22.808 - }
  22.809 - 
  22.810 - static struct file_operations nsc_ops = {
  22.811 -@@ -250,7 +250,7 @@ static struct attribute * nsc_attrs[] = 
  22.812 - 
  22.813 - static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
  22.814 - 
  22.815 --static struct tpm_vendor_specific tpm_nsc = {
  22.816 -+static const struct tpm_vendor_specific tpm_nsc = {
  22.817 - 	.recv = tpm_nsc_recv,
  22.818 - 	.send = tpm_nsc_send,
  22.819 - 	.cancel = tpm_nsc_cancel,
  22.820 -@@ -268,7 +268,7 @@ static void __devexit tpm_nsc_remove(str
  22.821 - {
  22.822 - 	struct tpm_chip *chip = dev_get_drvdata(dev);
  22.823 - 	if ( chip ) {
  22.824 --		release_region(chip->vendor->base, 2);
  22.825 -+		release_region(chip->vendor.base, 2);
  22.826 - 		tpm_remove_hardware(chip->dev);
  22.827 - 	}
  22.828 - }
  22.829 -@@ -286,7 +286,8 @@ static int __init init_nsc(void)
  22.830 - 	int rc = 0;
  22.831 - 	int lo, hi;
  22.832 - 	int nscAddrBase = TPM_ADDR;
  22.833 --
  22.834 -+	struct tpm_chip *chip;
  22.835 -+	unsigned long base;
  22.836 - 
  22.837 - 	/* verify that it is a National part (SID) */
  22.838 - 	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
  22.839 -@@ -300,7 +301,7 @@ static int __init init_nsc(void)
  22.840 - 
  22.841 - 	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
  22.842 - 	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
  22.843 --	tpm_nsc.base = (hi<<8) | lo;
  22.844 -+	base = (hi<<8) | lo;
  22.845 - 
  22.846 - 	/* enable the DPM module */
  22.847 - 	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
  22.848 -@@ -320,13 +321,15 @@ static int __init init_nsc(void)
  22.849 - 	if ((rc = platform_device_register(pdev)) < 0)
  22.850 - 		goto err_free_dev;
  22.851 - 
  22.852 --	if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) {
  22.853 -+	if (request_region(base, 2, "tpm_nsc0") == NULL ) {
  22.854 - 		rc = -EBUSY;
  22.855 - 		goto err_unreg_dev;
  22.856 - 	}
  22.857 - 
  22.858 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0)
  22.859 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
  22.860 -+		rc = -ENODEV;
  22.861 - 		goto err_rel_reg;
  22.862 -+	}
  22.863 - 
  22.864 - 	dev_dbg(&pdev->dev, "NSC TPM detected\n");
  22.865 - 	dev_dbg(&pdev->dev,
  22.866 -@@ -361,10 +364,12 @@ static int __init init_nsc(void)
  22.867 - 		 "NSC TPM revision %d\n",
  22.868 - 		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
  22.869 - 
  22.870 -+	chip->vendor.base = base;
  22.871 -+
  22.872 - 	return 0;
  22.873 - 
  22.874 - err_rel_reg:
  22.875 --	release_region(tpm_nsc.base, 2);
  22.876 -+	release_region(base, 2);
  22.877 - err_unreg_dev:
  22.878 - 	platform_device_unregister(pdev);
  22.879 - err_free_dev:
  22.880 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_tis.c ./drivers/char/tpm/tpm_tis.c
  22.881 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_tis.c	1970-01-01 01:00:00.000000000 +0100
  22.882 -+++ ./drivers/char/tpm/tpm_tis.c	2006-09-19 14:05:52.000000000 +0100
  22.883 -@@ -0,0 +1,665 @@
  22.884 -+/*
  22.885 -+ * Copyright (C) 2005, 2006 IBM Corporation
  22.886 -+ *
  22.887 -+ * Authors:
  22.888 -+ * Leendert van Doorn <leendert@watson.ibm.com>
  22.889 -+ * Kylene Hall <kjhall@us.ibm.com>
  22.890 -+ *
  22.891 -+ * Device driver for TCG/TCPA TPM (trusted platform module).
  22.892 -+ * Specifications at www.trustedcomputinggroup.org
  22.893 -+ *
  22.894 -+ * This device driver implements the TPM interface as defined in
  22.895 -+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
  22.896 -+ *
  22.897 -+ * This program is free software; you can redistribute it and/or
  22.898 -+ * modify it under the terms of the GNU General Public License as
  22.899 -+ * published by the Free Software Foundation, version 2 of the
  22.900 -+ * License.
  22.901 -+ */
  22.902 -+#include <linux/init.h>
  22.903 -+#include <linux/module.h>
  22.904 -+#include <linux/moduleparam.h>
  22.905 -+#include <linux/pnp.h>
  22.906 -+#include <linux/interrupt.h>
  22.907 -+#include <linux/wait.h>
  22.908 -+#include "tpm.h"
  22.909 -+
  22.910 -+#define TPM_HEADER_SIZE 10
  22.911 -+
  22.912 -+enum tis_access {
  22.913 -+	TPM_ACCESS_VALID = 0x80,
  22.914 -+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
  22.915 -+	TPM_ACCESS_REQUEST_PENDING = 0x04,
  22.916 -+	TPM_ACCESS_REQUEST_USE = 0x02,
  22.917 -+};
  22.918 -+
  22.919 -+enum tis_status {
  22.920 -+	TPM_STS_VALID = 0x80,
  22.921 -+	TPM_STS_COMMAND_READY = 0x40,
  22.922 -+	TPM_STS_GO = 0x20,
  22.923 -+	TPM_STS_DATA_AVAIL = 0x10,
  22.924 -+	TPM_STS_DATA_EXPECT = 0x08,
  22.925 -+};
  22.926 -+
  22.927 -+enum tis_int_flags {
  22.928 -+	TPM_GLOBAL_INT_ENABLE = 0x80000000,
  22.929 -+	TPM_INTF_BURST_COUNT_STATIC = 0x100,
  22.930 -+	TPM_INTF_CMD_READY_INT = 0x080,
  22.931 -+	TPM_INTF_INT_EDGE_FALLING = 0x040,
  22.932 -+	TPM_INTF_INT_EDGE_RISING = 0x020,
  22.933 -+	TPM_INTF_INT_LEVEL_LOW = 0x010,
  22.934 -+	TPM_INTF_INT_LEVEL_HIGH = 0x008,
  22.935 -+	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
  22.936 -+	TPM_INTF_STS_VALID_INT = 0x002,
  22.937 -+	TPM_INTF_DATA_AVAIL_INT = 0x001,
  22.938 -+};
  22.939 -+
  22.940 -+enum tis_defaults {
  22.941 -+	TIS_MEM_BASE = 0xFED40000,
  22.942 -+	TIS_MEM_LEN = 0x5000,
  22.943 -+	TIS_SHORT_TIMEOUT = 750,	/* ms */
  22.944 -+	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
  22.945 -+};
  22.946 -+
  22.947 -+#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
  22.948 -+#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
  22.949 -+#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
  22.950 -+#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
  22.951 -+#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
  22.952 -+#define	TPM_STS(l)			(0x0018 | ((l) << 12))
  22.953 -+#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))
  22.954 -+
  22.955 -+#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
  22.956 -+#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
  22.957 -+
  22.958 -+static LIST_HEAD(tis_chips);
  22.959 -+static DEFINE_SPINLOCK(tis_lock);
  22.960 -+
  22.961 -+static int check_locality(struct tpm_chip *chip, int l)
  22.962 -+{
  22.963 -+	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  22.964 -+	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
  22.965 -+	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
  22.966 -+		return chip->vendor.locality = l;
  22.967 -+
  22.968 -+	return -1;
  22.969 -+}
  22.970 -+
  22.971 -+static void release_locality(struct tpm_chip *chip, int l, int force)
  22.972 -+{
  22.973 -+	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  22.974 -+		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
  22.975 -+	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
  22.976 -+		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
  22.977 -+			 chip->vendor.iobase + TPM_ACCESS(l));
  22.978 -+}
  22.979 -+
  22.980 -+static int request_locality(struct tpm_chip *chip, int l)
  22.981 -+{
  22.982 -+	unsigned long stop;
  22.983 -+	long rc;
  22.984 -+
  22.985 -+	if (check_locality(chip, l) >= 0)
  22.986 -+		return l;
  22.987 -+
  22.988 -+	iowrite8(TPM_ACCESS_REQUEST_USE,
  22.989 -+		 chip->vendor.iobase + TPM_ACCESS(l));
  22.990 -+
  22.991 -+	if (chip->vendor.irq) {
  22.992 -+		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
  22.993 -+						      (check_locality
  22.994 -+						       (chip, l) >= 0),
  22.995 -+						      chip->vendor.timeout_a);
  22.996 -+		if (rc > 0)
  22.997 -+			return l;
  22.998 -+
  22.999 -+	} else {
 22.1000 -+		/* wait for burstcount */
 22.1001 -+		stop = jiffies + chip->vendor.timeout_a;
 22.1002 -+		do {
 22.1003 -+			if (check_locality(chip, l) >= 0)
 22.1004 -+				return l;
 22.1005 -+			msleep(TPM_TIMEOUT);
 22.1006 -+		}
 22.1007 -+		while (time_before(jiffies, stop));
 22.1008 -+	}
 22.1009 -+	return -1;
 22.1010 -+}
 22.1011 -+
 22.1012 -+static u8 tpm_tis_status(struct tpm_chip *chip)
 22.1013 -+{
 22.1014 -+	return ioread8(chip->vendor.iobase +
 22.1015 -+		       TPM_STS(chip->vendor.locality));
 22.1016 -+}
 22.1017 -+
 22.1018 -+static void tpm_tis_ready(struct tpm_chip *chip)
 22.1019 -+{
 22.1020 -+	/* this causes the current command to be aborted */
 22.1021 -+	iowrite8(TPM_STS_COMMAND_READY,
 22.1022 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 22.1023 -+}
 22.1024 -+
 22.1025 -+static int get_burstcount(struct tpm_chip *chip)
 22.1026 -+{
 22.1027 -+	unsigned long stop;
 22.1028 -+	int burstcnt;
 22.1029 -+
 22.1030 -+	/* wait for burstcount */
 22.1031 -+	/* which timeout value, spec has 2 answers (c & d) */
 22.1032 -+	stop = jiffies + chip->vendor.timeout_d;
 22.1033 -+	do {
 22.1034 -+		burstcnt = ioread8(chip->vendor.iobase +
 22.1035 -+				   TPM_STS(chip->vendor.locality) + 1);
 22.1036 -+		burstcnt += ioread8(chip->vendor.iobase +
 22.1037 -+				    TPM_STS(chip->vendor.locality) +
 22.1038 -+				    2) << 8;
 22.1039 -+		if (burstcnt)
 22.1040 -+			return burstcnt;
 22.1041 -+		msleep(TPM_TIMEOUT);
 22.1042 -+	} while (time_before(jiffies, stop));
 22.1043 -+	return -EBUSY;
 22.1044 -+}
 22.1045 -+
 22.1046 -+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 22.1047 -+			 wait_queue_head_t *queue)
 22.1048 -+{
 22.1049 -+	unsigned long stop;
 22.1050 -+	long rc;
 22.1051 -+	u8 status;
 22.1052 -+
 22.1053 -+	/* check current status */
 22.1054 -+	status = tpm_tis_status(chip);
 22.1055 -+	if ((status & mask) == mask)
 22.1056 -+		return 0;
 22.1057 -+
 22.1058 -+	if (chip->vendor.irq) {
 22.1059 -+		rc = wait_event_interruptible_timeout(*queue,
 22.1060 -+						      ((tpm_tis_status
 22.1061 -+							(chip) & mask) ==
 22.1062 -+						       mask), timeout);
 22.1063 -+		if (rc > 0)
 22.1064 -+			return 0;
 22.1065 -+	} else {
 22.1066 -+		stop = jiffies + timeout;
 22.1067 -+		do {
 22.1068 -+			msleep(TPM_TIMEOUT);
 22.1069 -+			status = tpm_tis_status(chip);
 22.1070 -+			if ((status & mask) == mask)
 22.1071 -+				return 0;
 22.1072 -+		} while (time_before(jiffies, stop));
 22.1073 -+	}
 22.1074 -+	return -ETIME;
 22.1075 -+}
 22.1076 -+
 22.1077 -+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 22.1078 -+{
 22.1079 -+	int size = 0, burstcnt;
 22.1080 -+	while (size < count &&
 22.1081 -+	       wait_for_stat(chip,
 22.1082 -+			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 22.1083 -+			     chip->vendor.timeout_c,
 22.1084 -+			     &chip->vendor.read_queue)
 22.1085 -+	       == 0) {
 22.1086 -+		burstcnt = get_burstcount(chip);
 22.1087 -+		for (; burstcnt > 0 && size < count; burstcnt--)
 22.1088 -+			buf[size++] = ioread8(chip->vendor.iobase +
 22.1089 -+					      TPM_DATA_FIFO(chip->vendor.
 22.1090 -+							    locality));
 22.1091 -+	}
 22.1092 -+	return size;
 22.1093 -+}
 22.1094 -+
 22.1095 -+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 22.1096 -+{
 22.1097 -+	int size = 0;
 22.1098 -+	int expected, status;
 22.1099 -+
 22.1100 -+	if (count < TPM_HEADER_SIZE) {
 22.1101 -+		size = -EIO;
 22.1102 -+		goto out;
 22.1103 -+	}
 22.1104 -+
 22.1105 -+	/* read first 10 bytes, including tag, paramsize, and result */
 22.1106 -+	if ((size =
 22.1107 -+	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
 22.1108 -+		dev_err(chip->dev, "Unable to read header\n");
 22.1109 -+		goto out;
 22.1110 -+	}
 22.1111 -+
 22.1112 -+	expected = be32_to_cpu(*(__be32 *) (buf + 2));
 22.1113 -+	if (expected > count) {
 22.1114 -+		size = -EIO;
 22.1115 -+		goto out;
 22.1116 -+	}
 22.1117 -+
 22.1118 -+	if ((size +=
 22.1119 -+	     recv_data(chip, &buf[TPM_HEADER_SIZE],
 22.1120 -+		       expected - TPM_HEADER_SIZE)) < expected) {
 22.1121 -+		dev_err(chip->dev, "Unable to read remainder of result\n");
 22.1122 -+		size = -ETIME;
 22.1123 -+		goto out;
 22.1124 -+	}
 22.1125 -+
 22.1126 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 22.1127 -+		      &chip->vendor.int_queue);
 22.1128 -+	status = tpm_tis_status(chip);
 22.1129 -+	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
 22.1130 -+		dev_err(chip->dev, "Error left over data\n");
 22.1131 -+		size = -EIO;
 22.1132 -+		goto out;
 22.1133 -+	}
 22.1134 -+
 22.1135 -+out:
 22.1136 -+	tpm_tis_ready(chip);
 22.1137 -+	release_locality(chip, chip->vendor.locality, 0);
 22.1138 -+	return size;
 22.1139 -+}
 22.1140 -+
 22.1141 -+/*
 22.1142 -+ * If interrupts are used (signaled by an irq set in the vendor structure)
 22.1143 -+ * tpm.c can skip polling for the data to be available as the interrupt is
 22.1144 -+ * waited for here
 22.1145 -+ */
 22.1146 -+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
 22.1147 -+{
 22.1148 -+	int rc, status, burstcnt;
 22.1149 -+	size_t count = 0;
 22.1150 -+	u32 ordinal;
 22.1151 -+
 22.1152 -+	if (request_locality(chip, 0) < 0)
 22.1153 -+		return -EBUSY;
 22.1154 -+
 22.1155 -+	status = tpm_tis_status(chip);
 22.1156 -+	if ((status & TPM_STS_COMMAND_READY) == 0) {
 22.1157 -+		tpm_tis_ready(chip);
 22.1158 -+		if (wait_for_stat
 22.1159 -+		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
 22.1160 -+		     &chip->vendor.int_queue) < 0) {
 22.1161 -+			rc = -ETIME;
 22.1162 -+			goto out_err;
 22.1163 -+		}
 22.1164 -+	}
 22.1165 -+
 22.1166 -+	while (count < len - 1) {
 22.1167 -+		burstcnt = get_burstcount(chip);
 22.1168 -+		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
 22.1169 -+			iowrite8(buf[count], chip->vendor.iobase +
 22.1170 -+				 TPM_DATA_FIFO(chip->vendor.locality));
 22.1171 -+			count++;
 22.1172 -+		}
 22.1173 -+
 22.1174 -+		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 22.1175 -+			      &chip->vendor.int_queue);
 22.1176 -+		status = tpm_tis_status(chip);
 22.1177 -+		if ((status & TPM_STS_DATA_EXPECT) == 0) {
 22.1178 -+			rc = -EIO;
 22.1179 -+			goto out_err;
 22.1180 -+		}
 22.1181 -+	}
 22.1182 -+
 22.1183 -+	/* write last byte */
 22.1184 -+	iowrite8(buf[count],
 22.1185 -+		 chip->vendor.iobase +
 22.1186 -+		 TPM_DATA_FIFO(chip->vendor.locality));
 22.1187 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 22.1188 -+		      &chip->vendor.int_queue);
 22.1189 -+	status = tpm_tis_status(chip);
 22.1190 -+	if ((status & TPM_STS_DATA_EXPECT) != 0) {
 22.1191 -+		rc = -EIO;
 22.1192 -+		goto out_err;
 22.1193 -+	}
 22.1194 -+
 22.1195 -+	/* go and do it */
 22.1196 -+	iowrite8(TPM_STS_GO,
 22.1197 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 22.1198 -+
 22.1199 -+	if (chip->vendor.irq) {
 22.1200 -+		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
 22.1201 -+		if (wait_for_stat
 22.1202 -+		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 22.1203 -+		     tpm_calc_ordinal_duration(chip, ordinal),
 22.1204 -+		     &chip->vendor.read_queue) < 0) {
 22.1205 -+			rc = -ETIME;
 22.1206 -+			goto out_err;
 22.1207 -+		}
 22.1208 -+	}
 22.1209 -+	return len;
 22.1210 -+out_err:
 22.1211 -+	tpm_tis_ready(chip);
 22.1212 -+	release_locality(chip, chip->vendor.locality, 0);
 22.1213 -+	return rc;
 22.1214 -+}
 22.1215 -+
 22.1216 -+static struct file_operations tis_ops = {
 22.1217 -+	.owner = THIS_MODULE,
 22.1218 -+	.llseek = no_llseek,
 22.1219 -+	.open = tpm_open,
 22.1220 -+	.read = tpm_read,
 22.1221 -+	.write = tpm_write,
 22.1222 -+	.release = tpm_release,
 22.1223 -+};
 22.1224 -+
 22.1225 -+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
 22.1226 -+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
 22.1227 -+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
 22.1228 -+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
 22.1229 -+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
 22.1230 -+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
 22.1231 -+		   NULL);
 22.1232 -+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 22.1233 -+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 22.1234 -+
 22.1235 -+static struct attribute *tis_attrs[] = {
 22.1236 -+	&dev_attr_pubek.attr,
 22.1237 -+	&dev_attr_pcrs.attr,
 22.1238 -+	&dev_attr_enabled.attr,
 22.1239 -+	&dev_attr_active.attr,
 22.1240 -+	&dev_attr_owned.attr,
 22.1241 -+	&dev_attr_temp_deactivated.attr,
 22.1242 -+	&dev_attr_caps.attr,
 22.1243 -+	&dev_attr_cancel.attr, NULL,
 22.1244 -+};
 22.1245 -+
 22.1246 -+static struct attribute_group tis_attr_grp = {
 22.1247 -+	.attrs = tis_attrs
 22.1248 -+};
 22.1249 -+
 22.1250 -+static struct tpm_vendor_specific tpm_tis = {
 22.1251 -+	.status = tpm_tis_status,
 22.1252 -+	.recv = tpm_tis_recv,
 22.1253 -+	.send = tpm_tis_send,
 22.1254 -+	.cancel = tpm_tis_ready,
 22.1255 -+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 22.1256 -+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 22.1257 -+	.req_canceled = TPM_STS_COMMAND_READY,
 22.1258 -+	.attr_group = &tis_attr_grp,
 22.1259 -+	.miscdev = {
 22.1260 -+		    .fops = &tis_ops,},
 22.1261 -+};
 22.1262 -+
 22.1263 -+static irqreturn_t tis_int_probe(int irq, void *dev_id, struct pt_regs *regs)
 22.1264 -+{
 22.1265 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 22.1266 -+	u32 interrupt;
 22.1267 -+
 22.1268 -+	interrupt = ioread32(chip->vendor.iobase +
 22.1269 -+			     TPM_INT_STATUS(chip->vendor.locality));
 22.1270 -+
 22.1271 -+	if (interrupt == 0)
 22.1272 -+		return IRQ_NONE;
 22.1273 -+
 22.1274 -+	chip->vendor.irq = irq;
 22.1275 -+
 22.1276 -+	/* Clear interrupts handled with TPM_EOI */
 22.1277 -+	iowrite32(interrupt,
 22.1278 -+		  chip->vendor.iobase +
 22.1279 -+		  TPM_INT_STATUS(chip->vendor.locality));
 22.1280 -+	return IRQ_HANDLED;
 22.1281 -+}
 22.1282 -+
 22.1283 -+static irqreturn_t tis_int_handler(int irq, void *dev_id, struct pt_regs *regs)
 22.1284 -+{
 22.1285 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 22.1286 -+	u32 interrupt;
 22.1287 -+	int i;
 22.1288 -+
 22.1289 -+	interrupt = ioread32(chip->vendor.iobase +
 22.1290 -+			     TPM_INT_STATUS(chip->vendor.locality));
 22.1291 -+
 22.1292 -+	if (interrupt == 0)
 22.1293 -+		return IRQ_NONE;
 22.1294 -+
 22.1295 -+	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
 22.1296 -+		wake_up_interruptible(&chip->vendor.read_queue);
 22.1297 -+	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
 22.1298 -+		for (i = 0; i < 5; i++)
 22.1299 -+			if (check_locality(chip, i) >= 0)
 22.1300 -+				break;
 22.1301 -+	if (interrupt &
 22.1302 -+	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
 22.1303 -+	     TPM_INTF_CMD_READY_INT))
 22.1304 -+		wake_up_interruptible(&chip->vendor.int_queue);
 22.1305 -+
 22.1306 -+	/* Clear interrupts handled with TPM_EOI */
 22.1307 -+	iowrite32(interrupt,
 22.1308 -+		  chip->vendor.iobase +
 22.1309 -+		  TPM_INT_STATUS(chip->vendor.locality));
 22.1310 -+	return IRQ_HANDLED;
 22.1311 -+}
 22.1312 -+
 22.1313 -+static int interrupts = 1;
 22.1314 -+module_param(interrupts, bool, 0444);
 22.1315 -+MODULE_PARM_DESC(interrupts, "Enable interrupts");
 22.1316 -+
 22.1317 -+static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 22.1318 -+				      const struct pnp_device_id *pnp_id)
 22.1319 -+{
 22.1320 -+	u32 vendor, intfcaps, intmask;
 22.1321 -+	int rc, i;
 22.1322 -+	unsigned long start, len;
 22.1323 -+	struct tpm_chip *chip;
 22.1324 -+
 22.1325 -+	start = pnp_mem_start(pnp_dev, 0);
 22.1326 -+	len = pnp_mem_len(pnp_dev, 0);
 22.1327 -+
 22.1328 -+	if (!start)
 22.1329 -+		start = TIS_MEM_BASE;
 22.1330 -+	if (!len)
 22.1331 -+		len = TIS_MEM_LEN;
 22.1332 -+
 22.1333 -+	if (!(chip = tpm_register_hardware(&pnp_dev->dev, &tpm_tis)))
 22.1334 -+		return -ENODEV;
 22.1335 -+
 22.1336 -+	chip->vendor.iobase = ioremap(start, len);
 22.1337 -+	if (!chip->vendor.iobase) {
 22.1338 -+		rc = -EIO;
 22.1339 -+		goto out_err;
 22.1340 -+	}
 22.1341 -+
 22.1342 -+	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
 22.1343 -+
 22.1344 -+	/* Default timeouts */
 22.1345 -+	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 22.1346 -+	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
 22.1347 -+	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 22.1348 -+	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 22.1349 -+
 22.1350 -+	dev_info(&pnp_dev->dev,
 22.1351 -+		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 22.1352 -+		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 22.1353 -+
 22.1354 -+	/* Figure out the capabilities */
 22.1355 -+	intfcaps =
 22.1356 -+	    ioread32(chip->vendor.iobase +
 22.1357 -+		     TPM_INTF_CAPS(chip->vendor.locality));
 22.1358 -+	dev_dbg(&pnp_dev->dev, "TPM interface capabilities (0x%x):\n",
 22.1359 -+		intfcaps);
 22.1360 -+	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
 22.1361 -+		dev_dbg(&pnp_dev->dev, "\tBurst Count Static\n");
 22.1362 -+	if (intfcaps & TPM_INTF_CMD_READY_INT)
 22.1363 -+		dev_dbg(&pnp_dev->dev, "\tCommand Ready Int Support\n");
 22.1364 -+	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
 22.1365 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Falling\n");
 22.1366 -+	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
 22.1367 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Rising\n");
 22.1368 -+	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
 22.1369 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level Low\n");
 22.1370 -+	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
 22.1371 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level High\n");
 22.1372 -+	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
 22.1373 -+		dev_dbg(&pnp_dev->dev, "\tLocality Change Int Support\n");
 22.1374 -+	if (intfcaps & TPM_INTF_STS_VALID_INT)
 22.1375 -+		dev_dbg(&pnp_dev->dev, "\tSts Valid Int Support\n");
 22.1376 -+	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 22.1377 -+		dev_dbg(&pnp_dev->dev, "\tData Avail Int Support\n");
 22.1378 -+
 22.1379 -+	if (request_locality(chip, 0) != 0) {
 22.1380 -+		rc = -ENODEV;
 22.1381 -+		goto out_err;
 22.1382 -+	}
 22.1383 -+
 22.1384 -+	/* INTERRUPT Setup */
 22.1385 -+	init_waitqueue_head(&chip->vendor.read_queue);
 22.1386 -+	init_waitqueue_head(&chip->vendor.int_queue);
 22.1387 -+
 22.1388 -+	intmask =
 22.1389 -+	    ioread32(chip->vendor.iobase +
 22.1390 -+		     TPM_INT_ENABLE(chip->vendor.locality));
 22.1391 -+
 22.1392 -+	intmask |= TPM_INTF_CMD_READY_INT
 22.1393 -+	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
 22.1394 -+	    | TPM_INTF_STS_VALID_INT;
 22.1395 -+
 22.1396 -+	iowrite32(intmask,
 22.1397 -+		  chip->vendor.iobase +
 22.1398 -+		  TPM_INT_ENABLE(chip->vendor.locality));
 22.1399 -+	if (interrupts) {
 22.1400 -+		chip->vendor.irq =
 22.1401 -+		    ioread8(chip->vendor.iobase +
 22.1402 -+			    TPM_INT_VECTOR(chip->vendor.locality));
 22.1403 -+
 22.1404 -+		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
 22.1405 -+			iowrite8(i, chip->vendor.iobase +
 22.1406 -+				    TPM_INT_VECTOR(chip->vendor.locality));
 22.1407 -+			if (request_irq
 22.1408 -+			    (i, tis_int_probe, SA_SHIRQ,
 22.1409 -+			     chip->vendor.miscdev.name, chip) != 0) {
 22.1410 -+				dev_info(chip->dev,
 22.1411 -+					 "Unable to request irq: %d for probe\n",
 22.1412 -+					 i);
 22.1413 -+				continue;
 22.1414 -+			}
 22.1415 -+
 22.1416 -+			/* Clear all existing */
 22.1417 -+			iowrite32(ioread32
 22.1418 -+				  (chip->vendor.iobase +
 22.1419 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 22.1420 -+				  chip->vendor.iobase +
 22.1421 -+				  TPM_INT_STATUS(chip->vendor.locality));
 22.1422 -+
 22.1423 -+			/* Turn on */
 22.1424 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 22.1425 -+				  chip->vendor.iobase +
 22.1426 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 22.1427 -+
 22.1428 -+			/* Generate Interrupts */
 22.1429 -+			tpm_gen_interrupt(chip);
 22.1430 -+
 22.1431 -+			/* Turn off */
 22.1432 -+			iowrite32(intmask,
 22.1433 -+				  chip->vendor.iobase +
 22.1434 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 22.1435 -+			free_irq(i, chip);
 22.1436 -+		}
 22.1437 -+	}
 22.1438 -+	if (chip->vendor.irq) {
 22.1439 -+		iowrite8(chip->vendor.irq,
 22.1440 -+			 chip->vendor.iobase +
 22.1441 -+			 TPM_INT_VECTOR(chip->vendor.locality));
 22.1442 -+		if (request_irq
 22.1443 -+		    (chip->vendor.irq, tis_int_handler, SA_SHIRQ,
 22.1444 -+		     chip->vendor.miscdev.name, chip) != 0) {
 22.1445 -+			dev_info(chip->dev,
 22.1446 -+				 "Unable to request irq: %d for use\n",
 22.1447 -+				 chip->vendor.irq);
 22.1448 -+			chip->vendor.irq = 0;
 22.1449 -+		} else {
 22.1450 -+			/* Clear all existing */
 22.1451 -+			iowrite32(ioread32
 22.1452 -+				  (chip->vendor.iobase +
 22.1453 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 22.1454 -+				  chip->vendor.iobase +
 22.1455 -+				  TPM_INT_STATUS(chip->vendor.locality));
 22.1456 -+
 22.1457 -+			/* Turn on */
 22.1458 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 22.1459 -+				  chip->vendor.iobase +
 22.1460 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 22.1461 -+		}
 22.1462 -+	}
 22.1463 -+
 22.1464 -+	INIT_LIST_HEAD(&chip->vendor.list);
 22.1465 -+	spin_lock(&tis_lock);
 22.1466 -+	list_add(&chip->vendor.list, &tis_chips);
 22.1467 -+	spin_unlock(&tis_lock);
 22.1468 -+
 22.1469 -+	tpm_get_timeouts(chip);
 22.1470 -+	tpm_continue_selftest(chip);
 22.1471 -+
 22.1472 -+	return 0;
 22.1473 -+out_err:
 22.1474 -+	if (chip->vendor.iobase)
 22.1475 -+		iounmap(chip->vendor.iobase);
 22.1476 -+	tpm_remove_hardware(chip->dev);
 22.1477 -+	return rc;
 22.1478 -+}
 22.1479 -+
 22.1480 -+static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 22.1481 -+{
 22.1482 -+	return tpm_pm_suspend(&dev->dev, msg);
 22.1483 -+}
 22.1484 -+
 22.1485 -+static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 22.1486 -+{
 22.1487 -+	return tpm_pm_resume(&dev->dev);
 22.1488 -+}
 22.1489 -+
 22.1490 -+static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
 22.1491 -+	{"PNP0C31", 0},		/* TPM */
 22.1492 -+	{"ATM1200", 0},		/* Atmel */
 22.1493 -+	{"IFX0102", 0},		/* Infineon */
 22.1494 -+	{"BCM0101", 0},		/* Broadcom */
 22.1495 -+	{"NSC1200", 0},		/* National */
 22.1496 -+	/* Add new here */
 22.1497 -+	{"", 0},		/* User Specified */
 22.1498 -+	{"", 0}			/* Terminator */
 22.1499 -+};
 22.1500 -+
 22.1501 -+static struct pnp_driver tis_pnp_driver = {
 22.1502 -+	.name = "tpm_tis",
 22.1503 -+	.id_table = tpm_pnp_tbl,
 22.1504 -+	.probe = tpm_tis_pnp_init,
 22.1505 -+	.suspend = tpm_tis_pnp_suspend,
 22.1506 -+	.resume = tpm_tis_pnp_resume,
 22.1507 -+};
 22.1508 -+
 22.1509 -+#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
 22.1510 -+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 22.1511 -+		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
 22.1512 -+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 22.1513 -+
 22.1514 -+static int __init init_tis(void)
 22.1515 -+{
 22.1516 -+	return pnp_register_driver(&tis_pnp_driver);
 22.1517 -+}
 22.1518 -+
 22.1519 -+static void __exit cleanup_tis(void)
 22.1520 -+{
 22.1521 -+	struct tpm_vendor_specific *i, *j;
 22.1522 -+	struct tpm_chip *chip;
 22.1523 -+	spin_lock(&tis_lock);
 22.1524 -+	list_for_each_entry_safe(i, j, &tis_chips, list) {
 22.1525 -+		chip = to_tpm_chip(i);
 22.1526 -+		iowrite32(~TPM_GLOBAL_INT_ENABLE &
 22.1527 -+			  ioread32(chip->vendor.iobase +
 22.1528 -+				   TPM_INT_ENABLE(chip->vendor.
 22.1529 -+						  locality)),
 22.1530 -+			  chip->vendor.iobase +
 22.1531 -+			  TPM_INT_ENABLE(chip->vendor.locality));
 22.1532 -+		release_locality(chip, chip->vendor.locality, 1);
 22.1533 -+		if (chip->vendor.irq)
 22.1534 -+			free_irq(chip->vendor.irq, chip);
 22.1535 -+		iounmap(i->iobase);
 22.1536 -+		list_del(&i->list);
 22.1537 -+		tpm_remove_hardware(chip->dev);
 22.1538 -+	}
 22.1539 -+	spin_unlock(&tis_lock);
 22.1540 -+	pnp_unregister_driver(&tis_pnp_driver);
 22.1541 -+}
 22.1542 -+
 22.1543 -+module_init(init_tis);
 22.1544 -+module_exit(cleanup_tis);
 22.1545 -+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
 22.1546 -+MODULE_DESCRIPTION("TPM Driver");
 22.1547 -+MODULE_VERSION("2.0");
 22.1548 -+MODULE_LICENSE("GPL");
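Every vendor driver touched by the tpm patch above is converted the same way: tpm_register_hardware() now returns a struct tpm_chip pointer (NULL on failure) rather than an int, and the per-device fields that previously lived in the static tpm_vendor_specific (iobase, base, have_region, region_size) are filled into chip->vendor once registration succeeds. A condensed sketch of that pattern, assuming only the field and function names visible in the hunks above (the wrapper function itself is hypothetical and the platform-device/region unwinding is elided):

/* Sketch only: mirrors the init_atmel()/init_nsc() conversion above.
 * Real probe paths also release the I/O region and unregister the
 * platform device on failure.
 */
static int example_register(struct device *dev,
			    const struct tpm_vendor_specific *desc,
			    void __iomem *iobase, unsigned long base,
			    int have_region, int region_size)
{
	struct tpm_chip *chip;

	chip = tpm_register_hardware(dev, desc);	/* old API returned int rc */
	if (!chip)
		return -ENODEV;				/* old code checked rc < 0 */

	/* per-device state now lives on the chip, not in the shared descriptor */
	chip->vendor.iobase = iobase;
	chip->vendor.base = base;
	chip->vendor.have_region = have_region;
	chip->vendor.region_size = region_size;
	return 0;
}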
    23.1 --- a/patches/linux-2.6.16.30/x86-elfnote-as-preprocessor-macro.patch	Mon Nov 27 13:50:01 2006 +0000
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,43 +0,0 @@
    23.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/elfnote.h ./include/linux/elfnote.h
    23.5 ---- ../orig-linux-2.6.16.29/include/linux/elfnote.h	2006-09-19 14:06:10.000000000 +0100
    23.6 -+++ ./include/linux/elfnote.h	2006-09-19 14:06:20.000000000 +0100
    23.7 -@@ -31,22 +31,24 @@
    23.8 - /*
    23.9 -  * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
   23.10 -  * turn out to be the same size and shape), followed by the name and
   23.11 -- * desc data with appropriate padding.  The 'desc' argument includes
   23.12 -- * the assembler pseudo op defining the type of the data: .asciz
   23.13 -- * "hello, world"
   23.14 -+ * desc data with appropriate padding.  The 'desctype' argument is the
   23.15 -+ * assembler pseudo op defining the type of the data e.g. .asciz while
   23.16 -+ * 'descdata' is the data itself e.g.  "hello, world".
   23.17 -+ *
   23.18 -+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
   23.19 -+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
   23.20 -  */
   23.21 --.macro ELFNOTE name type desc:vararg
   23.22 --.pushsection ".note.\name"
   23.23 --  .align 4
   23.24 --  .long 2f - 1f			/* namesz */
   23.25 --  .long 4f - 3f			/* descsz */
   23.26 --  .long \type
   23.27 --1:.asciz "\name"
   23.28 --2:.align 4
   23.29 --3:\desc
   23.30 --4:.align 4
   23.31 --.popsection
   23.32 --.endm
   23.33 -+#define ELFNOTE(name, type, desctype, descdata)	\
   23.34 -+.pushsection .note.name			;	\
   23.35 -+  .align 4				;	\
   23.36 -+  .long 2f - 1f		/* namesz */	;	\
   23.37 -+  .long 4f - 3f		/* descsz */	;	\
   23.38 -+  .long type				;	\
   23.39 -+1:.asciz "name"				;	\
   23.40 -+2:.align 4				;	\
   23.41 -+3:desctype descdata			;	\
   23.42 -+4:.align 4				;	\
   23.43 -+.popsection				;
   23.44 - #else	/* !__ASSEMBLER__ */
   23.45 - #include <linux/elf.h>
   23.46 - /*
    24.1 --- a/patches/linux-2.6.16.30/x86-increase-interrupt-vector-range.patch	Mon Nov 27 13:50:01 2006 +0000
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,89 +0,0 @@
    24.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    24.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S	2006-09-19 14:05:44.000000000 +0100
    24.6 -+++ ./arch/i386/kernel/entry.S	2006-09-19 14:05:56.000000000 +0100
    24.7 -@@ -406,7 +406,7 @@ vector=0
    24.8 - ENTRY(irq_entries_start)
    24.9 - .rept NR_IRQS
   24.10 - 	ALIGN
   24.11 --1:	pushl $vector-256
   24.12 -+1:	pushl $~(vector)
   24.13 - 	jmp common_interrupt
   24.14 - .data
   24.15 - 	.long 1b
   24.16 -@@ -423,7 +423,7 @@ common_interrupt:
   24.17 - 
   24.18 - #define BUILD_INTERRUPT(name, nr)	\
   24.19 - ENTRY(name)				\
   24.20 --	pushl $nr-256;			\
   24.21 -+	pushl $~(nr);			\
   24.22 - 	SAVE_ALL			\
   24.23 - 	movl %esp,%eax;			\
   24.24 - 	call smp_/**/name;		\
   24.25 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/irq.c ./arch/i386/kernel/irq.c
   24.26 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/irq.c	2006-09-12 19:02:10.000000000 +0100
   24.27 -+++ ./arch/i386/kernel/irq.c	2006-09-19 14:05:56.000000000 +0100
   24.28 -@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPU
   24.29 -  */
   24.30 - fastcall unsigned int do_IRQ(struct pt_regs *regs)
   24.31 - {	
   24.32 --	/* high bits used in ret_from_ code */
   24.33 --	int irq = regs->orig_eax & 0xff;
   24.34 -+	/* high bit used in ret_from_ code */
   24.35 -+	int irq = ~regs->orig_eax;
   24.36 - #ifdef CONFIG_4KSTACKS
   24.37 - 	union irq_ctx *curctx, *irqctx;
   24.38 - 	u32 *isp;
   24.39 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/entry.S ./arch/x86_64/kernel/entry.S
   24.40 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/entry.S	2006-09-12 19:02:10.000000000 +0100
   24.41 -+++ ./arch/x86_64/kernel/entry.S	2006-09-19 14:05:56.000000000 +0100
   24.42 -@@ -596,7 +596,7 @@ retint_kernel:	
   24.43 -  */		
   24.44 - 	.macro apicinterrupt num,func
   24.45 - 	INTR_FRAME
   24.46 --	pushq $\num-256
   24.47 -+	pushq $~(\num)
   24.48 - 	CFI_ADJUST_CFA_OFFSET 8
   24.49 - 	interrupt \func
   24.50 - 	jmp ret_from_intr
   24.51 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/irq.c ./arch/x86_64/kernel/irq.c
   24.52 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/irq.c	2006-09-12 19:02:10.000000000 +0100
   24.53 -+++ ./arch/x86_64/kernel/irq.c	2006-09-19 14:05:56.000000000 +0100
   24.54 -@@ -96,8 +96,8 @@ skip:
   24.55 -  */
   24.56 - asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
   24.57 - {	
   24.58 --	/* high bits used in ret_from_ code  */
   24.59 --	unsigned irq = regs->orig_rax & 0xff;
   24.60 -+	/* high bit used in ret_from_ code  */
   24.61 -+	unsigned irq = ~regs->orig_rax;
   24.62 - 
   24.63 - 	exit_idle();
   24.64 - 	irq_enter();
   24.65 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/smp.c ./arch/x86_64/kernel/smp.c
   24.66 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/smp.c	2006-09-12 19:02:10.000000000 +0100
   24.67 -+++ ./arch/x86_64/kernel/smp.c	2006-09-19 14:05:56.000000000 +0100
   24.68 -@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt
   24.69 - 
   24.70 - 	cpu = smp_processor_id();
   24.71 - 	/*
   24.72 --	 * orig_rax contains the interrupt vector - 256.
   24.73 -+	 * orig_rax contains the negated interrupt vector.
   24.74 - 	 * Use that to determine where the sender put the data.
   24.75 - 	 */
   24.76 --	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
   24.77 -+	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
   24.78 - 	f = &per_cpu(flush_state, sender);
   24.79 - 
   24.80 - 	if (!cpu_isset(cpu, f->flush_cpumask))
   24.81 -diff -pruN ../orig-linux-2.6.16.29/include/asm-x86_64/hw_irq.h ./include/asm-x86_64/hw_irq.h
   24.82 ---- ../orig-linux-2.6.16.29/include/asm-x86_64/hw_irq.h	2006-09-12 19:02:10.000000000 +0100
   24.83 -+++ ./include/asm-x86_64/hw_irq.h	2006-09-19 14:05:56.000000000 +0100
   24.84 -@@ -127,7 +127,7 @@ asmlinkage void IRQ_NAME(nr); \
   24.85 - __asm__( \
   24.86 - "\n.p2align\n" \
   24.87 - "IRQ" #nr "_interrupt:\n\t" \
   24.88 --	"push $" #nr "-256 ; " \
   24.89 -+	"push $~(" #nr ") ; " \
   24.90 - 	"jmp common_interrupt");
   24.91 - 
   24.92 - #if defined(CONFIG_X86_IO_APIC)
    25.1 --- a/patches/linux-2.6.16.30/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Mon Nov 27 13:50:01 2006 +0000
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,143 +0,0 @@
    25.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
    25.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:05:48.000000000 +0100
    25.6 -+++ ./arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:06:10.000000000 +0100
    25.7 -@@ -12,6 +12,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
    25.8 - OUTPUT_ARCH(i386)
    25.9 - ENTRY(phys_startup_32)
   25.10 - jiffies = jiffies_64;
   25.11 -+
   25.12 -+PHDRS {
   25.13 -+	text PT_LOAD FLAGS(5);	/* R_E */
   25.14 -+	data PT_LOAD FLAGS(7);	/* RWE */
   25.15 -+	note PT_NOTE FLAGS(4);	/* R__ */
   25.16 -+}
   25.17 - SECTIONS
   25.18 - {
   25.19 -   . = __KERNEL_START;
   25.20 -@@ -25,7 +31,7 @@ SECTIONS
   25.21 - 	KPROBES_TEXT
   25.22 - 	*(.fixup)
   25.23 - 	*(.gnu.warning)
   25.24 --	} = 0x9090
   25.25 -+	} :text = 0x9090
   25.26 - 
   25.27 -   _etext = .;			/* End of text section */
   25.28 - 
   25.29 -@@ -47,7 +53,7 @@ SECTIONS
   25.30 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
   25.31 - 	*(.data)
   25.32 - 	CONSTRUCTORS
   25.33 --	}
   25.34 -+	} :data
   25.35 - 
   25.36 -   . = ALIGN(4096);
   25.37 -   __nosave_begin = .;
   25.38 -@@ -154,4 +160,6 @@ SECTIONS
   25.39 -   STABS_DEBUG
   25.40 - 
   25.41 -   DWARF_DEBUG
   25.42 -+
   25.43 -+  NOTES
   25.44 - }
   25.45 -diff -pruN ../orig-linux-2.6.16.29/include/asm-generic/vmlinux.lds.h ./include/asm-generic/vmlinux.lds.h
   25.46 ---- ../orig-linux-2.6.16.29/include/asm-generic/vmlinux.lds.h	2006-09-12 19:02:10.000000000 +0100
   25.47 -+++ ./include/asm-generic/vmlinux.lds.h	2006-09-19 14:06:10.000000000 +0100
   25.48 -@@ -152,3 +152,6 @@
   25.49 - 		.stab.index 0 : { *(.stab.index) }			\
   25.50 - 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
   25.51 - 		.comment 0 : { *(.comment) }
   25.52 -+
   25.53 -+#define NOTES								\
   25.54 -+		.notes : { *(.note.*) } :note
   25.55 -diff -pruN ../orig-linux-2.6.16.29/include/linux/elfnote.h ./include/linux/elfnote.h
   25.56 ---- ../orig-linux-2.6.16.29/include/linux/elfnote.h	1970-01-01 01:00:00.000000000 +0100
   25.57 -+++ ./include/linux/elfnote.h	2006-09-19 14:06:10.000000000 +0100
   25.58 -@@ -0,0 +1,88 @@
   25.59 -+#ifndef _LINUX_ELFNOTE_H
   25.60 -+#define _LINUX_ELFNOTE_H
   25.61 -+/*
   25.62 -+ * Helper macros to generate ELF Note structures, which are put into a
   25.63 -+ * PT_NOTE segment of the final vmlinux image.  These are useful for
   25.64 -+ * including name-value pairs of metadata into the kernel binary (or
   25.65 -+ * modules?) for use by external programs.
   25.66 -+ *
   25.67 -+ * Each note has three parts: a name, a type and a desc.  The name is
   25.68 -+ * intended to distinguish the note's originator, so it would be a
   25.69 -+ * company, project, subsystem, etc; it must be in a suitable form for
   25.70 -+ * use in a section name.  The type is an integer which is used to tag
   25.71 -+ * the data, and is considered to be within the "name" namespace (so
   25.72 -+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
   25.73 -+ * "desc" field is the actual data.  There are no constraints on the
   25.74 -+ * desc field's contents, though typically they're fairly small.
   25.75 -+ *
   25.76 -+ * All notes from a given NAME are put into a section named
   25.77 -+ * .note.NAME.  When the kernel image is finally linked, all the notes
   25.78 -+ * are packed into a single .notes section, which is mapped into the
   25.79 -+ * PT_NOTE segment.  Because notes for a given name are grouped into
   25.80 -+ * the same section, they'll all be adjacent the output file.
   25.81 -+ *
   25.82 -+ * This file defines macros for both C and assembler use.  Their
   25.83 -+ * syntax is slightly different, but they're semantically similar.
   25.84 -+ *
   25.85 -+ * See the ELF specification for more detail about ELF notes.
   25.86 -+ */
   25.87 -+
   25.88 -+#ifdef __ASSEMBLER__
   25.89 -+/*
   25.90 -+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
   25.91 -+ * turn out to be the same size and shape), followed by the name and
   25.92 -+ * desc data with appropriate padding.  The 'desc' argument includes
   25.93 -+ * the assembler pseudo op defining the type of the data: .asciz
   25.94 -+ * "hello, world"
   25.95 -+ */
   25.96 -+.macro ELFNOTE name type desc:vararg
   25.97 -+.pushsection ".note.\name"
   25.98 -+  .align 4
   25.99 -+  .long 2f - 1f			/* namesz */
  25.100 -+  .long 4f - 3f			/* descsz */
  25.101 -+  .long \type
  25.102 -+1:.asciz "\name"
  25.103 -+2:.align 4
  25.104 -+3:\desc
  25.105 -+4:.align 4
  25.106 -+.popsection
  25.107 -+.endm
  25.108 -+#else	/* !__ASSEMBLER__ */
  25.109 -+#include <linux/elf.h>
  25.110 -+/*
  25.111 -+ * Use an anonymous structure which matches the shape of
  25.112 -+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
  25.113 -+ * type of name and desc depend on the macro arguments.  "name" must
  25.114 -+ * be a literal string, and "desc" must be passed by value.  You may
  25.115 -+ * only define one note per line, since __LINE__ is used to generate
  25.116 -+ * unique symbols.
  25.117 -+ */
  25.118 -+#define _ELFNOTE_PASTE(a,b)	a##b
  25.119 -+#define _ELFNOTE(size, name, unique, type, desc)			\
  25.120 -+	static const struct {						\
  25.121 -+		struct elf##size##_note _nhdr;				\
  25.122 -+		unsigned char _name[sizeof(name)]			\
  25.123 -+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
  25.124 -+		typeof(desc) _desc					\
  25.125 -+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
  25.126 -+	} _ELFNOTE_PASTE(_note_, unique)				\
  25.127 -+		__attribute_used__					\
  25.128 -+		__attribute__((section(".note." name),			\
  25.129 -+			       aligned(sizeof(Elf##size##_Word)),	\
  25.130 -+			       unused)) = {				\
  25.131 -+		{							\
  25.132 -+			sizeof(name),					\
  25.133 -+			sizeof(desc),					\
  25.134 -+			type,						\
  25.135 -+		},							\
  25.136 -+		name,							\
  25.137 -+		desc							\
  25.138 -+	}
  25.139 -+#define ELFNOTE(size, name, type, desc)		\
  25.140 -+	_ELFNOTE(size, name, __LINE__, type, desc)
  25.141 -+
  25.142 -+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
  25.143 -+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
  25.144 -+#endif	/* __ASSEMBLER__ */
  25.145 -+
  25.146 -+#endif /* _LINUX_ELFNOTE_H */
    26.1 --- a/patches/linux-2.6.16.30/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Mon Nov 27 13:50:01 2006 +0000
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,84 +0,0 @@
    26.4 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/vmlinux.lds.S ./arch/x86_64/kernel/vmlinux.lds.S
    26.5 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/vmlinux.lds.S	2006-09-12 19:02:10.000000000 +0100
    26.6 -+++ ./arch/x86_64/kernel/vmlinux.lds.S	2006-09-19 14:06:15.000000000 +0100
    26.7 -@@ -14,6 +14,12 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86
    26.8 - OUTPUT_ARCH(i386:x86-64)
    26.9 - ENTRY(phys_startup_64)
   26.10 - jiffies_64 = jiffies;
   26.11 -+PHDRS {
   26.12 -+	text PT_LOAD FLAGS(5);	/* R_E */
   26.13 -+	data PT_LOAD FLAGS(7);	/* RWE */
   26.14 -+	user PT_LOAD FLAGS(7);	/* RWE */
   26.15 -+	note PT_NOTE FLAGS(4);	/* R__ */
   26.16 -+}
   26.17 - SECTIONS
   26.18 - {
   26.19 -   . = __START_KERNEL;
   26.20 -@@ -26,7 +32,7 @@ SECTIONS
   26.21 - 	KPROBES_TEXT
   26.22 - 	*(.fixup)
   26.23 - 	*(.gnu.warning)
   26.24 --	} = 0x9090
   26.25 -+	} :text = 0x9090
   26.26 -   				/* out-of-line lock text */
   26.27 -   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
   26.28 - 
   26.29 -@@ -43,17 +49,10 @@ SECTIONS
   26.30 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {
   26.31 - 	*(.data)
   26.32 - 	CONSTRUCTORS
   26.33 --	}
   26.34 -+	} :data
   26.35 - 
   26.36 -   _edata = .;			/* End of data section */
   26.37 - 
   26.38 --  __bss_start = .;		/* BSS */
   26.39 --  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   26.40 --	*(.bss.page_aligned)	
   26.41 --	*(.bss)
   26.42 --	}
   26.43 --  __bss_stop = .;
   26.44 --
   26.45 -   . = ALIGN(PAGE_SIZE);
   26.46 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   26.47 -   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
   26.48 -@@ -75,7 +74,7 @@ SECTIONS
   26.49 - #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
   26.50 - 
   26.51 -   . = VSYSCALL_ADDR;
   26.52 --  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
   26.53 -+  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
   26.54 -   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
   26.55 - 
   26.56 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   26.57 -@@ -118,7 +117,7 @@ SECTIONS
   26.58 -   . = ALIGN(8192);		/* init_task */
   26.59 -   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
   26.60 - 	*(.data.init_task)
   26.61 --  }
   26.62 -+  } :data
   26.63 - 
   26.64 -   . = ALIGN(4096);
   26.65 -   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
   26.66 -@@ -188,6 +187,14 @@ SECTIONS
   26.67 -   . = ALIGN(4096);
   26.68 -   __nosave_end = .;
   26.69 - 
   26.70 -+  __bss_start = .;		/* BSS */
   26.71 -+  . = ALIGN(4096);
   26.72 -+  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   26.73 -+	*(.bss.page_aligned)
   26.74 -+	*(.bss)
   26.75 -+	}
   26.76 -+  __bss_stop = .;
   26.77 -+
   26.78 -   _end = . ;
   26.79 - 
   26.80 -   /* Sections to be discarded */
   26.81 -@@ -201,4 +208,6 @@ SECTIONS
   26.82 -   STABS_DEBUG
   26.83 - 
   26.84 -   DWARF_DEBUG
   26.85 -+
   26.86 -+  NOTES
   26.87 - }
    27.1 --- a/patches/linux-2.6.16.30/xen-hotplug.patch	Mon Nov 27 13:50:01 2006 +0000
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,12 +0,0 @@
    27.4 -diff -pruN ../orig-linux-2.6.16.29/fs/proc/proc_misc.c ./fs/proc/proc_misc.c
    27.5 ---- ../orig-linux-2.6.16.29/fs/proc/proc_misc.c	2006-09-12 19:02:10.000000000 +0100
    27.6 -+++ ./fs/proc/proc_misc.c	2006-09-19 14:06:00.000000000 +0100
    27.7 -@@ -433,7 +433,7 @@ static int show_stat(struct seq_file *p,
    27.8 - 		(unsigned long long)cputime64_to_clock_t(irq),
    27.9 - 		(unsigned long long)cputime64_to_clock_t(softirq),
   27.10 - 		(unsigned long long)cputime64_to_clock_t(steal));
   27.11 --	for_each_online_cpu(i) {
   27.12 -+	for_each_cpu(i) {
   27.13 - 
   27.14 - 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
   27.15 - 		user = kstat_cpu(i).cpustat.user;
    28.1 --- a/patches/linux-2.6.16.30/xenoprof-generic.patch	Mon Nov 27 13:50:01 2006 +0000
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,662 +0,0 @@
    28.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
    28.5 ---- ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c	2006-11-06 14:46:52.000000000 -0800
    28.6 -+++ ./drivers/oprofile/buffer_sync.c	2006-11-06 15:16:52.000000000 -0800
    28.7 -@@ -6,6 +6,10 @@
    28.8 -  *
    28.9 -  * @author John Levon <levon@movementarian.org>
   28.10 -  *
   28.11 -+ * Modified by Aravind Menon for Xen
   28.12 -+ * These modifications are:
   28.13 -+ * Copyright (C) 2005 Hewlett-Packard Co.
   28.14 -+ *
   28.15 -  * This is the core of the buffer management. Each
   28.16 -  * CPU buffer is processed and entered into the
   28.17 -  * global event buffer. Such processing is necessary
   28.18 -@@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
   28.19 - static DEFINE_SPINLOCK(task_mortuary);
   28.20 - static void process_task_mortuary(void);
   28.21 - 
   28.22 -+static int cpu_current_domain[NR_CPUS];
   28.23 - 
   28.24 - /* Take ownership of the task struct and place it on the
   28.25 -  * list for processing. Only after two full buffer syncs
   28.26 -@@ -146,6 +151,11 @@ static void end_sync(void)
   28.27 - int sync_start(void)
   28.28 - {
   28.29 - 	int err;
   28.30 -+	int i;
   28.31 -+
   28.32 -+	for (i = 0; i < NR_CPUS; i++) {
   28.33 -+		cpu_current_domain[i] = COORDINATOR_DOMAIN;
   28.34 -+	}
   28.35 - 
   28.36 - 	start_cpu_work();
   28.37 - 
   28.38 -@@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
   28.39 - 	last_cookie = INVALID_COOKIE;
   28.40 - }
   28.41 - 
   28.42 --static void add_kernel_ctx_switch(unsigned int in_kernel)
   28.43 -+static void add_cpu_mode_switch(unsigned int cpu_mode)
   28.44 - {
   28.45 - 	add_event_entry(ESCAPE_CODE);
   28.46 --	if (in_kernel)
   28.47 --		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
   28.48 --	else
   28.49 --		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
   28.50 -+	switch (cpu_mode) {
   28.51 -+	case CPU_MODE_USER:
   28.52 -+		add_event_entry(USER_ENTER_SWITCH_CODE);
   28.53 -+		break;
   28.54 -+	case CPU_MODE_KERNEL:
   28.55 -+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
   28.56 -+		break;
   28.57 -+	case CPU_MODE_XEN:
   28.58 -+		add_event_entry(XEN_ENTER_SWITCH_CODE);
   28.59 -+	  	break;
   28.60 -+	default:
   28.61 -+		break;
   28.62 -+	}
   28.63 - }
   28.64 -- 
   28.65 -+
   28.66 -+static void add_domain_switch(unsigned long domain_id)
   28.67 -+{
   28.68 -+	add_event_entry(ESCAPE_CODE);
   28.69 -+	add_event_entry(DOMAIN_SWITCH_CODE);
   28.70 -+	add_event_entry(domain_id);
   28.71 -+}
   28.72 -+
   28.73 - static void
   28.74 - add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
   28.75 - {
   28.76 -@@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
   28.77 -  * for later lookup from userspace.
   28.78 -  */
   28.79 - static int
   28.80 --add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
   28.81 -+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
   28.82 - {
   28.83 --	if (in_kernel) {
   28.84 -+	if (cpu_mode >= CPU_MODE_KERNEL) {
   28.85 - 		add_sample_entry(s->eip, s->event);
   28.86 - 		return 1;
   28.87 - 	} else if (mm) {
   28.88 -@@ -496,15 +522,21 @@ void sync_buffer(int cpu)
   28.89 - 	struct mm_struct *mm = NULL;
   28.90 - 	struct task_struct * new;
   28.91 - 	unsigned long cookie = 0;
   28.92 --	int in_kernel = 1;
   28.93 -+	int cpu_mode = 1;
   28.94 - 	unsigned int i;
   28.95 - 	sync_buffer_state state = sb_buffer_start;
   28.96 - 	unsigned long available;
   28.97 -+	int domain_switch = 0;
   28.98 - 
   28.99 - 	down(&buffer_sem);
  28.100 -  
  28.101 - 	add_cpu_switch(cpu);
  28.102 - 
  28.103 -+	/* We need to assign the first samples in this CPU buffer to the
  28.104 -+	   same domain that we were processing at the last sync_buffer */
  28.105 -+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
  28.106 -+		add_domain_switch(cpu_current_domain[cpu]);
  28.107 -+	}
  28.108 - 	/* Remember, only we can modify tail_pos */
  28.109 - 
  28.110 - 	available = get_slots(cpu_buf);
  28.111 -@@ -512,16 +544,18 @@ void sync_buffer(int cpu)
  28.112 - 	for (i = 0; i < available; ++i) {
  28.113 - 		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
  28.114 -  
  28.115 --		if (is_code(s->eip)) {
  28.116 --			if (s->event <= CPU_IS_KERNEL) {
  28.117 --				/* kernel/userspace switch */
  28.118 --				in_kernel = s->event;
  28.119 -+		if (is_code(s->eip) && !domain_switch) {
  28.120 -+			if (s->event <= CPU_MODE_XEN) {
  28.121 -+				/* xen/kernel/userspace switch */
  28.122 -+				cpu_mode = s->event;
  28.123 - 				if (state == sb_buffer_start)
  28.124 - 					state = sb_sample_start;
  28.125 --				add_kernel_ctx_switch(s->event);
  28.126 -+				add_cpu_mode_switch(s->event);
  28.127 - 			} else if (s->event == CPU_TRACE_BEGIN) {
  28.128 - 				state = sb_bt_start;
  28.129 - 				add_trace_begin();
  28.130 -+			} else if (s->event == CPU_DOMAIN_SWITCH) {
  28.131 -+					domain_switch = 1;				
  28.132 - 			} else {
  28.133 - 				struct mm_struct * oldmm = mm;
  28.134 - 
  28.135 -@@ -535,11 +569,21 @@ void sync_buffer(int cpu)
  28.136 - 				add_user_ctx_switch(new, cookie);
  28.137 - 			}
  28.138 - 		} else {
  28.139 --			if (state >= sb_bt_start &&
  28.140 --			    !add_sample(mm, s, in_kernel)) {
  28.141 --				if (state == sb_bt_start) {
  28.142 --					state = sb_bt_ignore;
  28.143 --					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  28.144 -+			if (domain_switch) {
  28.145 -+				cpu_current_domain[cpu] = s->eip;
  28.146 -+				add_domain_switch(s->eip);
  28.147 -+				domain_switch = 0;
  28.148 -+			} else {
  28.149 -+				if (cpu_current_domain[cpu] !=
  28.150 -+				    COORDINATOR_DOMAIN) {
  28.151 -+					add_sample_entry(s->eip, s->event);
  28.152 -+				}
  28.153 -+				else  if (state >= sb_bt_start &&
  28.154 -+				    !add_sample(mm, s, cpu_mode)) {
  28.155 -+					if (state == sb_bt_start) {
  28.156 -+						state = sb_bt_ignore;
  28.157 -+						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  28.158 -+					}
  28.159 - 				}
  28.160 - 			}
  28.161 - 		}
  28.162 -@@ -548,6 +592,11 @@ void sync_buffer(int cpu)
  28.163 - 	}
  28.164 - 	release_mm(mm);
  28.165 - 
  28.166 -+	/* We reset domain to COORDINATOR at each CPU switch */
  28.167 -+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
  28.168 -+		add_domain_switch(COORDINATOR_DOMAIN);
  28.169 -+	}
  28.170 -+
  28.171 - 	mark_done(cpu);
  28.172 - 
  28.173 - 	up(&buffer_sem);
  28.174 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
  28.175 ---- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c	2006-11-06 14:46:52.000000000 -0800
  28.176 -+++ ./drivers/oprofile/cpu_buffer.c	2006-11-06 14:47:55.000000000 -0800
  28.177 -@@ -6,6 +6,10 @@
  28.178 -  *
  28.179 -  * @author John Levon <levon@movementarian.org>
  28.180 -  *
  28.181 -+ * Modified by Aravind Menon for Xen
  28.182 -+ * These modifications are:
  28.183 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  28.184 -+ *
  28.185 -  * Each CPU has a local buffer that stores PC value/event
  28.186 -  * pairs. We also log context switches when we notice them.
  28.187 -  * Eventually each CPU's buffer is processed into the global
  28.188 -@@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
  28.189 - #define DEFAULT_TIMER_EXPIRE (HZ / 10)
  28.190 - static int work_enabled;
  28.191 - 
  28.192 -+static int32_t current_domain = COORDINATOR_DOMAIN;
  28.193 -+
  28.194 - void free_cpu_buffers(void)
  28.195 - {
  28.196 - 	int i;
  28.197 -@@ -58,7 +64,7 @@ int alloc_cpu_buffers(void)
  28.198 - 			goto fail;
  28.199 -  
  28.200 - 		b->last_task = NULL;
  28.201 --		b->last_is_kernel = -1;
  28.202 -+		b->last_cpu_mode = -1;
  28.203 - 		b->tracing = 0;
  28.204 - 		b->buffer_size = buffer_size;
  28.205 - 		b->tail_pos = 0;
  28.206 -@@ -114,7 +120,7 @@ void cpu_buffer_reset(struct oprofile_cp
  28.207 - 	 * collected will populate the buffer with proper
  28.208 - 	 * values to initialize the buffer
  28.209 - 	 */
  28.210 --	cpu_buf->last_is_kernel = -1;
  28.211 -+	cpu_buf->last_cpu_mode = -1;
  28.212 - 	cpu_buf->last_task = NULL;
  28.213 - }
  28.214 - 
  28.215 -@@ -164,13 +170,13 @@ add_code(struct oprofile_cpu_buffer * bu
  28.216 -  * because of the head/tail separation of the writer and reader
  28.217 -  * of the CPU buffer.
  28.218 -  *
  28.219 -- * is_kernel is needed because on some architectures you cannot
  28.220 -+ * cpu_mode is needed because on some architectures you cannot
  28.221 -  * tell if you are in kernel or user space simply by looking at
  28.222 -- * pc. We tag this in the buffer by generating kernel enter/exit
  28.223 -- * events whenever is_kernel changes
  28.224 -+ * pc. We tag this in the buffer by generating kernel/user (and xen)
  28.225 -+ *  enter events whenever cpu_mode changes
  28.226 -  */
  28.227 - static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
  28.228 --		      int is_kernel, unsigned long event)
  28.229 -+		      int cpu_mode, unsigned long event)
  28.230 - {
  28.231 - 	struct task_struct * task;
  28.232 - 
  28.233 -@@ -181,18 +187,18 @@ static int log_sample(struct oprofile_cp
  28.234 - 		return 0;
  28.235 - 	}
  28.236 - 
  28.237 --	is_kernel = !!is_kernel;
  28.238 --
  28.239 - 	task = current;
  28.240 - 
  28.241 - 	/* notice a switch from user->kernel or vice versa */
  28.242 --	if (cpu_buf->last_is_kernel != is_kernel) {
  28.243 --		cpu_buf->last_is_kernel = is_kernel;
  28.244 --		add_code(cpu_buf, is_kernel);
  28.245 -+	if (cpu_buf->last_cpu_mode != cpu_mode) {
  28.246 -+		cpu_buf->last_cpu_mode = cpu_mode;
  28.247 -+		add_code(cpu_buf, cpu_mode);
  28.248 - 	}
  28.249 --
  28.250 -+	
  28.251 - 	/* notice a task switch */
  28.252 --	if (cpu_buf->last_task != task) {
  28.253 -+	/* if not processing other domain samples */
  28.254 -+	if ((cpu_buf->last_task != task) &&
  28.255 -+	    (current_domain == COORDINATOR_DOMAIN)) {
  28.256 - 		cpu_buf->last_task = task;
  28.257 - 		add_code(cpu_buf, (unsigned long)task);
  28.258 - 	}
  28.259 -@@ -269,6 +275,25 @@ void oprofile_add_trace(unsigned long pc
  28.260 - 	add_sample(cpu_buf, pc, 0);
  28.261 - }
  28.262 - 
  28.263 -+int oprofile_add_domain_switch(int32_t domain_id)
  28.264 -+{
  28.265 -+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
  28.266 -+
  28.267 -+	/* should have space for switching into and out of domain 
  28.268 -+	   (2 slots each) plus one sample and one cpu mode switch */
  28.269 -+	if (((nr_available_slots(cpu_buf) < 6) && 
  28.270 -+	     (domain_id != COORDINATOR_DOMAIN)) ||
  28.271 -+	    (nr_available_slots(cpu_buf) < 2))
  28.272 -+		return 0;
  28.273 -+
  28.274 -+	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
  28.275 -+	add_sample(cpu_buf, domain_id, 0);
  28.276 -+
  28.277 -+	current_domain = domain_id;
  28.278 -+
  28.279 -+	return 1;
  28.280 -+}
  28.281 -+
  28.282 - /*
  28.283 -  * This serves to avoid cpu buffer overflow, and makes sure
  28.284 -  * the task mortuary progresses
  28.285 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
  28.286 ---- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h	2006-11-06 14:46:52.000000000 -0800
  28.287 -+++ ./drivers/oprofile/cpu_buffer.h	2006-11-06 14:47:55.000000000 -0800
  28.288 -@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
  28.289 - 	volatile unsigned long tail_pos;
  28.290 - 	unsigned long buffer_size;
  28.291 - 	struct task_struct * last_task;
  28.292 --	int last_is_kernel;
  28.293 -+	int last_cpu_mode;
  28.294 - 	int tracing;
  28.295 - 	struct op_sample * buffer;
  28.296 - 	unsigned long sample_received;
  28.297 -@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
  28.298 - void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
  28.299 - 
  28.300 - /* transient events for the CPU buffer -> event buffer */
  28.301 --#define CPU_IS_KERNEL 1
  28.302 --#define CPU_TRACE_BEGIN 2
  28.303 -+#define CPU_MODE_USER           0
  28.304 -+#define CPU_MODE_KERNEL         1
  28.305 -+#define CPU_MODE_XEN            2
  28.306 -+#define CPU_TRACE_BEGIN         3
  28.307 -+#define CPU_DOMAIN_SWITCH       4
  28.308 - 
  28.309 - #endif /* OPROFILE_CPU_BUFFER_H */
  28.310 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
  28.311 ---- ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h	2006-11-06 14:46:52.000000000 -0800
  28.312 -+++ ./drivers/oprofile/event_buffer.h	2006-11-06 14:47:55.000000000 -0800
  28.313 -@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
  28.314 - #define CPU_SWITCH_CODE 		2
  28.315 - #define COOKIE_SWITCH_CODE 		3
  28.316 - #define KERNEL_ENTER_SWITCH_CODE	4
  28.317 --#define KERNEL_EXIT_SWITCH_CODE		5
  28.318 -+#define USER_ENTER_SWITCH_CODE		5
  28.319 - #define MODULE_LOADED_CODE		6
  28.320 - #define CTX_TGID_CODE			7
  28.321 - #define TRACE_BEGIN_CODE		8
  28.322 - #define TRACE_END_CODE			9
  28.323 -+#define XEN_ENTER_SWITCH_CODE		10
  28.324 -+#define DOMAIN_SWITCH_CODE		11
  28.325 -  
  28.326 - #define INVALID_COOKIE ~0UL
  28.327 - #define NO_COOKIE 0UL
  28.328 - 
  28.329 -+/* Constant used to refer to coordinator domain (Xen) */
  28.330 -+#define COORDINATOR_DOMAIN -1
  28.331 -+
  28.332 - /* add data to the event buffer */
  28.333 - void add_event_entry(unsigned long data);
  28.334 -  
  28.335 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
  28.336 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c	2006-11-06 14:46:52.000000000 -0800
  28.337 -+++ ./drivers/oprofile/oprof.c	2006-11-06 14:47:55.000000000 -0800
  28.338 -@@ -5,6 +5,10 @@
  28.339 -  * @remark Read the file COPYING
  28.340 -  *
  28.341 -  * @author John Levon <levon@movementarian.org>
  28.342 -+ *
  28.343 -+ * Modified by Aravind Menon for Xen
  28.344 -+ * These modifications are:
  28.345 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  28.346 -  */
  28.347 - 
  28.348 - #include <linux/kernel.h>
  28.349 -@@ -19,7 +23,7 @@
  28.350 - #include "cpu_buffer.h"
  28.351 - #include "buffer_sync.h"
  28.352 - #include "oprofile_stats.h"
  28.353 -- 
  28.354 -+
  28.355 - struct oprofile_operations oprofile_ops;
  28.356 - 
  28.357 - unsigned long oprofile_started;
  28.358 -@@ -33,6 +37,32 @@ static DECLARE_MUTEX(start_sem);
  28.359 -  */
  28.360 - static int timer = 0;
  28.361 - 
  28.362 -+int oprofile_set_active(int active_domains[], unsigned int adomains)
  28.363 -+{
  28.364 -+	int err;
  28.365 -+
  28.366 -+	if (!oprofile_ops.set_active)
  28.367 -+		return -EINVAL;
  28.368 -+
  28.369 -+	down(&start_sem);
  28.370 -+	err = oprofile_ops.set_active(active_domains, adomains);
  28.371 -+	up(&start_sem);
  28.372 -+	return err;
  28.373 -+}
  28.374 -+
  28.375 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
  28.376 -+{
  28.377 -+	int err;
  28.378 -+
  28.379 -+	if (!oprofile_ops.set_passive)
  28.380 -+		return -EINVAL;
  28.381 -+
  28.382 -+	down(&start_sem);
  28.383 -+	err = oprofile_ops.set_passive(passive_domains, pdomains);
  28.384 -+	up(&start_sem);
  28.385 -+	return err;
  28.386 -+}
  28.387 -+
  28.388 - int oprofile_setup(void)
  28.389 - {
  28.390 - 	int err;
  28.391 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
  28.392 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h	2006-11-06 14:46:52.000000000 -0800
  28.393 -+++ ./drivers/oprofile/oprof.h	2006-11-06 14:47:55.000000000 -0800
  28.394 -@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
  28.395 - void oprofile_timer_init(struct oprofile_operations * ops);
  28.396 - 
  28.397 - int oprofile_set_backtrace(unsigned long depth);
  28.398 -+
  28.399 -+int oprofile_set_active(int active_domains[], unsigned int adomains);
  28.400 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
  28.401 -  
  28.402 - #endif /* OPROF_H */
  28.403 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
  28.404 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c	2006-11-06 14:46:52.000000000 -0800
  28.405 -+++ ./drivers/oprofile/oprofile_files.c	2006-11-06 14:47:55.000000000 -0800
  28.406 -@@ -5,15 +5,21 @@
  28.407 -  * @remark Read the file COPYING
  28.408 -  *
  28.409 -  * @author John Levon <levon@movementarian.org>
  28.410 -+ *
  28.411 -+ * Modified by Aravind Menon for Xen
  28.412 -+ * These modifications are:
  28.413 -+ * Copyright (C) 2005 Hewlett-Packard Co.	
  28.414 -  */
  28.415 - 
  28.416 - #include <linux/fs.h>
  28.417 - #include <linux/oprofile.h>
  28.418 -+#include <asm/uaccess.h>
  28.419 -+#include <linux/ctype.h>
  28.420 - 
  28.421 - #include "event_buffer.h"
  28.422 - #include "oprofile_stats.h"
  28.423 - #include "oprof.h"
  28.424 -- 
  28.425 -+
  28.426 - unsigned long fs_buffer_size = 131072;
  28.427 - unsigned long fs_cpu_buffer_size = 8192;
  28.428 - unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
  28.429 -@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file * 
  28.430 - static struct file_operations dump_fops = {
  28.431 - 	.write		= dump_write,
  28.432 - };
  28.433 -- 
  28.434 -+
  28.435 -+#define TMPBUFSIZE 512
  28.436 -+
  28.437 -+static unsigned int adomains = 0;
  28.438 -+static int active_domains[MAX_OPROF_DOMAINS + 1];
  28.439 -+static DEFINE_MUTEX(adom_mutex);
  28.440 -+
  28.441 -+static ssize_t adomain_write(struct file * file, char const __user * buf, 
  28.442 -+			     size_t count, loff_t * offset)
  28.443 -+{
  28.444 -+	char *tmpbuf;
  28.445 -+	char *startp, *endp;
  28.446 -+	int i;
  28.447 -+	unsigned long val;
  28.448 -+	ssize_t retval = count;
  28.449 -+	
  28.450 -+	if (*offset)
  28.451 -+		return -EINVAL;	
  28.452 -+	if (count > TMPBUFSIZE - 1)
  28.453 -+		return -EINVAL;
  28.454 -+
  28.455 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  28.456 -+		return -ENOMEM;
  28.457 -+
  28.458 -+	if (copy_from_user(tmpbuf, buf, count)) {
  28.459 -+		kfree(tmpbuf);
  28.460 -+		return -EFAULT;
  28.461 -+	}
  28.462 -+	tmpbuf[count] = 0;
  28.463 -+
  28.464 -+	mutex_lock(&adom_mutex);
  28.465 -+
  28.466 -+	startp = tmpbuf;
  28.467 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  28.468 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  28.469 -+		val = simple_strtoul(startp, &endp, 0);
  28.470 -+		if (endp == startp)
  28.471 -+			break;
  28.472 -+		while (ispunct(*endp) || isspace(*endp))
  28.473 -+			endp++;
  28.474 -+		active_domains[i] = val;
  28.475 -+		if (active_domains[i] != val)
  28.476 -+			/* Overflow, force error below */
  28.477 -+			i = MAX_OPROF_DOMAINS + 1;
  28.478 -+		startp = endp;
  28.479 -+	}
  28.480 -+	/* Force error on trailing junk */
  28.481 -+	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  28.482 -+
  28.483 -+	kfree(tmpbuf);
  28.484 -+
  28.485 -+	if (adomains > MAX_OPROF_DOMAINS
  28.486 -+	    || oprofile_set_active(active_domains, adomains)) {
  28.487 -+		adomains = 0;
  28.488 -+		retval = -EINVAL;
  28.489 -+	}
  28.490 -+
  28.491 -+	mutex_unlock(&adom_mutex);
  28.492 -+	return retval;
  28.493 -+}
  28.494 -+
  28.495 -+static ssize_t adomain_read(struct file * file, char __user * buf, 
  28.496 -+			    size_t count, loff_t * offset)
  28.497 -+{
  28.498 -+	char * tmpbuf;
  28.499 -+	size_t len;
  28.500 -+	int i;
  28.501 -+	ssize_t retval;
  28.502 -+
  28.503 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  28.504 -+		return -ENOMEM;
  28.505 -+
  28.506 -+	mutex_lock(&adom_mutex);
  28.507 -+
  28.508 -+	len = 0;
  28.509 -+	for (i = 0; i < adomains; i++)
  28.510 -+		len += snprintf(tmpbuf + len,
  28.511 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  28.512 -+				"%u ", active_domains[i]);
  28.513 -+	WARN_ON(len > TMPBUFSIZE);
  28.514 -+	if (len != 0 && len <= TMPBUFSIZE)
  28.515 -+		tmpbuf[len-1] = '\n';
  28.516 -+
  28.517 -+	mutex_unlock(&adom_mutex);
  28.518 -+
  28.519 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  28.520 -+
  28.521 -+	kfree(tmpbuf);
  28.522 -+	return retval;
  28.523 -+}
  28.524 -+
  28.525 -+
  28.526 -+static struct file_operations active_domain_ops = {
  28.527 -+	.read		= adomain_read,
  28.528 -+	.write		= adomain_write,
  28.529 -+};
  28.530 -+
  28.531 -+static unsigned int pdomains = 0;
  28.532 -+static int passive_domains[MAX_OPROF_DOMAINS];
  28.533 -+static DEFINE_MUTEX(pdom_mutex);
  28.534 -+
  28.535 -+static ssize_t pdomain_write(struct file * file, char const __user * buf, 
  28.536 -+			     size_t count, loff_t * offset)
  28.537 -+{
  28.538 -+	char *tmpbuf;
  28.539 -+	char *startp, *endp;
  28.540 -+	int i;
  28.541 -+	unsigned long val;
  28.542 -+	ssize_t retval = count;
  28.543 -+	
  28.544 -+	if (*offset)
  28.545 -+		return -EINVAL;	
  28.546 -+	if (count > TMPBUFSIZE - 1)
  28.547 -+		return -EINVAL;
  28.548 -+
  28.549 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  28.550 -+		return -ENOMEM;
  28.551 -+
  28.552 -+	if (copy_from_user(tmpbuf, buf, count)) {
  28.553 -+		kfree(tmpbuf);
  28.554 -+		return -EFAULT;
  28.555 -+	}
  28.556 -+	tmpbuf[count] = 0;
  28.557 -+
  28.558 -+	mutex_lock(&pdom_mutex);
  28.559 -+
  28.560 -+	startp = tmpbuf;
  28.561 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  28.562 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  28.563 -+		val = simple_strtoul(startp, &endp, 0);
  28.564 -+		if (endp == startp)
  28.565 -+			break;
  28.566 -+		while (ispunct(*endp) || isspace(*endp))
  28.567 -+			endp++;
  28.568 -+		passive_domains[i] = val;
  28.569 -+		if (passive_domains[i] != val)
  28.570 -+			/* Overflow, force error below */
  28.571 -+			i = MAX_OPROF_DOMAINS + 1;
  28.572 -+		startp = endp;
  28.573 -+	}
  28.574 -+	/* Force error on trailing junk */
  28.575 -+	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  28.576 -+
  28.577 -+	kfree(tmpbuf);
  28.578 -+
  28.579 -+	if (pdomains > MAX_OPROF_DOMAINS
  28.580 -+	    || oprofile_set_passive(passive_domains, pdomains)) {
  28.581 -+		pdomains = 0;
  28.582 -+		retval = -EINVAL;
  28.583 -+	}
  28.584 -+
  28.585 -+	mutex_unlock(&pdom_mutex);
  28.586 -+	return retval;
  28.587 -+}
  28.588 -+
  28.589 -+static ssize_t pdomain_read(struct file * file, char __user * buf, 
  28.590 -+			    size_t count, loff_t * offset)
  28.591 -+{
  28.592 -+	char * tmpbuf;
  28.593 -+	size_t len;
  28.594 -+	int i;
  28.595 -+	ssize_t retval;
  28.596 -+
  28.597 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  28.598 -+		return -ENOMEM;
  28.599 -+
  28.600 -+	mutex_lock(&pdom_mutex);
  28.601 -+
  28.602 -+	len = 0;
  28.603 -+	for (i = 0; i < pdomains; i++)
  28.604 -+		len += snprintf(tmpbuf + len,
  28.605 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  28.606 -+				"%u ", passive_domains[i]);
  28.607 -+	WARN_ON(len > TMPBUFSIZE);
  28.608 -+	if (len != 0 && len <= TMPBUFSIZE)
  28.609 -+		tmpbuf[len-1] = '\n';
  28.610 -+
  28.611 -+	mutex_unlock(&pdom_mutex);
  28.612 -+
  28.613 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  28.614 -+
  28.615 -+	kfree(tmpbuf);
  28.616 -+	return retval;
  28.617 -+}
  28.618 -+
  28.619 -+static struct file_operations passive_domain_ops = {
  28.620 -+	.read		= pdomain_read,
  28.621 -+	.write		= pdomain_write,
  28.622 -+};
  28.623 -+
  28.624 - void oprofile_create_files(struct super_block * sb, struct dentry * root)
  28.625 - {
  28.626 - 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
  28.627 - 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
  28.628 -+	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
  28.629 -+	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
  28.630 - 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
  28.631 - 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
  28.632 - 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
  28.633 -diff -pruN ../orig-linux-2.6.16.29/include/linux/oprofile.h ./include/linux/oprofile.h
  28.634 ---- ../orig-linux-2.6.16.29/include/linux/oprofile.h	2006-11-06 14:46:42.000000000 -0800
  28.635 -+++ ./include/linux/oprofile.h	2006-11-06 14:47:55.000000000 -0800
  28.636 -@@ -16,6 +16,8 @@
  28.637 - #include <linux/types.h>
  28.638 - #include <linux/spinlock.h>
  28.639 - #include <asm/atomic.h>
  28.640 -+
  28.641 -+#include <xen/interface/xenoprof.h>
  28.642 -  
  28.643 - struct super_block;
  28.644 - struct dentry;
  28.645 -@@ -27,6 +29,11 @@ struct oprofile_operations {
  28.646 - 	/* create any necessary configuration files in the oprofile fs.
  28.647 - 	 * Optional. */
  28.648 - 	int (*create_files)(struct super_block * sb, struct dentry * root);
  28.649 -+	/* setup active domains with Xen */
  28.650 -+	int (*set_active)(int *active_domains, unsigned int adomains);
  28.651 -+        /* setup passive domains with Xen */
  28.652 -+        int (*set_passive)(int *passive_domains, unsigned int pdomains);
  28.653 -+	
  28.654 - 	/* Do any necessary interrupt setup. Optional. */
  28.655 - 	int (*setup)(void);
  28.656 - 	/* Do any necessary interrupt shutdown. Optional. */
  28.657 -@@ -68,6 +75,8 @@ void oprofile_add_pc(unsigned long pc, i
  28.658 - /* add a backtrace entry, to be called from the ->backtrace callback */
  28.659 - void oprofile_add_trace(unsigned long eip);
  28.660 - 
  28.661 -+/* add a domain switch entry */
  28.662 -+int oprofile_add_domain_switch(int32_t domain_id);
  28.663 - 
  28.664 - /**
  28.665 -  * Create a file of the given name as a child of the given root, with
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/patches/linux-2.6.16.31/blktap-aio-16_03_06.patch	Mon Nov 27 13:50:02 2006 +0000
    29.3 @@ -0,0 +1,294 @@
    29.4 +diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
    29.5 +--- ../orig-linux-2.6.16.29/fs/aio.c	2006-09-12 19:02:10.000000000 +0100
    29.6 ++++ ./fs/aio.c	2006-09-19 13:58:49.000000000 +0100
    29.7 +@@ -34,6 +34,11 @@
    29.8 + #include <asm/uaccess.h>
    29.9 + #include <asm/mmu_context.h>
   29.10 + 
   29.11 ++#ifdef CONFIG_EPOLL
   29.12 ++#include <linux/poll.h>
   29.13 ++#include <linux/eventpoll.h>
   29.14 ++#endif
   29.15 ++
   29.16 + #if DEBUG > 1
   29.17 + #define dprintk		printk
   29.18 + #else
   29.19 +@@ -1016,6 +1021,10 @@ put_rq:
   29.20 + 	if (waitqueue_active(&ctx->wait))
   29.21 + 		wake_up(&ctx->wait);
   29.22 + 
   29.23 ++#ifdef CONFIG_EPOLL
   29.24 ++	if (ctx->file && waitqueue_active(&ctx->poll_wait))
   29.25 ++		wake_up(&ctx->poll_wait);
   29.26 ++#endif
   29.27 + 	if (ret)
   29.28 + 		put_ioctx(ctx);
   29.29 + 
   29.30 +@@ -1025,6 +1034,8 @@ put_rq:
   29.31 + /* aio_read_evt
   29.32 +  *	Pull an event off of the ioctx's event ring.  Returns the number of 
   29.33 +  *	events fetched (0 or 1 ;-)
   29.34 ++ *	If ent parameter is 0, just returns the number of events that would
   29.35 ++ *	be fetched.
   29.36 +  *	FIXME: make this use cmpxchg.
   29.37 +  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
   29.38 +  */
   29.39 +@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
   29.40 + 
   29.41 + 	head = ring->head % info->nr;
   29.42 + 	if (head != ring->tail) {
   29.43 +-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
   29.44 +-		*ent = *evp;
   29.45 +-		head = (head + 1) % info->nr;
   29.46 +-		smp_mb(); /* finish reading the event before updatng the head */
   29.47 +-		ring->head = head;
   29.48 +-		ret = 1;
   29.49 +-		put_aio_ring_event(evp, KM_USER1);
   29.50 ++		if (ent) { /* event requested */
   29.51 ++			struct io_event *evp =
   29.52 ++				aio_ring_event(info, head, KM_USER1);
   29.53 ++			*ent = *evp;
   29.54 ++			head = (head + 1) % info->nr;
    29.55 ++			/* finish reading the event before updating the head */
   29.56 ++			smp_mb();
   29.57 ++			ring->head = head;
   29.58 ++			ret = 1;
   29.59 ++			put_aio_ring_event(evp, KM_USER1);
   29.60 ++		} else /* only need to know availability */
   29.61 ++			ret = 1;
   29.62 + 	}
   29.63 + 	spin_unlock(&info->ring_lock);
   29.64 + 
   29.65 +@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
   29.66 + 
   29.67 + 	aio_cancel_all(ioctx);
   29.68 + 	wait_for_all_aios(ioctx);
   29.69 ++#ifdef CONFIG_EPOLL
   29.70 ++	/* forget the poll file, but it's up to the user to close it */
   29.71 ++	if (ioctx->file) {
   29.72 ++		ioctx->file->private_data = 0;
   29.73 ++		ioctx->file = 0;
   29.74 ++	}
   29.75 ++#endif
   29.76 + 	put_ioctx(ioctx);	/* once for the lookup */
   29.77 + }
   29.78 + 
   29.79 ++#ifdef CONFIG_EPOLL
   29.80 ++
   29.81 ++static int aio_queue_fd_close(struct inode *inode, struct file *file)
   29.82 ++{
   29.83 ++	struct kioctx *ioctx = file->private_data;
   29.84 ++	if (ioctx) {
   29.85 ++		file->private_data = 0;
   29.86 ++		spin_lock_irq(&ioctx->ctx_lock);
   29.87 ++		ioctx->file = 0;
   29.88 ++		spin_unlock_irq(&ioctx->ctx_lock);
   29.89 ++	}
   29.90 ++	return 0;
   29.91 ++}
   29.92 ++
   29.93 ++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
   29.94 ++{	unsigned int pollflags = 0;
   29.95 ++	struct kioctx *ioctx = file->private_data;
   29.96 ++
   29.97 ++	if (ioctx) {
   29.98 ++
   29.99 ++		spin_lock_irq(&ioctx->ctx_lock);
  29.100 ++		/* Insert inside our poll wait queue */
  29.101 ++		poll_wait(file, &ioctx->poll_wait, wait);
  29.102 ++
  29.103 ++		/* Check our condition */
  29.104 ++		if (aio_read_evt(ioctx, 0))
  29.105 ++			pollflags = POLLIN | POLLRDNORM;
  29.106 ++		spin_unlock_irq(&ioctx->ctx_lock);
  29.107 ++	}
  29.108 ++
  29.109 ++	return pollflags;
  29.110 ++}
  29.111 ++
  29.112 ++static struct file_operations aioq_fops = {
  29.113 ++	.release	= aio_queue_fd_close,
  29.114 ++	.poll		= aio_queue_fd_poll
  29.115 ++};
  29.116 ++
  29.117 ++/* make_aio_fd:
  29.118 ++ *  Create a file descriptor that can be used to poll the event queue.
  29.119 ++ *  Based and piggybacked on the excellent epoll code.
  29.120 ++ */
  29.121 ++
  29.122 ++static int make_aio_fd(struct kioctx *ioctx)
  29.123 ++{
  29.124 ++	int error, fd;
  29.125 ++	struct inode *inode;
  29.126 ++	struct file *file;
  29.127 ++
  29.128 ++	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
  29.129 ++	if (error)
  29.130 ++		return error;
  29.131 ++
  29.132 ++	/* associate the file with the IO context */
  29.133 ++	file->private_data = ioctx;
  29.134 ++	ioctx->file = file;
  29.135 ++	init_waitqueue_head(&ioctx->poll_wait);
  29.136 ++	return fd;
  29.137 ++}
  29.138 ++#endif
  29.139 ++
  29.140 ++
  29.141 + /* sys_io_setup:
  29.142 +  *	Create an aio_context capable of receiving at least nr_events.
  29.143 +  *	ctxp must not point to an aio_context that already exists, and
  29.144 +@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
  29.145 +  *	resources are available.  May fail with -EFAULT if an invalid
  29.146 +  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
  29.147 +  *	implemented.
  29.148 ++ *
  29.149 ++ *	To request a selectable fd, the user context has to be initialized
  29.150 ++ *	to 1, instead of 0, and the return value is the fd.
  29.151 ++ *	This keeps the system call compatible, since a non-zero value
  29.152 ++ *	was not allowed so far.
  29.153 +  */
  29.154 + asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
  29.155 + {
  29.156 + 	struct kioctx *ioctx = NULL;
  29.157 + 	unsigned long ctx;
  29.158 + 	long ret;
  29.159 ++	int make_fd = 0;
  29.160 + 
  29.161 + 	ret = get_user(ctx, ctxp);
  29.162 + 	if (unlikely(ret))
  29.163 + 		goto out;
  29.164 + 
  29.165 + 	ret = -EINVAL;
  29.166 ++#ifdef CONFIG_EPOLL
  29.167 ++	if (ctx == 1) {
  29.168 ++		make_fd = 1;
  29.169 ++		ctx = 0;
  29.170 ++	}
  29.171 ++#endif
  29.172 + 	if (unlikely(ctx || nr_events == 0)) {
  29.173 + 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
  29.174 + 		         ctx, nr_events);
  29.175 +@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
  29.176 + 	ret = PTR_ERR(ioctx);
  29.177 + 	if (!IS_ERR(ioctx)) {
  29.178 + 		ret = put_user(ioctx->user_id, ctxp);
  29.179 +-		if (!ret)
  29.180 +-			return 0;
  29.181 ++#ifdef CONFIG_EPOLL
  29.182 ++		if (make_fd && ret >= 0)
  29.183 ++			ret = make_aio_fd(ioctx);
  29.184 ++#endif
  29.185 ++		if (ret >= 0)
  29.186 ++			return ret;
  29.187 + 
  29.188 + 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
  29.189 + 		io_destroy(ioctx);
  29.190 +diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
  29.191 +--- ../orig-linux-2.6.16.29/fs/eventpoll.c	2006-09-12 19:02:10.000000000 +0100
  29.192 ++++ ./fs/eventpoll.c	2006-09-19 13:58:49.000000000 +0100
  29.193 +@@ -235,8 +235,6 @@ struct ep_pqueue {
  29.194 + 
  29.195 + static void ep_poll_safewake_init(struct poll_safewake *psw);
  29.196 + static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
  29.197 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  29.198 +-		    struct eventpoll *ep);
  29.199 + static int ep_alloc(struct eventpoll **pep);
  29.200 + static void ep_free(struct eventpoll *ep);
  29.201 + static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
  29.202 +@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
  29.203 + static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
  29.204 + 		   int maxevents, long timeout);
  29.205 + static int eventpollfs_delete_dentry(struct dentry *dentry);
  29.206 +-static struct inode *ep_eventpoll_inode(void);
  29.207 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops);
  29.208 + static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
  29.209 + 					      int flags, const char *dev_name,
  29.210 + 					      void *data);
  29.211 +@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
  29.212 + 	 * Creates all the items needed to setup an eventpoll file. That is,
  29.213 + 	 * a file structure, and inode and a free file descriptor.
  29.214 + 	 */
  29.215 +-	error = ep_getfd(&fd, &inode, &file, ep);
  29.216 ++	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
  29.217 + 	if (error)
  29.218 + 		goto eexit_2;
  29.219 + 
  29.220 +@@ -710,8 +708,8 @@ eexit_1:
  29.221 + /*
  29.222 +  * Creates the file descriptor to be used by the epoll interface.
  29.223 +  */
  29.224 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  29.225 +-		    struct eventpoll *ep)
  29.226 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  29.227 ++		    struct eventpoll *ep, struct file_operations *fops)
  29.228 + {
  29.229 + 	struct qstr this;
  29.230 + 	char name[32];
  29.231 +@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
  29.232 + 		goto eexit_1;
  29.233 + 
  29.234 + 	/* Allocates an inode from the eventpoll file system */
  29.235 +-	inode = ep_eventpoll_inode();
  29.236 ++	inode = ep_eventpoll_inode(fops);
  29.237 + 	error = PTR_ERR(inode);
  29.238 + 	if (IS_ERR(inode))
  29.239 + 		goto eexit_2;
  29.240 +@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
  29.241 + 
  29.242 + 	file->f_pos = 0;
  29.243 + 	file->f_flags = O_RDONLY;
  29.244 +-	file->f_op = &eventpoll_fops;
  29.245 ++	file->f_op = fops;
  29.246 + 	file->f_mode = FMODE_READ;
  29.247 + 	file->f_version = 0;
  29.248 + 	file->private_data = ep;
  29.249 +@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
  29.250 + }
  29.251 + 
  29.252 + 
  29.253 +-static struct inode *ep_eventpoll_inode(void)
  29.254 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops)
  29.255 + {
  29.256 + 	int error = -ENOMEM;
  29.257 + 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
  29.258 +@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
  29.259 + 	if (!inode)
  29.260 + 		goto eexit_1;
  29.261 + 
  29.262 +-	inode->i_fop = &eventpoll_fops;
  29.263 ++	inode->i_fop = fops;
  29.264 + 
  29.265 + 	/*
  29.266 + 	 * Mark the inode dirty from the very beginning,
  29.267 +diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
  29.268 +--- ../orig-linux-2.6.16.29/include/linux/aio.h	2006-09-12 19:02:10.000000000 +0100
  29.269 ++++ ./include/linux/aio.h	2006-09-19 13:58:49.000000000 +0100
  29.270 +@@ -191,6 +191,11 @@ struct kioctx {
  29.271 + 	struct aio_ring_info	ring_info;
  29.272 + 
  29.273 + 	struct work_struct	wq;
  29.274 ++#ifdef CONFIG_EPOLL
  29.275 ++	// poll integration
  29.276 ++	wait_queue_head_t       poll_wait;
  29.277 ++	struct file		*file;
  29.278 ++#endif
  29.279 + };
  29.280 + 
  29.281 + /* prototypes */
  29.282 +diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
  29.283 +--- ../orig-linux-2.6.16.29/include/linux/eventpoll.h	2006-09-12 19:02:10.000000000 +0100
  29.284 ++++ ./include/linux/eventpoll.h	2006-09-19 13:58:49.000000000 +0100
  29.285 +@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
  29.286 + }
  29.287 + 
  29.288 + 
  29.289 ++/*
   29.290 ++ * called by aio code to create an fd that can poll the aio event queue
  29.291 ++ */
  29.292 ++struct eventpoll;
  29.293 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  29.294 ++             struct eventpoll *ep, struct file_operations *fops);
  29.295 + #else
  29.296 + 
  29.297 + static inline void eventpoll_init_file(struct file *file) {}
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/patches/linux-2.6.16.31/device_bind.patch	Mon Nov 27 13:50:02 2006 +0000
    30.3 @@ -0,0 +1,15 @@
    30.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/base/bus.c ./drivers/base/bus.c
    30.5 +--- ../orig-linux-2.6.16.29/drivers/base/bus.c	2006-09-12 19:02:10.000000000 +0100
    30.6 ++++ ./drivers/base/bus.c	2006-09-19 13:58:54.000000000 +0100
    30.7 +@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
    30.8 + 		up(&dev->sem);
    30.9 + 		if (dev->parent)
   30.10 + 			up(&dev->parent->sem);
   30.11 ++
   30.12 ++		if (err > 0) 		/* success */
   30.13 ++			err = count;
   30.14 ++		else if (err == 0)	/* driver didn't accept device */
   30.15 ++			err = -ENODEV;
   30.16 + 	}
   30.17 + 	put_device(dev);
   30.18 + 	put_bus(bus);
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/patches/linux-2.6.16.31/fix-hz-suspend.patch	Mon Nov 27 13:50:02 2006 +0000
    31.3 @@ -0,0 +1,26 @@
    31.4 +diff -pruN ../orig-linux-2.6.16.29/kernel/timer.c ./kernel/timer.c
    31.5 +--- ../orig-linux-2.6.16.29/kernel/timer.c	2006-09-12 19:02:10.000000000 +0100
    31.6 ++++ ./kernel/timer.c	2006-09-19 13:58:58.000000000 +0100
    31.7 +@@ -555,6 +555,22 @@ found:
    31.8 + 	}
    31.9 + 	spin_unlock(&base->t_base.lock);
   31.10 + 
   31.11 ++	/*
   31.12 ++	 * It can happen that other CPUs service timer IRQs and increment
   31.13 ++	 * jiffies, but we have not yet got a local timer tick to process
   31.14 ++	 * the timer wheels.  In that case, the expiry time can be before
   31.15 ++	 * jiffies, but since the high-resolution timer here is relative to
   31.16 ++	 * jiffies, the default expression when high-resolution timers are
   31.17 ++	 * not active,
   31.18 ++	 *
   31.19 ++	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
   31.20 ++	 *
   31.21 ++	 * would falsely evaluate to true.  If that is the case, just
   31.22 ++	 * return jiffies so that we can immediately fire the local timer
   31.23 ++	 */
   31.24 ++	if (time_before(expires, jiffies))
   31.25 ++		return jiffies;
   31.26 ++
   31.27 + 	if (time_before(hr_expires, expires))
   31.28 + 		return hr_expires;
   31.29 + 
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/patches/linux-2.6.16.31/fix-ide-cd-pio-mode.patch	Mon Nov 27 13:50:02 2006 +0000
    32.3 @@ -0,0 +1,18 @@
    32.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
    32.5 +--- ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c	2006-09-12 19:02:10.000000000 +0100
    32.6 ++++ ./drivers/ide/ide-lib.c	2006-09-19 13:59:03.000000000 +0100
    32.7 +@@ -410,10 +410,10 @@ void ide_toggle_bounce(ide_drive_t *driv
    32.8 + {
    32.9 + 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
   32.10 + 
   32.11 +-	if (!PCI_DMA_BUS_IS_PHYS) {
   32.12 +-		addr = BLK_BOUNCE_ANY;
   32.13 +-	} else if (on && drive->media == ide_disk) {
   32.14 +-		if (HWIF(drive)->pci_dev)
   32.15 ++	if (on && drive->media == ide_disk) {
   32.16 ++		if (!PCI_DMA_BUS_IS_PHYS)
   32.17 ++			addr = BLK_BOUNCE_ANY;
   32.18 ++		else if (HWIF(drive)->pci_dev)
   32.19 + 			addr = HWIF(drive)->pci_dev->dma_mask;
   32.20 + 	}
   32.21 + 
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/patches/linux-2.6.16.31/i386-mach-io-check-nmi.patch	Mon Nov 27 13:50:02 2006 +0000
    33.3 @@ -0,0 +1,45 @@
    33.4 +diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
    33.5 +--- ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c	2006-09-12 19:02:10.000000000 +0100
    33.6 ++++ ./arch/i386/kernel/traps.c	2006-09-19 13:59:06.000000000 +0100
    33.7 +@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
    33.8 + 
    33.9 + static void io_check_error(unsigned char reason, struct pt_regs * regs)
   33.10 + {
   33.11 +-	unsigned long i;
   33.12 +-
   33.13 + 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
   33.14 + 	show_registers(regs);
   33.15 + 
   33.16 + 	/* Re-enable the IOCK line, wait for a few seconds */
   33.17 +-	reason = (reason & 0xf) | 8;
   33.18 +-	outb(reason, 0x61);
   33.19 +-	i = 2000;
   33.20 +-	while (--i) udelay(1000);
   33.21 +-	reason &= ~8;
   33.22 +-	outb(reason, 0x61);
   33.23 ++	clear_io_check_error(reason);
   33.24 + }
   33.25 + 
   33.26 + static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
   33.27 +diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
   33.28 +--- ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h	2006-09-12 19:02:10.000000000 +0100
   33.29 ++++ ./include/asm-i386/mach-default/mach_traps.h	2006-09-19 13:59:06.000000000 +0100
   33.30 +@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
   33.31 + 	outb(reason, 0x61);
   33.32 + }
   33.33 + 
   33.34 ++static inline void clear_io_check_error(unsigned char reason)
   33.35 ++{
   33.36 ++	unsigned long i;
   33.37 ++
   33.38 ++	reason = (reason & 0xf) | 8;
   33.39 ++	outb(reason, 0x61);
   33.40 ++	i = 2000;
   33.41 ++	while (--i) udelay(1000);
   33.42 ++	reason &= ~8;
   33.43 ++	outb(reason, 0x61);
   33.44 ++}
   33.45 ++
   33.46 + static inline unsigned char get_nmi_reason(void)
   33.47 + {
   33.48 + 	return inb(0x61);
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/patches/linux-2.6.16.31/ipv6-no-autoconf.patch	Mon Nov 27 13:50:02 2006 +0000
    34.3 @@ -0,0 +1,19 @@
    34.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv6/addrconf.c ./net/ipv6/addrconf.c
    34.5 +--- ../orig-linux-2.6.16.29/net/ipv6/addrconf.c	2006-09-12 19:02:10.000000000 +0100
    34.6 ++++ ./net/ipv6/addrconf.c	2006-09-19 13:59:11.000000000 +0100
    34.7 +@@ -2471,6 +2471,7 @@ static void addrconf_dad_start(struct in
    34.8 + 	spin_lock_bh(&ifp->lock);
    34.9 + 
   34.10 + 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
   34.11 ++	    !(dev->flags&IFF_MULTICAST) ||
   34.12 + 	    !(ifp->flags&IFA_F_TENTATIVE)) {
   34.13 + 		ifp->flags &= ~IFA_F_TENTATIVE;
   34.14 + 		spin_unlock_bh(&ifp->lock);
   34.15 +@@ -2555,6 +2556,7 @@ static void addrconf_dad_completed(struc
   34.16 + 	if (ifp->idev->cnf.forwarding == 0 &&
   34.17 + 	    ifp->idev->cnf.rtr_solicits > 0 &&
   34.18 + 	    (dev->flags&IFF_LOOPBACK) == 0 &&
   34.19 ++	    (dev->flags & IFF_MULTICAST) &&
   34.20 + 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
   34.21 + 		struct in6_addr all_routers;
   34.22 + 
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/patches/linux-2.6.16.31/net-csum.patch	Mon Nov 27 13:50:02 2006 +0000
    35.3 @@ -0,0 +1,63 @@
    35.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
    35.5 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-12 19:02:10.000000000 +0100
    35.6 ++++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-19 13:59:15.000000000 +0100
    35.7 +@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
    35.8 + 	if (hdrsize < sizeof(*hdr))
    35.9 + 		return 1;
   35.10 + 
   35.11 +-	hdr->check = ip_nat_cheat_check(~oldip, newip,
   35.12 ++#ifdef CONFIG_XEN
   35.13 ++	if ((*pskb)->proto_csum_blank)
   35.14 ++		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   35.15 ++	else
   35.16 ++#endif
   35.17 ++		hdr->check = ip_nat_cheat_check(~oldip, newip,
   35.18 + 					ip_nat_cheat_check(oldport ^ 0xFFFF,
   35.19 + 							   newport,
   35.20 + 							   hdr->check));
   35.21 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
   35.22 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-12 19:02:10.000000000 +0100
   35.23 ++++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-19 13:59:15.000000000 +0100
   35.24 +@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
   35.25 + 		newport = tuple->dst.u.udp.port;
   35.26 + 		portptr = &hdr->dest;
   35.27 + 	}
   35.28 +-	if (hdr->check) /* 0 is a special case meaning no checksum */
   35.29 +-		hdr->check = ip_nat_cheat_check(~oldip, newip,
   35.30 ++	if (hdr->check) { /* 0 is a special case meaning no checksum */
   35.31 ++#ifdef CONFIG_XEN
   35.32 ++		if ((*pskb)->proto_csum_blank)
   35.33 ++			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   35.34 ++		else
   35.35 ++#endif
   35.36 ++			hdr->check = ip_nat_cheat_check(~oldip, newip,
   35.37 + 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
   35.38 + 							   newport,
   35.39 + 							   hdr->check));
   35.40 ++	}
   35.41 + 	*portptr = newport;
   35.42 + 	return 1;
   35.43 + }
   35.44 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
   35.45 +--- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-12 19:02:10.000000000 +0100
   35.46 ++++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
   35.47 +@@ -17,6 +17,8 @@
   35.48 + #include <net/xfrm.h>
   35.49 + #include <net/icmp.h>
   35.50 + 
   35.51 ++extern int skb_checksum_setup(struct sk_buff *skb);
   35.52 ++
   35.53 + /* Add encapsulation header.
   35.54 +  *
   35.55 +  * In transport mode, the IP header will be moved forward to make space
   35.56 +@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
   35.57 + 	struct xfrm_state *x = dst->xfrm;
   35.58 + 	int err;
   35.59 + 	
   35.60 ++	err = skb_checksum_setup(skb);
   35.61 ++	if (err)
   35.62 ++		goto error_nolock;
   35.63 ++
   35.64 + 	if (skb->ip_summed == CHECKSUM_HW) {
   35.65 + 		err = skb_checksum_help(skb, 0);
   35.66 + 		if (err)
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/patches/linux-2.6.16.31/net-gso-0-base.patch	Mon Nov 27 13:50:02 2006 +0000
    36.3 @@ -0,0 +1,2835 @@
    36.4 +diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
    36.5 +--- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt	2006-09-12 19:02:10.000000000 +0100
    36.6 ++++ ./Documentation/networking/netdevices.txt	2006-09-19 13:59:20.000000000 +0100
    36.7 +@@ -42,9 +42,9 @@ dev->get_stats:
    36.8 + 	Context: nominally process, but don't sleep inside an rwlock
    36.9 + 
   36.10 + dev->hard_start_xmit:
   36.11 +-	Synchronization: dev->xmit_lock spinlock.
   36.12 ++	Synchronization: netif_tx_lock spinlock.
   36.13 + 	When the driver sets NETIF_F_LLTX in dev->features this will be
   36.14 +-	called without holding xmit_lock. In this case the driver 
   36.15 ++	called without holding netif_tx_lock. In this case the driver
   36.16 + 	has to lock by itself when needed. It is recommended to use a try lock
   36.17 + 	for this and return -1 when the spin lock fails. 
   36.18 + 	The locking there should also properly protect against 
   36.19 +@@ -62,12 +62,12 @@ dev->hard_start_xmit:
   36.20 + 	  Only valid when NETIF_F_LLTX is set.
   36.21 + 
   36.22 + dev->tx_timeout:
   36.23 +-	Synchronization: dev->xmit_lock spinlock.
   36.24 ++	Synchronization: netif_tx_lock spinlock.
   36.25 + 	Context: BHs disabled
   36.26 + 	Notes: netif_queue_stopped() is guaranteed true
   36.27 + 
   36.28 + dev->set_multicast_list:
   36.29 +-	Synchronization: dev->xmit_lock spinlock.
   36.30 ++	Synchronization: netif_tx_lock spinlock.
   36.31 + 	Context: BHs disabled
   36.32 + 
   36.33 + dev->poll:
   36.34 +diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
   36.35 +--- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c	2006-09-12 19:02:10.000000000 +0100
   36.36 ++++ ./drivers/block/aoe/aoenet.c	2006-09-19 13:59:20.000000000 +0100
   36.37 +@@ -95,9 +95,8 @@ mac_addr(char addr[6])
   36.38 + static struct sk_buff *
   36.39 + skb_check(struct sk_buff *skb)
   36.40 + {
   36.41 +-	if (skb_is_nonlinear(skb))
   36.42 + 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
   36.43 +-	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
   36.44 ++	if (skb_linearize(skb)) {
   36.45 + 		dev_kfree_skb(skb);
   36.46 + 		return NULL;
   36.47 + 	}
   36.48 +diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
   36.49 +--- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-12 19:02:10.000000000 +0100
   36.50 ++++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 13:59:20.000000000 +0100
   36.51 +@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
   36.52 + 
   36.53 + 	ipoib_mcast_stop_thread(dev, 0);
   36.54 + 
   36.55 +-	spin_lock_irqsave(&dev->xmit_lock, flags);
   36.56 ++	local_irq_save(flags);
   36.57 ++	netif_tx_lock(dev);
   36.58 + 	spin_lock(&priv->lock);
   36.59 + 
   36.60 + 	/*
   36.61 +@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
   36.62 + 	}
   36.63 + 
   36.64 + 	spin_unlock(&priv->lock);
   36.65 +-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
   36.66 ++	netif_tx_unlock(dev);
   36.67 ++	local_irq_restore(flags);
   36.68 + 
   36.69 + 	/* We have to cancel outside of the spinlock */
   36.70 + 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
   36.71 +diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
   36.72 +--- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c	2006-09-12 19:02:10.000000000 +0100
   36.73 ++++ ./drivers/media/dvb/dvb-core/dvb_net.c	2006-09-19 13:59:20.000000000 +0100
   36.74 +@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
   36.75 + 
   36.76 + 	dvb_net_feed_stop(dev);
   36.77 + 	priv->rx_mode = RX_MODE_UNI;
   36.78 +-	spin_lock_bh(&dev->xmit_lock);
   36.79 ++	netif_tx_lock_bh(dev);
   36.80 + 
   36.81 + 	if (dev->flags & IFF_PROMISC) {
   36.82 + 		dprintk("%s: promiscuous mode\n", dev->name);
   36.83 +@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
   36.84 + 		}
   36.85 + 	}
   36.86 + 
   36.87 +-	spin_unlock_bh(&dev->xmit_lock);
   36.88 ++	netif_tx_unlock_bh(dev);
   36.89 + 	dvb_net_feed_start(dev);
   36.90 + }
   36.91 + 
   36.92 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
   36.93 +--- ../orig-linux-2.6.16.29/drivers/net/8139cp.c	2006-09-12 19:02:10.000000000 +0100
   36.94 ++++ ./drivers/net/8139cp.c	2006-09-19 13:59:20.000000000 +0100
   36.95 +@@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
   36.96 + 	entry = cp->tx_head;
   36.97 + 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   36.98 + 	if (dev->features & NETIF_F_TSO)
   36.99 +-		mss = skb_shinfo(skb)->tso_size;
  36.100 ++		mss = skb_shinfo(skb)->gso_size;
  36.101 + 
  36.102 + 	if (skb_shinfo(skb)->nr_frags == 0) {
  36.103 + 		struct cp_desc *txd = &cp->tx_ring[entry];
  36.104 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
  36.105 +--- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-12 19:02:10.000000000 +0100
  36.106 ++++ ./drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
  36.107 +@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
  36.108 + 		skb = tx_buf->skb;
  36.109 + #ifdef BCM_TSO 
  36.110 + 		/* partial BD completions possible with TSO packets */
  36.111 +-		if (skb_shinfo(skb)->tso_size) {
  36.112 ++		if (skb_shinfo(skb)->gso_size) {
  36.113 + 			u16 last_idx, last_ring_idx;
  36.114 + 
  36.115 + 			last_idx = sw_cons +
  36.116 +@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
  36.117 + 	return 1;
  36.118 + }
  36.119 + 
  36.120 +-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
  36.121 ++/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  36.122 +  * from set_multicast.
  36.123 +  */
  36.124 + static void
  36.125 +@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
  36.126 + }
  36.127 + #endif
  36.128 + 
  36.129 +-/* Called with dev->xmit_lock.
  36.130 ++/* Called with netif_tx_lock.
  36.131 +  * hard_start_xmit is pseudo-lockless - a lock is only required when
  36.132 +  * the tx queue is full. This way, we get the benefit of lockless
  36.133 +  * operations most of the time without the complexities to handle
  36.134 +@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
  36.135 + 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
  36.136 + 	}
  36.137 + #ifdef BCM_TSO 
  36.138 +-	if ((mss = skb_shinfo(skb)->tso_size) &&
  36.139 ++	if ((mss = skb_shinfo(skb)->gso_size) &&
  36.140 + 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
  36.141 + 		u32 tcp_opt_len, ip_tcp_len;
  36.142 + 
  36.143 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
  36.144 +--- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c	2006-09-12 19:02:10.000000000 +0100
  36.145 ++++ ./drivers/net/bonding/bond_main.c	2006-09-19 13:59:20.000000000 +0100
  36.146 +@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
  36.147 + }
  36.148 + 
  36.149 + #define BOND_INTERSECT_FEATURES \
  36.150 +-	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
  36.151 +-	NETIF_F_TSO|NETIF_F_UFO)
  36.152 ++	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
  36.153 + 
  36.154 + /* 
  36.155 +  * Compute the common dev->feature set available to all slaves.  Some
  36.156 +@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
  36.157 + 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
  36.158 + 
  36.159 + 	if ((features & NETIF_F_SG) && 
  36.160 +-	    !(features & (NETIF_F_IP_CSUM |
  36.161 +-			  NETIF_F_NO_CSUM |
  36.162 +-			  NETIF_F_HW_CSUM)))
  36.163 ++	    !(features & NETIF_F_ALL_CSUM))
  36.164 + 		features &= ~NETIF_F_SG;
  36.165 + 
  36.166 + 	/* 
  36.167 +@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
  36.168 + 	 */
  36.169 + 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
  36.170 + 
  36.171 +-	/* don't acquire bond device's xmit_lock when 
  36.172 ++	/* don't acquire bond device's netif_tx_lock when
  36.173 + 	 * transmitting */
  36.174 + 	bond_dev->features |= NETIF_F_LLTX;
  36.175 + 
  36.176 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
  36.177 +--- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-12 19:02:10.000000000 +0100
  36.178 ++++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
  36.179 +@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  36.180 + 	struct cpl_tx_pkt *cpl;
  36.181 + 
  36.182 + #ifdef NETIF_F_TSO
  36.183 +-	if (skb_shinfo(skb)->tso_size) {
  36.184 ++	if (skb_shinfo(skb)->gso_size) {
  36.185 + 		int eth_type;
  36.186 + 		struct cpl_tx_pkt_lso *hdr;
  36.187 + 
  36.188 +@@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  36.189 + 		hdr->ip_hdr_words = skb->nh.iph->ihl;
  36.190 + 		hdr->tcp_hdr_words = skb->h.th->doff;
  36.191 + 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
  36.192 +-						skb_shinfo(skb)->tso_size));
  36.193 ++						skb_shinfo(skb)->gso_size));
  36.194 + 		hdr->len = htonl(skb->len - sizeof(*hdr));
  36.195 + 		cpl = (struct cpl_tx_pkt *)hdr;
  36.196 + 		sge->stats.tx_lso_pkts++;
  36.197 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
  36.198 +--- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-12 19:02:10.000000000 +0100
  36.199 ++++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
  36.200 +@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
  36.201 + 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  36.202 + 	int err;
  36.203 + 
  36.204 +-	if (skb_shinfo(skb)->tso_size) {
  36.205 ++	if (skb_shinfo(skb)->gso_size) {
  36.206 + 		if (skb_header_cloned(skb)) {
  36.207 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  36.208 + 			if (err)
  36.209 +@@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
  36.210 + 		}
  36.211 + 
  36.212 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  36.213 +-		mss = skb_shinfo(skb)->tso_size;
  36.214 ++		mss = skb_shinfo(skb)->gso_size;
  36.215 + 		if (skb->protocol == ntohs(ETH_P_IP)) {
  36.216 + 			skb->nh.iph->tot_len = 0;
  36.217 + 			skb->nh.iph->check = 0;
  36.218 +@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
  36.219 + 		 * tso gets written back prematurely before the data is fully
  36.220 + 		 * DMAd to the controller */
  36.221 + 		if (!skb->data_len && tx_ring->last_tx_tso &&
  36.222 +-				!skb_shinfo(skb)->tso_size) {
  36.223 ++				!skb_shinfo(skb)->gso_size) {
  36.224 + 			tx_ring->last_tx_tso = 0;
  36.225 + 			size -= 4;
  36.226 + 		}
  36.227 +@@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  36.228 + 	}
  36.229 + 
  36.230 + #ifdef NETIF_F_TSO
  36.231 +-	mss = skb_shinfo(skb)->tso_size;
  36.232 ++	mss = skb_shinfo(skb)->gso_size;
  36.233 + 	/* The controller does a simple calculation to 
  36.234 + 	 * make sure there is enough room in the FIFO before
  36.235 + 	 * initiating the DMA for each buffer.  The calc is:
  36.236 +@@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  36.237 + #ifdef NETIF_F_TSO
  36.238 + 	/* Controller Erratum workaround */
  36.239 + 	if (!skb->data_len && tx_ring->last_tx_tso &&
  36.240 +-		!skb_shinfo(skb)->tso_size)
  36.241 ++		!skb_shinfo(skb)->gso_size)
  36.242 + 		count++;
  36.243 + #endif
  36.244 + 
  36.245 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
  36.246 +--- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-12 19:02:10.000000000 +0100
  36.247 ++++ ./drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
  36.248 +@@ -482,9 +482,9 @@ typedef union _ring_type {
  36.249 +  * critical parts:
  36.250 +  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  36.251 +  *	by the arch code for interrupts.
  36.252 +- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
  36.253 ++ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  36.254 +  *	needs dev->priv->lock :-(
  36.255 +- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
  36.256 ++ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  36.257 +  */
  36.258 + 
  36.259 + /* in dev: base, irq */
  36.260 +@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
  36.261 + 
  36.262 + /*
  36.263 +  * nv_start_xmit: dev->hard_start_xmit function
  36.264 +- * Called with dev->xmit_lock held.
  36.265 ++ * Called with netif_tx_lock held.
  36.266 +  */
  36.267 + static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  36.268 + {
  36.269 +@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
  36.270 + 	np->tx_skbuff[nr] = skb;
  36.271 + 
  36.272 + #ifdef NETIF_F_TSO
  36.273 +-	if (skb_shinfo(skb)->tso_size)
  36.274 +-		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  36.275 ++	if (skb_shinfo(skb)->gso_size)
  36.276 ++		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  36.277 + 	else
  36.278 + #endif
  36.279 + 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  36.280 +@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
  36.281 + 
  36.282 + /*
  36.283 +  * nv_tx_timeout: dev->tx_timeout function
  36.284 +- * Called with dev->xmit_lock held.
  36.285 ++ * Called with netif_tx_lock held.
  36.286 +  */
  36.287 + static void nv_tx_timeout(struct net_device *dev)
  36.288 + {
  36.289 +@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
  36.290 + 		 * Changing the MTU is a rare event, it shouldn't matter.
  36.291 + 		 */
  36.292 + 		disable_irq(dev->irq);
  36.293 +-		spin_lock_bh(&dev->xmit_lock);
  36.294 ++		netif_tx_lock_bh(dev);
  36.295 + 		spin_lock(&np->lock);
  36.296 + 		/* stop engines */
  36.297 + 		nv_stop_rx(dev);
  36.298 +@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
  36.299 + 		nv_start_rx(dev);
  36.300 + 		nv_start_tx(dev);
  36.301 + 		spin_unlock(&np->lock);
  36.302 +-		spin_unlock_bh(&dev->xmit_lock);
  36.303 ++		netif_tx_unlock_bh(dev);
  36.304 + 		enable_irq(dev->irq);
  36.305 + 	}
  36.306 + 	return 0;
  36.307 +@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
  36.308 + 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  36.309 + 
  36.310 + 	if (netif_running(dev)) {
  36.311 +-		spin_lock_bh(&dev->xmit_lock);
  36.312 ++		netif_tx_lock_bh(dev);
  36.313 + 		spin_lock_irq(&np->lock);
  36.314 + 
  36.315 + 		/* stop rx engine */
  36.316 +@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
  36.317 + 		/* restart rx engine */
  36.318 + 		nv_start_rx(dev);
  36.319 + 		spin_unlock_irq(&np->lock);
  36.320 +-		spin_unlock_bh(&dev->xmit_lock);
  36.321 ++		netif_tx_unlock_bh(dev);
  36.322 + 	} else {
  36.323 + 		nv_copy_mac_to_hw(dev);
  36.324 + 	}
  36.325 +@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
  36.326 + 
  36.327 + /*
  36.328 +  * nv_set_multicast: dev->set_multicast function
  36.329 +- * Called with dev->xmit_lock held.
  36.330 ++ * Called with netif_tx_lock held.
  36.331 +  */
  36.332 + static void nv_set_multicast(struct net_device *dev)
  36.333 + {
  36.334 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
  36.335 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c	2006-09-12 19:02:10.000000000 +0100
  36.336 ++++ ./drivers/net/hamradio/6pack.c	2006-09-19 13:59:20.000000000 +0100
  36.337 +@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
  36.338 + {
  36.339 + 	struct sockaddr_ax25 *sa = addr;
  36.340 + 
  36.341 +-	spin_lock_irq(&dev->xmit_lock);
  36.342 ++	netif_tx_lock_bh(dev);
  36.343 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  36.344 +-	spin_unlock_irq(&dev->xmit_lock);
  36.345 ++	netif_tx_unlock_bh(dev);
  36.346 + 
  36.347 + 	return 0;
  36.348 + }
  36.349 +@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
  36.350 + 			break;
  36.351 + 		}
  36.352 + 
  36.353 +-		spin_lock_irq(&dev->xmit_lock);
  36.354 ++		netif_tx_lock_bh(dev);
  36.355 + 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
  36.356 +-		spin_unlock_irq(&dev->xmit_lock);
  36.357 ++		netif_tx_unlock_bh(dev);
  36.358 + 
  36.359 + 		err = 0;
  36.360 + 		break;
  36.361 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
  36.362 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c	2006-09-12 19:02:10.000000000 +0100
  36.363 ++++ ./drivers/net/hamradio/mkiss.c	2006-09-19 13:59:20.000000000 +0100
  36.364 +@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
  36.365 + {
  36.366 + 	struct sockaddr_ax25 *sa = addr;
  36.367 + 
  36.368 +-	spin_lock_irq(&dev->xmit_lock);
  36.369 ++	netif_tx_lock_bh(dev);
  36.370 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  36.371 +-	spin_unlock_irq(&dev->xmit_lock);
  36.372 ++	netif_tx_unlock_bh(dev);
  36.373 + 
  36.374 + 	return 0;
  36.375 + }
  36.376 +@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
  36.377 + 			break;
  36.378 + 		}
  36.379 + 
  36.380 +-		spin_lock_irq(&dev->xmit_lock);
  36.381 ++		netif_tx_lock_bh(dev);
  36.382 + 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
  36.383 +-		spin_unlock_irq(&dev->xmit_lock);
  36.384 ++		netif_tx_unlock_bh(dev);
  36.385 + 
  36.386 + 		err = 0;
  36.387 + 		break;
  36.388 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
  36.389 +--- ../orig-linux-2.6.16.29/drivers/net/ifb.c	2006-09-12 19:02:10.000000000 +0100
  36.390 ++++ ./drivers/net/ifb.c	2006-09-19 13:59:20.000000000 +0100
  36.391 +@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
  36.392 + 	dp->st_task_enter++;
  36.393 + 	if ((skb = skb_peek(&dp->tq)) == NULL) {
  36.394 + 		dp->st_txq_refl_try++;
  36.395 +-		if (spin_trylock(&_dev->xmit_lock)) {
  36.396 ++		if (netif_tx_trylock(_dev)) {
  36.397 + 			dp->st_rxq_enter++;
  36.398 + 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
  36.399 + 				skb_queue_tail(&dp->tq, skb);
  36.400 + 				dp->st_rx2tx_tran++;
  36.401 + 			}
  36.402 +-			spin_unlock(&_dev->xmit_lock);
  36.403 ++			netif_tx_unlock(_dev);
  36.404 + 		} else {
  36.405 + 			/* reschedule */
  36.406 + 			dp->st_rxq_notenter++;
  36.407 +@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
  36.408 + 		}
  36.409 + 	}
  36.410 + 
  36.411 +-	if (spin_trylock(&_dev->xmit_lock)) {
  36.412 ++	if (netif_tx_trylock(_dev)) {
  36.413 + 		dp->st_rxq_check++;
  36.414 + 		if ((skb = skb_peek(&dp->rq)) == NULL) {
  36.415 + 			dp->tasklet_pending = 0;
  36.416 +@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
  36.417 + 				netif_wake_queue(_dev);
  36.418 + 		} else {
  36.419 + 			dp->st_rxq_rsch++;
  36.420 +-			spin_unlock(&_dev->xmit_lock);
  36.421 ++			netif_tx_unlock(_dev);
  36.422 + 			goto resched;
  36.423 + 		}
  36.424 +-		spin_unlock(&_dev->xmit_lock);
  36.425 ++		netif_tx_unlock(_dev);
  36.426 + 	} else {
  36.427 + resched:
  36.428 + 		dp->tasklet_pending = 1;
  36.429 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
  36.430 +--- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c	2006-09-12 19:02:10.000000000 +0100
  36.431 ++++ ./drivers/net/irda/vlsi_ir.c	2006-09-19 13:59:20.000000000 +0100
  36.432 +@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
  36.433 + 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
  36.434 + 			    	break;
  36.435 + 			udelay(100);
  36.436 +-			/* must not sleep here - we are called under xmit_lock! */
  36.437 ++			/* must not sleep here - called under netif_tx_lock! */
  36.438 + 		}
  36.439 + 	}
  36.440 + 
  36.441 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
  36.442 +--- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-12 19:02:10.000000000 +0100
  36.443 ++++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
  36.444 +@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  36.445 + 	uint16_t ipcse, tucse, mss;
  36.446 + 	int err;
  36.447 + 
  36.448 +-	if(likely(skb_shinfo(skb)->tso_size)) {
  36.449 ++	if(likely(skb_shinfo(skb)->gso_size)) {
  36.450 + 		if (skb_header_cloned(skb)) {
  36.451 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  36.452 + 			if (err)
  36.453 +@@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  36.454 + 		}
  36.455 + 
  36.456 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  36.457 +-		mss = skb_shinfo(skb)->tso_size;
  36.458 ++		mss = skb_shinfo(skb)->gso_size;
  36.459 + 		skb->nh.iph->tot_len = 0;
  36.460 + 		skb->nh.iph->check = 0;
  36.461 + 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
  36.462 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
  36.463 +--- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-12 19:02:10.000000000 +0100
  36.464 ++++ ./drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
  36.465 +@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
  36.466 + 	struct iphdr *iph = skb->nh.iph;
  36.467 + 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
  36.468 + 	unsigned int doffset = (iph->ihl + th->doff) * 4;
  36.469 +-	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
  36.470 ++	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
  36.471 + 	unsigned int offset = 0;
  36.472 + 	u32 seq = ntohl(th->seq);
  36.473 + 	u16 id  = ntohs(iph->id);
  36.474 +@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
  36.475 + #endif
  36.476 + 
  36.477 + #ifdef LOOPBACK_TSO
  36.478 +-	if (skb_shinfo(skb)->tso_size) {
  36.479 ++	if (skb_shinfo(skb)->gso_size) {
  36.480 + 		BUG_ON(skb->protocol != htons(ETH_P_IP));
  36.481 + 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
  36.482 + 
  36.483 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
  36.484 +--- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c	2006-09-12 19:02:10.000000000 +0100
  36.485 ++++ ./drivers/net/mv643xx_eth.c	2006-09-19 13:59:20.000000000 +0100
  36.486 +@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
  36.487 + 
  36.488 + #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
  36.489 + 	if (has_tiny_unaligned_frags(skb)) {
  36.490 +-		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
  36.491 ++		if (__skb_linearize(skb)) {
  36.492 + 			stats->tx_dropped++;
  36.493 + 			printk(KERN_DEBUG "%s: failed to linearize tiny "
  36.494 + 					"unaligned fragment\n", dev->name);
  36.495 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
  36.496 +--- ../orig-linux-2.6.16.29/drivers/net/natsemi.c	2006-09-12 19:02:10.000000000 +0100
  36.497 ++++ ./drivers/net/natsemi.c	2006-09-19 13:59:20.000000000 +0100
  36.498 +@@ -323,12 +323,12 @@ performance critical codepaths:
  36.499 + The rx process only runs in the interrupt handler. Access from outside
  36.500 + the interrupt handler is only permitted after disable_irq().
  36.501 + 
  36.502 +-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
  36.503 ++The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
  36.504 + is set, then access is permitted under spin_lock_irq(&np->lock).
  36.505 + 
  36.506 + Thus configuration functions that want to access everything must call
  36.507 + 	disable_irq(dev->irq);
  36.508 +-	spin_lock_bh(dev->xmit_lock);
  36.509 ++	netif_tx_lock_bh(dev);
  36.510 + 	spin_lock_irq(&np->lock);
  36.511 + 
  36.512 + IV. Notes
  36.513 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
  36.514 +--- ../orig-linux-2.6.16.29/drivers/net/r8169.c	2006-09-12 19:02:10.000000000 +0100
  36.515 ++++ ./drivers/net/r8169.c	2006-09-19 13:59:20.000000000 +0100
  36.516 +@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
  36.517 + static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
  36.518 + {
  36.519 + 	if (dev->features & NETIF_F_TSO) {
  36.520 +-		u32 mss = skb_shinfo(skb)->tso_size;
  36.521 ++		u32 mss = skb_shinfo(skb)->gso_size;
  36.522 + 
  36.523 + 		if (mss)
  36.524 + 			return LargeSend | ((mss & MSSMask) << MSSShift);
  36.525 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
  36.526 +--- ../orig-linux-2.6.16.29/drivers/net/s2io.c	2006-09-12 19:02:10.000000000 +0100
  36.527 ++++ ./drivers/net/s2io.c	2006-09-19 13:59:20.000000000 +0100
  36.528 +@@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
  36.529 + 	txdp->Control_1 = 0;
  36.530 + 	txdp->Control_2 = 0;
  36.531 + #ifdef NETIF_F_TSO
  36.532 +-	mss = skb_shinfo(skb)->tso_size;
  36.533 +-	if (mss) {
  36.534 ++	mss = skb_shinfo(skb)->gso_size;
  36.535 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
  36.536 + 		txdp->Control_1 |= TXD_TCP_LSO_EN;
  36.537 + 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
  36.538 + 	}
  36.539 +@@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
  36.540 + 	}
  36.541 + 
  36.542 + 	frg_len = skb->len - skb->data_len;
  36.543 +-	if (skb_shinfo(skb)->ufo_size) {
  36.544 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
  36.545 + 		int ufo_size;
  36.546 + 
  36.547 +-		ufo_size = skb_shinfo(skb)->ufo_size;
  36.548 ++		ufo_size = skb_shinfo(skb)->gso_size;
  36.549 + 		ufo_size &= ~7;
  36.550 + 		txdp->Control_1 |= TXD_UFO_EN;
  36.551 + 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
  36.552 +@@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
  36.553 + 	txdp->Host_Control = (unsigned long) skb;
  36.554 + 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
  36.555 + 
  36.556 +-	if (skb_shinfo(skb)->ufo_size)
  36.557 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  36.558 + 		txdp->Control_1 |= TXD_UFO_EN;
  36.559 + 
  36.560 + 	frg_cnt = skb_shinfo(skb)->nr_frags;
  36.561 +@@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
  36.562 + 		    (sp->pdev, frag->page, frag->page_offset,
  36.563 + 		     frag->size, PCI_DMA_TODEVICE);
  36.564 + 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
  36.565 +-		if (skb_shinfo(skb)->ufo_size)
  36.566 ++		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  36.567 + 			txdp->Control_1 |= TXD_UFO_EN;
  36.568 + 	}
  36.569 + 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
  36.570 + 
  36.571 +-	if (skb_shinfo(skb)->ufo_size)
  36.572 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  36.573 + 		frg_cnt++; /* as Txd0 was used for inband header */
  36.574 + 
  36.575 + 	tx_fifo = mac_control->tx_FIFO_start[queue];
  36.576 +@@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
  36.577 + 	if (mss)
  36.578 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  36.579 + #endif
  36.580 +-	if (skb_shinfo(skb)->ufo_size)
  36.581 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  36.582 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  36.583 + 	writeq(val64, &tx_fifo->List_Control);
  36.584 + 
  36.585 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
  36.586 +--- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-12 19:02:10.000000000 +0100
  36.587 ++++ ./drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
  36.588 +@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
  36.589 + 	count = sizeof(dma_addr_t) / sizeof(u32);
  36.590 + 	count += skb_shinfo(skb)->nr_frags * count;
  36.591 + 
  36.592 +-	if (skb_shinfo(skb)->tso_size)
  36.593 ++	if (skb_shinfo(skb)->gso_size)
  36.594 + 		++count;
  36.595 + 
  36.596 + 	if (skb->ip_summed == CHECKSUM_HW)
  36.597 +@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
  36.598 + 	}
  36.599 + 
  36.600 + 	/* Check for TCP Segmentation Offload */
  36.601 +-	mss = skb_shinfo(skb)->tso_size;
  36.602 ++	mss = skb_shinfo(skb)->gso_size;
  36.603 + 	if (mss != 0) {
  36.604 + 		/* just drop the packet if non-linear expansion fails */
  36.605 + 		if (skb_header_cloned(skb) &&
  36.606 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
  36.607 +--- ../orig-linux-2.6.16.29/drivers/net/tg3.c	2006-09-12 19:02:10.000000000 +0100
  36.608 ++++ ./drivers/net/tg3.c	2006-09-19 13:59:20.000000000 +0100
  36.609 +@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
  36.610 + #if TG3_TSO_SUPPORT != 0
  36.611 + 	mss = 0;
  36.612 + 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
  36.613 +-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
  36.614 ++	    (mss = skb_shinfo(skb)->gso_size) != 0) {
  36.615 + 		int tcp_opt_len, ip_tcp_len;
  36.616 + 
  36.617 + 		if (skb_header_cloned(skb) &&
  36.618 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
  36.619 +--- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c	2006-09-12 19:02:10.000000000 +0100
  36.620 ++++ ./drivers/net/tulip/winbond-840.c	2006-09-19 13:59:20.000000000 +0100
  36.621 +@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
  36.622 +  * - get_stats:
  36.623 +  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
  36.624 +  * - hard_start_xmit:
  36.625 +- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
  36.626 ++ * 	synchronize_irq + netif_tx_disable;
  36.627 +  * - tx_timeout:
  36.628 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  36.629 ++ * 	netif_device_detach + netif_tx_disable;
  36.630 +  * - set_multicast_list
  36.631 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  36.632 ++ * 	netif_device_detach + netif_tx_disable;
  36.633 +  * - interrupt handler
  36.634 +  * 	doesn't touch hw if not present, synchronize_irq waits for
  36.635 +  * 	running instances of the interrupt handler.
  36.636 +@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
  36.637 + 		netif_device_detach(dev);
  36.638 + 		update_csr6(dev, 0);
  36.639 + 		iowrite32(0, ioaddr + IntrEnable);
  36.640 +-		netif_stop_queue(dev);
  36.641 + 		spin_unlock_irq(&np->lock);
  36.642 + 
  36.643 +-		spin_unlock_wait(&dev->xmit_lock);
  36.644 + 		synchronize_irq(dev->irq);
  36.645 ++		netif_tx_disable(dev);
  36.646 + 	
  36.647 + 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
  36.648 + 
  36.649 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  36.650 +--- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-12 19:02:10.000000000 +0100
  36.651 ++++ ./drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  36.652 +@@ -340,7 +340,7 @@ enum state_values {
  36.653 + #endif
  36.654 + 
  36.655 + #if defined(NETIF_F_TSO)
  36.656 +-#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
  36.657 ++#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
  36.658 + #define TSO_NUM_DESCRIPTORS	2
  36.659 + #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
  36.660 + #else
  36.661 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
  36.662 +--- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c	2006-09-12 19:02:10.000000000 +0100
  36.663 ++++ ./drivers/net/via-velocity.c	2006-09-19 13:59:20.000000000 +0100
  36.664 +@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
  36.665 + 
  36.666 + 	int pktlen = skb->len;
  36.667 + 
  36.668 ++#ifdef VELOCITY_ZERO_COPY_SUPPORT
  36.669 ++	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
  36.670 ++		kfree_skb(skb);
  36.671 ++		return 0;
  36.672 ++	}
  36.673 ++#endif
  36.674 ++
  36.675 + 	spin_lock_irqsave(&vptr->lock, flags);
  36.676 + 
  36.677 + 	index = vptr->td_curr[qnum];
  36.678 +@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
  36.679 + 	 */
  36.680 + 	if (pktlen < ETH_ZLEN) {
  36.681 + 		/* Cannot occur until ZC support */
  36.682 +-		if(skb_linearize(skb, GFP_ATOMIC))
  36.683 +-			return 0; 
  36.684 + 		pktlen = ETH_ZLEN;
  36.685 + 		memcpy(tdinfo->buf, skb->data, skb->len);
  36.686 + 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
  36.687 +@@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff 
  36.688 + 		int nfrags = skb_shinfo(skb)->nr_frags;
  36.689 + 		tdinfo->skb = skb;
  36.690 + 		if (nfrags > 6) {
  36.691 +-			skb_linearize(skb, GFP_ATOMIC);
  36.692 + 			memcpy(tdinfo->buf, skb->data, skb->len);
  36.693 + 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
  36.694 + 			td_ptr->tdesc0.pktsize = 
  36.695 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
  36.696 +--- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c	2006-09-12 19:02:10.000000000 +0100
  36.697 ++++ ./drivers/net/wireless/orinoco.c	2006-09-19 13:59:20.000000000 +0100
  36.698 +@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
  36.699 + 	/* Set promiscuity / multicast*/
  36.700 + 	priv->promiscuous = 0;
  36.701 + 	priv->mc_count = 0;
  36.702 +-	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
  36.703 ++
  36.704 ++	/* FIXME: what about netif_tx_lock */
  36.705 ++	__orinoco_set_multicast_list(dev);
  36.706 + 
  36.707 + 	return 0;
  36.708 + }
  36.709 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
  36.710 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c	2006-09-12 19:02:10.000000000 +0100
  36.711 ++++ ./drivers/s390/net/qeth_eddp.c	2006-09-19 13:59:20.000000000 +0100
  36.712 +@@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
  36.713 +        }
  36.714 + 	tcph = eddp->skb->h.th;
  36.715 + 	while (eddp->skb_offset < eddp->skb->len) {
  36.716 +-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
  36.717 ++		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
  36.718 + 			       (int)(eddp->skb->len - eddp->skb_offset));
  36.719 + 		/* prepare qdio hdr */
  36.720 + 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
  36.721 +@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
  36.722 + 	
  36.723 + 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
  36.724 + 	/* can we put multiple skbs in one page? */
  36.725 +-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
  36.726 ++	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
  36.727 + 	if (skbs_per_page > 1){
  36.728 +-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
  36.729 ++		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
  36.730 + 				 skbs_per_page + 1;
  36.731 + 		ctx->elements_per_skb = 1;
  36.732 + 	} else {
  36.733 + 		/* no -> how many elements per skb? */
  36.734 +-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
  36.735 ++		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
  36.736 + 				     PAGE_SIZE) >> PAGE_SHIFT;
  36.737 + 		ctx->num_pages = ctx->elements_per_skb *
  36.738 +-				 (skb_shinfo(skb)->tso_segs + 1);
  36.739 ++				 (skb_shinfo(skb)->gso_segs + 1);
  36.740 + 	}
  36.741 + 	ctx->num_elements = ctx->elements_per_skb *
  36.742 +-			    (skb_shinfo(skb)->tso_segs + 1);
  36.743 ++			    (skb_shinfo(skb)->gso_segs + 1);
  36.744 + }
  36.745 + 
  36.746 + static inline struct qeth_eddp_context *
  36.747 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  36.748 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-12 19:02:10.000000000 +0100
  36.749 ++++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  36.750 +@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  36.751 + 	queue = card->qdio.out_qs
  36.752 + 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  36.753 + 
  36.754 +-	if (skb_shinfo(skb)->tso_size)
  36.755 ++	if (skb_shinfo(skb)->gso_size)
  36.756 + 		large_send = card->options.large_send;
  36.757 + 
  36.758 + 	/*are we able to do TSO ? If so ,prepare and send it from here */
  36.759 +@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  36.760 + 		card->stats.tx_packets++;
  36.761 + 		card->stats.tx_bytes += skb->len;
  36.762 + #ifdef CONFIG_QETH_PERF_STATS
  36.763 +-		if (skb_shinfo(skb)->tso_size &&
  36.764 ++		if (skb_shinfo(skb)->gso_size &&
  36.765 + 		   !(large_send == QETH_LARGE_SEND_NO)) {
  36.766 + 			card->perf_stats.large_send_bytes += skb->len;
  36.767 + 			card->perf_stats.large_send_cnt++;
  36.768 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
  36.769 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h	2006-09-12 19:02:10.000000000 +0100
  36.770 ++++ ./drivers/s390/net/qeth_tso.h	2006-09-19 13:59:20.000000000 +0100
  36.771 +@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
  36.772 + 	hdr->ext.hdr_version = 1;
  36.773 + 	hdr->ext.hdr_len     = 28;
  36.774 + 	/*insert non-fix values */
  36.775 +-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
  36.776 ++	hdr->ext.mss = skb_shinfo(skb)->gso_size;
  36.777 + 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
  36.778 + 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
  36.779 + 				       sizeof(struct qeth_hdr_tso));
  36.780 +diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
  36.781 +--- ../orig-linux-2.6.16.29/include/linux/ethtool.h	2006-09-12 19:02:10.000000000 +0100
  36.782 ++++ ./include/linux/ethtool.h	2006-09-19 13:59:20.000000000 +0100
  36.783 +@@ -408,6 +408,8 @@ struct ethtool_ops {
  36.784 + #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
  36.785 + #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
  36.786 + #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
  36.787 ++#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
  36.788 ++#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
  36.789 + 
  36.790 + /* compatibility with older code */
  36.791 + #define SPARC_ETH_GSET		ETHTOOL_GSET
  36.792 +diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  36.793 +--- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-12 19:02:10.000000000 +0100
  36.794 ++++ ./include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  36.795 +@@ -230,7 +230,8 @@ enum netdev_state_t
  36.796 + 	__LINK_STATE_SCHED,
  36.797 + 	__LINK_STATE_NOCARRIER,
  36.798 + 	__LINK_STATE_RX_SCHED,
  36.799 +-	__LINK_STATE_LINKWATCH_PENDING
  36.800 ++	__LINK_STATE_LINKWATCH_PENDING,
  36.801 ++	__LINK_STATE_QDISC_RUNNING,
  36.802 + };
  36.803 + 
  36.804 + 
  36.805 +@@ -306,9 +307,17 @@ struct net_device
  36.806 + #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
  36.807 + #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
  36.808 + #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
  36.809 +-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
  36.810 ++#define NETIF_F_GSO		2048	/* Enable software GSO. */
  36.811 + #define NETIF_F_LLTX		4096	/* LockLess TX */
  36.812 +-#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
  36.813 ++
  36.814 ++	/* Segmentation offload features */
  36.815 ++#define NETIF_F_GSO_SHIFT	16
  36.816 ++#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  36.817 ++#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
  36.818 ++#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  36.819 ++
  36.820 ++#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  36.821 ++#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
  36.822 + 
  36.823 + 	struct net_device	*next_sched;
  36.824 + 
  36.825 +@@ -394,6 +403,9 @@ struct net_device
  36.826 + 	struct list_head	qdisc_list;
  36.827 + 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
  36.828 + 
  36.829 ++	/* Partially transmitted GSO packet. */
  36.830 ++	struct sk_buff		*gso_skb;
  36.831 ++
  36.832 + 	/* ingress path synchronizer */
  36.833 + 	spinlock_t		ingress_lock;
  36.834 + 	struct Qdisc		*qdisc_ingress;
  36.835 +@@ -402,7 +414,7 @@ struct net_device
  36.836 +  * One part is mostly used on xmit path (device)
  36.837 +  */
  36.838 + 	/* hard_start_xmit synchronizer */
  36.839 +-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
  36.840 ++	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  36.841 + 	/* cpu id of processor entered to hard_start_xmit or -1,
  36.842 + 	   if nobody entered there.
  36.843 + 	 */
  36.844 +@@ -527,6 +539,8 @@ struct packet_type {
  36.845 + 					 struct net_device *,
  36.846 + 					 struct packet_type *,
  36.847 + 					 struct net_device *);
  36.848 ++	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  36.849 ++						int features);
  36.850 + 	void			*af_packet_priv;
  36.851 + 	struct list_head	list;
  36.852 + };
  36.853 +@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
  36.854 + extern int		dev_set_mtu(struct net_device *, int);
  36.855 + extern int		dev_set_mac_address(struct net_device *,
  36.856 + 					    struct sockaddr *);
  36.857 +-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
  36.858 ++extern int		dev_hard_start_xmit(struct sk_buff *skb,
  36.859 ++					    struct net_device *dev);
  36.860 + 
  36.861 + extern void		dev_init(void);
  36.862 + 
  36.863 +@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
  36.864 + 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
  36.865 + }
  36.866 + 
  36.867 ++static inline void netif_tx_lock(struct net_device *dev)
  36.868 ++{
  36.869 ++	spin_lock(&dev->_xmit_lock);
  36.870 ++	dev->xmit_lock_owner = smp_processor_id();
  36.871 ++}
  36.872 ++
  36.873 ++static inline void netif_tx_lock_bh(struct net_device *dev)
  36.874 ++{
  36.875 ++	spin_lock_bh(&dev->_xmit_lock);
  36.876 ++	dev->xmit_lock_owner = smp_processor_id();
  36.877 ++}
  36.878 ++
  36.879 ++static inline int netif_tx_trylock(struct net_device *dev)
  36.880 ++{
  36.881 ++	int err = spin_trylock(&dev->_xmit_lock);
  36.882 ++	if (!err)
  36.883 ++		dev->xmit_lock_owner = smp_processor_id();
  36.884 ++	return err;
  36.885 ++}
  36.886 ++
  36.887 ++static inline void netif_tx_unlock(struct net_device *dev)
  36.888 ++{
  36.889 ++	dev->xmit_lock_owner = -1;
  36.890 ++	spin_unlock(&dev->_xmit_lock);
  36.891 ++}
  36.892 ++
  36.893 ++static inline void netif_tx_unlock_bh(struct net_device *dev)
  36.894 ++{
  36.895 ++	dev->xmit_lock_owner = -1;
  36.896 ++	spin_unlock_bh(&dev->_xmit_lock);
  36.897 ++}
  36.898 ++
  36.899 + static inline void netif_tx_disable(struct net_device *dev)
  36.900 + {
  36.901 +-	spin_lock_bh(&dev->xmit_lock);
  36.902 ++	netif_tx_lock_bh(dev);
  36.903 + 	netif_stop_queue(dev);
  36.904 +-	spin_unlock_bh(&dev->xmit_lock);
  36.905 ++	netif_tx_unlock_bh(dev);
  36.906 + }
  36.907 + 
  36.908 + /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  36.909 +@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
  36.910 + extern int		weight_p;
  36.911 + extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
  36.912 + extern int skb_checksum_help(struct sk_buff *skb, int inward);
  36.913 ++extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  36.914 + #ifdef CONFIG_BUG
  36.915 + extern void netdev_rx_csum_fault(struct net_device *dev);
  36.916 + #else
  36.917 +@@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
  36.918 + 
  36.919 + extern void linkwatch_run_queue(void);
  36.920 + 
  36.921 ++static inline int skb_gso_ok(struct sk_buff *skb, int features)
  36.922 ++{
  36.923 ++	int feature = skb_shinfo(skb)->gso_size ?
  36.924 ++		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  36.925 ++	return (features & feature) == feature;
  36.926 ++}
  36.927 ++
  36.928 ++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  36.929 ++{
  36.930 ++	return !skb_gso_ok(skb, dev->features);
  36.931 ++}
  36.932 ++
  36.933 + #endif /* __KERNEL__ */
  36.934 + 
  36.935 + #endif	/* _LINUX_DEV_H */
  36.936 +diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  36.937 +--- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-12 19:02:10.000000000 +0100
  36.938 ++++ ./include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  36.939 +@@ -134,9 +134,10 @@ struct skb_frag_struct {
  36.940 + struct skb_shared_info {
  36.941 + 	atomic_t	dataref;
  36.942 + 	unsigned short	nr_frags;
  36.943 +-	unsigned short	tso_size;
  36.944 +-	unsigned short	tso_segs;
  36.945 +-	unsigned short  ufo_size;
  36.946 ++	unsigned short	gso_size;
  36.947 ++	/* Warning: this field is not always filled in (UFO)! */
  36.948 ++	unsigned short	gso_segs;
  36.949 ++	unsigned short  gso_type;
  36.950 + 	unsigned int    ip6_frag_id;
  36.951 + 	struct sk_buff	*frag_list;
  36.952 + 	skb_frag_t	frags[MAX_SKB_FRAGS];
  36.953 +@@ -168,6 +169,14 @@ enum {
  36.954 + 	SKB_FCLONE_CLONE,
  36.955 + };
  36.956 + 
  36.957 ++enum {
  36.958 ++	SKB_GSO_TCPV4 = 1 << 0,
  36.959 ++	SKB_GSO_UDPV4 = 1 << 1,
  36.960 ++
  36.961 ++	/* This indicates the skb is from an untrusted source. */
  36.962 ++	SKB_GSO_DODGY = 1 << 2,
  36.963 ++};
  36.964 ++
  36.965 + /** 
  36.966 +  *	struct sk_buff - socket buffer
  36.967 +  *	@next: Next buffer in list
  36.968 +@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  36.969 + 	return 0;
  36.970 + }
  36.971 + 
  36.972 ++static inline int __skb_linearize(struct sk_buff *skb)
  36.973 ++{
  36.974 ++	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  36.975 ++}
  36.976 ++
  36.977 + /**
  36.978 +  *	skb_linearize - convert paged skb to linear one
  36.979 +  *	@skb: buffer to linarize
  36.980 +- *	@gfp: allocation mode
  36.981 +  *
  36.982 +  *	If there is no free memory -ENOMEM is returned, otherwise zero
  36.983 +  *	is returned and the old skb data released.
  36.984 +  */
  36.985 +-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  36.986 +-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  36.987 ++static inline int skb_linearize(struct sk_buff *skb)
  36.988 ++{
  36.989 ++	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  36.990 ++}
  36.991 ++
  36.992 ++/**
  36.993 ++ *	skb_linearize_cow - make sure skb is linear and writable
  36.994 ++ *	@skb: buffer to process
  36.995 ++ *
  36.996 ++ *	If there is no free memory -ENOMEM is returned, otherwise zero
  36.997 ++ *	is returned and the old skb data released.
  36.998 ++ */
  36.999 ++static inline int skb_linearize_cow(struct sk_buff *skb)
 36.1000 + {
 36.1001 +-	return __skb_linearize(skb, gfp);
 36.1002 ++	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
 36.1003 ++	       __skb_linearize(skb) : 0;
 36.1004 + }
 36.1005 + 
 36.1006 + /**
 36.1007 +@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
 36.1008 + 				 struct sk_buff *skb1, const u32 len);
 36.1009 + 
 36.1010 + extern void	       skb_release_data(struct sk_buff *skb);
 36.1011 ++extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 36.1012 + 
 36.1013 + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 36.1014 + 				       int len, void *buffer)
 36.1015 +diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
 36.1016 +--- ../orig-linux-2.6.16.29/include/net/pkt_sched.h	2006-09-12 19:02:10.000000000 +0100
 36.1017 ++++ ./include/net/pkt_sched.h	2006-09-19 13:59:20.000000000 +0100
 36.1018 +@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
 36.1019 + 		struct rtattr *tab);
 36.1020 + extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 36.1021 + 
 36.1022 +-extern int qdisc_restart(struct net_device *dev);
 36.1023 ++extern void __qdisc_run(struct net_device *dev);
 36.1024 + 
 36.1025 + static inline void qdisc_run(struct net_device *dev)
 36.1026 + {
 36.1027 +-	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 36.1028 +-		/* NOTHING */;
 36.1029 ++	if (!netif_queue_stopped(dev) &&
 36.1030 ++	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 36.1031 ++		__qdisc_run(dev);
 36.1032 + }
 36.1033 + 
 36.1034 + extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 36.1035 +diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
 36.1036 +--- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-12 19:02:10.000000000 +0100
 36.1037 ++++ ./include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
 36.1038 +@@ -37,6 +37,8 @@
 36.1039 + struct net_protocol {
 36.1040 + 	int			(*handler)(struct sk_buff *skb);
 36.1041 + 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 36.1042 ++	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
 36.1043 ++					       int features);
 36.1044 + 	int			no_policy;
 36.1045 + };
 36.1046 + 
 36.1047 +diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
 36.1048 +--- ../orig-linux-2.6.16.29/include/net/sock.h	2006-09-12 19:02:10.000000000 +0100
 36.1049 ++++ ./include/net/sock.h	2006-09-19 13:59:20.000000000 +0100
 36.1050 +@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
 36.1051 + {
 36.1052 + 	__sk_dst_set(sk, dst);
 36.1053 + 	sk->sk_route_caps = dst->dev->features;
 36.1054 ++	if (sk->sk_route_caps & NETIF_F_GSO)
 36.1055 ++		sk->sk_route_caps |= NETIF_F_TSO;
 36.1056 + 	if (sk->sk_route_caps & NETIF_F_TSO) {
 36.1057 + 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
 36.1058 + 			sk->sk_route_caps &= ~NETIF_F_TSO;
 36.1059 ++		else 
 36.1060 ++			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 36.1061 + 	}
 36.1062 + }
 36.1063 + 
 36.1064 +diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
 36.1065 +--- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-12 19:02:10.000000000 +0100
 36.1066 ++++ ./include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
 36.1067 +@@ -552,13 +552,13 @@ struct tcp_skb_cb {
 36.1068 +  */
 36.1069 + static inline int tcp_skb_pcount(const struct sk_buff *skb)
 36.1070 + {
 36.1071 +-	return skb_shinfo(skb)->tso_segs;
 36.1072 ++	return skb_shinfo(skb)->gso_segs;
 36.1073 + }
 36.1074 + 
 36.1075 + /* This is valid iff tcp_skb_pcount() > 1. */
 36.1076 + static inline int tcp_skb_mss(const struct sk_buff *skb)
 36.1077 + {
 36.1078 +-	return skb_shinfo(skb)->tso_size;
 36.1079 ++	return skb_shinfo(skb)->gso_size;
 36.1080 + }
 36.1081 + 
 36.1082 + static inline void tcp_dec_pcount_approx(__u32 *count,
 36.1083 +@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
 36.1084 + 
 36.1085 + extern int tcp_v4_destroy_sock(struct sock *sk);
 36.1086 + 
 36.1087 ++extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
 36.1088 ++
 36.1089 + #ifdef CONFIG_PROC_FS
 36.1090 + extern int  tcp4_proc_init(void);
 36.1091 + extern void tcp4_proc_exit(void);
 36.1092 +diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
 36.1093 +--- ../orig-linux-2.6.16.29/net/atm/clip.c	2006-09-12 19:02:10.000000000 +0100
 36.1094 ++++ ./net/atm/clip.c	2006-09-19 13:59:20.000000000 +0100
 36.1095 +@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
 36.1096 + 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
 36.1097 + 		return;
 36.1098 + 	}
 36.1099 +-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
 36.1100 ++	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 36.1101 + 	entry->neigh->used = jiffies;
 36.1102 + 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 36.1103 + 		if (*walk == clip_vcc) {
 36.1104 +@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
 36.1105 + 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 36.1106 + 	  "0x%p)\n",entry,clip_vcc);
 36.1107 + out:
 36.1108 +-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
 36.1109 ++	netif_tx_unlock_bh(entry->neigh->dev);
 36.1110 + }
 36.1111 + 
 36.1112 + /* The neighbour entry n->lock is held. */
 36.1113 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
 36.1114 +--- ../orig-linux-2.6.16.29/net/bridge/br_device.c	2006-09-12 19:02:10.000000000 +0100
 36.1115 ++++ ./net/bridge/br_device.c	2006-09-19 13:59:20.000000000 +0100
 36.1116 +@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
 36.1117 + 	struct net_bridge *br = netdev_priv(dev);
 36.1118 + 
 36.1119 + 	if (data)
 36.1120 +-		br->feature_mask |= NETIF_F_IP_CSUM;
 36.1121 ++		br->feature_mask |= NETIF_F_NO_CSUM;
 36.1122 + 	else
 36.1123 +-		br->feature_mask &= ~NETIF_F_IP_CSUM;
 36.1124 ++		br->feature_mask &= ~NETIF_F_ALL_CSUM;
 36.1125 + 
 36.1126 + 	br_features_recompute(br);
 36.1127 + 	return 0;
 36.1128 +@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
 36.1129 + 	dev->set_mac_address = br_set_mac_address;
 36.1130 + 	dev->priv_flags = IFF_EBRIDGE;
 36.1131 + 
 36.1132 +- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
 36.1133 +- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
 36.1134 ++ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 36.1135 ++ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
 36.1136 + }
 36.1137 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
 36.1138 +--- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-12 19:02:10.000000000 +0100
 36.1139 ++++ ./net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
 36.1140 +@@ -32,7 +32,7 @@ static inline int should_deliver(const s
 36.1141 + int br_dev_queue_push_xmit(struct sk_buff *skb)
 36.1142 + {
 36.1143 + 	/* drop mtu oversized packets except tso */
 36.1144 +-	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
 36.1145 ++	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
 36.1146 + 		kfree_skb(skb);
 36.1147 + 	else {
 36.1148 + #ifdef CONFIG_BRIDGE_NETFILTER
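br_dev_queue_push_xmit() above now keys its "allow over-MTU frames" exception on the generic gso_size instead of the TCP-only tso_size, since any GSO packet may legitimately exceed the device MTU until it is segmented. The check reduces to something like the following self-contained sketch (names and values are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative reduction of the bridge MTU check: an over-MTU frame is
 * only dropped when it is not a GSO super-packet (gso_size == 0). */
static bool should_drop(unsigned int skb_len, unsigned int dev_mtu,
			unsigned short gso_size)
{
	return skb_len > dev_mtu && gso_size == 0;
}

int main(void)
{
	printf("%d\n", should_drop(9000, 1500, 0));	/* 1: plain oversized frame */
	printf("%d\n", should_drop(9000, 1500, 1448));	/* 0: GSO, segmented later */
	return 0;
}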
 36.1149 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
 36.1150 +--- ../orig-linux-2.6.16.29/net/bridge/br_if.c	2006-09-12 19:02:10.000000000 +0100
 36.1151 ++++ ./net/bridge/br_if.c	2006-09-19 13:59:20.000000000 +0100
 36.1152 +@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
 36.1153 + 	struct net_bridge_port *p;
 36.1154 + 	unsigned long features, checksum;
 36.1155 + 
 36.1156 +-	features = br->feature_mask &~ NETIF_F_IP_CSUM;
 36.1157 +-	checksum = br->feature_mask & NETIF_F_IP_CSUM;
 36.1158 ++	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
 36.1159 ++	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
 36.1160 + 
 36.1161 + 	list_for_each_entry(p, &br->port_list, list) {
 36.1162 +-		if (!(p->dev->features 
 36.1163 +-		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
 36.1164 ++		unsigned long feature = p->dev->features;
 36.1165 ++
 36.1166 ++		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
 36.1167 ++			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
 36.1168 ++		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
 36.1169 ++			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
 36.1170 ++		if (!(feature & NETIF_F_IP_CSUM))
 36.1171 + 			checksum = 0;
 36.1172 +-		features &= p->dev->features;
 36.1173 ++
 36.1174 ++		if (feature & NETIF_F_GSO)
 36.1175 ++			feature |= NETIF_F_TSO;
 36.1176 ++		feature |= NETIF_F_GSO;
 36.1177 ++
 36.1178 ++		features &= feature;
 36.1179 + 	}
 36.1180 + 
 36.1181 +-	br->dev->features = features | checksum | NETIF_F_LLTX;
 36.1182 ++	br->dev->features = features | checksum | NETIF_F_LLTX |
 36.1183 ++			    NETIF_F_GSO_ROBUST;
 36.1184 + }
 36.1185 + 
 36.1186 + /* called with RTNL */
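The br_features_recompute() rework above no longer treats checksum offload as a single NETIF_F_IP_CSUM bit: the bridge starts by claiming the strongest capability (NETIF_F_NO_CSUM) and steps down to NETIF_F_HW_CSUM, then NETIF_F_IP_CSUM, then nothing, according to what every enslaved port can actually do, and GSO is always advertised because the core can segment in software. The downgrade ladder can be exercised in isolation with the sketch below; the flag values are made up for illustration, the real NETIF_F_* definitions live in include/linux/netdevice.h.

#include <stdio.h>

/* Made-up flag values for illustration only; not the real NETIF_F_* bits. */
#define F_IP_CSUM	0x01UL
#define F_HW_CSUM	0x02UL
#define F_NO_CSUM	0x04UL

/* Mirror of the hunk's loop: degrade the bridge's checksum claim to the
 * weakest capability found among the ports. */
static unsigned long recompute_checksum(unsigned long checksum,
					const unsigned long *port_features,
					int nports)
{
	for (int i = 0; i < nports; i++) {
		unsigned long feature = port_features[i];

		if (checksum & F_NO_CSUM && !(feature & F_NO_CSUM))
			checksum ^= F_NO_CSUM | F_HW_CSUM;	/* step down to HW csum */
		if (checksum & F_HW_CSUM && !(feature & F_HW_CSUM))
			checksum ^= F_HW_CSUM | F_IP_CSUM;	/* step down to IPv4 csum */
		if (!(feature & F_IP_CSUM))
			checksum = 0;	/* port cannot offload even IPv4 checksums */
	}
	return checksum;
}

int main(void)
{
	/* One port with hardware csum, one with only IPv4 csum offload. */
	unsigned long ports[] = { F_HW_CSUM | F_IP_CSUM, F_IP_CSUM };

	/* Start from the strongest claim; ends up as F_IP_CSUM (0x1). */
	printf("0x%lx\n", recompute_checksum(F_NO_CSUM, ports, 2));
	return 0;
}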
 36.1187 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
 36.1188 +--- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-12 19:02:10.000000000 +0100
 36.1189 ++++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
 36.1190 +@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
 36.1191 + {
 36.1192 + 	if (skb->protocol == htons(ETH_P_IP) &&
 36.1193 + 	    skb->len > skb->dev->mtu &&
 36.1194 +-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
 36.1195 ++	    !skb_shinfo(skb)->gso_size)
 36.1196 + 		return ip_fragment(skb, br_dev_queue_push_xmit);
 36.1197 + 	else
 36.1198 + 		return br_dev_queue_push_xmit(skb);
 36.1199 +diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
 36.1200 +--- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-12 19:02:10.000000000 +0100
 36.1201 ++++ ./net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
 36.1202 +@@ -115,6 +115,7 @@
 36.1203 + #include <net/iw_handler.h>
 36.1204 + #endif	/* CONFIG_NET_RADIO */
 36.1205 + #include <asm/current.h>
 36.1206 ++#include <linux/err.h>
 36.1207 + 
 36.1208 + /*
 36.1209 +  *	The list of packet types we will receive (as opposed to discard)
 36.1210 +@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
 36.1211 +  *	taps currently in use.
 36.1212 +  */
 36.1213 + 
 36.1214 +-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 36.1215 ++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 36.1216 + {
 36.1217 + 	struct packet_type *ptype;
 36.1218 + 
 36.1219 +@@ -1106,6 +1107,45 @@ out:	
 36.1220 + 	return ret;
 36.1221 + }
 36.1222 + 
 36.1223 ++/**
 36.1224 ++ *	skb_gso_segment - Perform segmentation on skb.
 36.1225 ++ *	@skb: buffer to segment
 36.1226 ++ *	@features: features for the output path (see dev->features)
 36.1227 ++ *
 36.1228 ++ *	This function segments the given skb and returns a list of segments.
 36.1229 ++ *
 36.1230 ++ *	It may return NULL if the skb requires no segmentation.  This is
 36.1231 ++ *	only possible when GSO is used for verifying header integrity.
 36.1232 ++ */
 36.1233 ++struct sk_buff *skb_gso_segm