direct-io.hg

changeset 11536:041be3f6b38e

[linux patches] Update patches for linux-2.6.16.29.

Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author Christian Limpach <Christian.Limpach@xensource.com>
date Tue Sep 19 14:26:47 2006 +0100 (2006-09-19)
parents c5d4d47bbeb8
children f07cf18343f1
files patches/linux-2.6.16.13/blktap-aio-16_03_06.patch patches/linux-2.6.16.13/device_bind.patch patches/linux-2.6.16.13/fix-hz-suspend.patch patches/linux-2.6.16.13/fix-ide-cd-pio-mode.patch patches/linux-2.6.16.13/i386-mach-io-check-nmi.patch patches/linux-2.6.16.13/ipv6-no-autoconf.patch patches/linux-2.6.16.13/net-csum.patch patches/linux-2.6.16.13/net-gso-0-base.patch patches/linux-2.6.16.13/net-gso-1-check-dodgy.patch patches/linux-2.6.16.13/net-gso-2-checksum-fix.patch patches/linux-2.6.16.13/net-gso-3-fix-errorcheck.patch patches/linux-2.6.16.13/net-gso-4-kill-warnon.patch patches/linux-2.6.16.13/pmd-shared.patch patches/linux-2.6.16.13/rcu_needs_cpu.patch patches/linux-2.6.16.13/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch patches/linux-2.6.16.13/series patches/linux-2.6.16.13/smp-alts.patch patches/linux-2.6.16.13/tpm_plugin_2.6.17.patch patches/linux-2.6.16.13/x86-elfnote-as-preprocessor-macro.patch patches/linux-2.6.16.13/x86-increase-interrupt-vector-range.patch patches/linux-2.6.16.13/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch patches/linux-2.6.16.13/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch patches/linux-2.6.16.13/xen-hotplug.patch patches/linux-2.6.16.13/xenoprof-generic.patch patches/linux-2.6.16.29/blktap-aio-16_03_06.patch patches/linux-2.6.16.29/device_bind.patch patches/linux-2.6.16.29/fix-hz-suspend.patch patches/linux-2.6.16.29/fix-ide-cd-pio-mode.patch patches/linux-2.6.16.29/i386-mach-io-check-nmi.patch patches/linux-2.6.16.29/ipv6-no-autoconf.patch patches/linux-2.6.16.29/net-csum.patch patches/linux-2.6.16.29/net-gso-0-base.patch patches/linux-2.6.16.29/net-gso-1-check-dodgy.patch patches/linux-2.6.16.29/net-gso-2-checksum-fix.patch patches/linux-2.6.16.29/net-gso-3-fix-errorcheck.patch patches/linux-2.6.16.29/net-gso-4-kill-warnon.patch patches/linux-2.6.16.29/pmd-shared.patch patches/linux-2.6.16.29/rcu_needs_cpu.patch 
patches/linux-2.6.16.29/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch patches/linux-2.6.16.29/series patches/linux-2.6.16.29/smp-alts.patch patches/linux-2.6.16.29/tpm_plugin_2.6.17.patch patches/linux-2.6.16.29/x86-elfnote-as-preprocessor-macro.patch patches/linux-2.6.16.29/x86-increase-interrupt-vector-range.patch patches/linux-2.6.16.29/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch patches/linux-2.6.16.29/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch patches/linux-2.6.16.29/xen-hotplug.patch patches/linux-2.6.16.29/xenoprof-generic.patch
line diff
     1.1 --- a/patches/linux-2.6.16.13/blktap-aio-16_03_06.patch	Tue Sep 19 14:26:22 2006 +0100
     1.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.3 @@ -1,297 +0,0 @@
     1.4 -diff -pruN ../pristine-linux-2.6.16-rc5/fs/aio.c ./fs/aio.c
     1.5 ---- ../pristine-linux-2.6.16-rc5/fs/aio.c	2006-03-14 14:10:10.827401387 +0000
     1.6 -+++ ./fs/aio.c	2006-03-16 09:57:53.898316582 +0000
     1.7 -@@ -34,6 +34,11 @@
     1.8 - #include <asm/uaccess.h>
     1.9 - #include <asm/mmu_context.h>
    1.10 - 
    1.11 -+#ifdef CONFIG_EPOLL
    1.12 -+#include <linux/poll.h>
    1.13 -+#include <linux/eventpoll.h>
    1.14 -+#endif
    1.15 -+
    1.16 - #if DEBUG > 1
    1.17 - #define dprintk		printk
    1.18 - #else
    1.19 -@@ -1016,6 +1021,10 @@ put_rq:
    1.20 - 	if (waitqueue_active(&ctx->wait))
    1.21 - 		wake_up(&ctx->wait);
    1.22 - 
    1.23 -+#ifdef CONFIG_EPOLL
    1.24 -+	if (ctx->file && waitqueue_active(&ctx->poll_wait))
    1.25 -+		wake_up(&ctx->poll_wait);
    1.26 -+#endif
    1.27 - 	if (ret)
    1.28 - 		put_ioctx(ctx);
    1.29 - 
    1.30 -@@ -1025,6 +1034,8 @@ put_rq:
    1.31 - /* aio_read_evt
    1.32 -  *	Pull an event off of the ioctx's event ring.  Returns the number of 
    1.33 -  *	events fetched (0 or 1 ;-)
    1.34 -+ *	If ent parameter is 0, just returns the number of events that would
    1.35 -+ *	be fetched.
    1.36 -  *	FIXME: make this use cmpxchg.
    1.37 -  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
    1.38 -  */
    1.39 -@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
    1.40 - 
    1.41 - 	head = ring->head % info->nr;
    1.42 - 	if (head != ring->tail) {
    1.43 --		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
    1.44 --		*ent = *evp;
    1.45 --		head = (head + 1) % info->nr;
    1.46 --		smp_mb(); /* finish reading the event before updatng the head */
    1.47 --		ring->head = head;
    1.48 --		ret = 1;
    1.49 --		put_aio_ring_event(evp, KM_USER1);
    1.50 -+		if (ent) { /* event requested */
    1.51 -+			struct io_event *evp =
    1.52 -+				aio_ring_event(info, head, KM_USER1);
    1.53 -+			*ent = *evp;
    1.54 -+			head = (head + 1) % info->nr;
    1.55 -+			/* finish reading the event before updatng the head */
    1.56 -+			smp_mb();
    1.57 -+			ring->head = head;
    1.58 -+			ret = 1;
    1.59 -+			put_aio_ring_event(evp, KM_USER1);
    1.60 -+		} else /* only need to know availability */
    1.61 -+			ret = 1;
    1.62 - 	}
    1.63 - 	spin_unlock(&info->ring_lock);
    1.64 - 
    1.65 -@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
    1.66 - 
    1.67 - 	aio_cancel_all(ioctx);
    1.68 - 	wait_for_all_aios(ioctx);
    1.69 -+#ifdef CONFIG_EPOLL
    1.70 -+	/* forget the poll file, but it's up to the user to close it */
    1.71 -+	if (ioctx->file) {
    1.72 -+		ioctx->file->private_data = 0;
    1.73 -+		ioctx->file = 0;
    1.74 -+	}
    1.75 -+#endif
    1.76 - 	put_ioctx(ioctx);	/* once for the lookup */
    1.77 - }
    1.78 - 
    1.79 -+#ifdef CONFIG_EPOLL
    1.80 -+
    1.81 -+static int aio_queue_fd_close(struct inode *inode, struct file *file)
    1.82 -+{
    1.83 -+	struct kioctx *ioctx = file->private_data;
    1.84 -+	if (ioctx) {
    1.85 -+		file->private_data = 0;
    1.86 -+		spin_lock_irq(&ioctx->ctx_lock);
    1.87 -+		ioctx->file = 0;
    1.88 -+		spin_unlock_irq(&ioctx->ctx_lock);
    1.89 -+	}
    1.90 -+	return 0;
    1.91 -+}
    1.92 -+
    1.93 -+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
    1.94 -+{	unsigned int pollflags = 0;
    1.95 -+	struct kioctx *ioctx = file->private_data;
    1.96 -+
    1.97 -+	if (ioctx) {
    1.98 -+
    1.99 -+		spin_lock_irq(&ioctx->ctx_lock);
   1.100 -+		/* Insert inside our poll wait queue */
   1.101 -+		poll_wait(file, &ioctx->poll_wait, wait);
   1.102 -+
   1.103 -+		/* Check our condition */
   1.104 -+		if (aio_read_evt(ioctx, 0))
   1.105 -+			pollflags = POLLIN | POLLRDNORM;
   1.106 -+		spin_unlock_irq(&ioctx->ctx_lock);
   1.107 -+	}
   1.108 -+
   1.109 -+	return pollflags;
   1.110 -+}
   1.111 -+
   1.112 -+static struct file_operations aioq_fops = {
   1.113 -+	.release	= aio_queue_fd_close,
   1.114 -+	.poll		= aio_queue_fd_poll
   1.115 -+};
   1.116 -+
   1.117 -+/* make_aio_fd:
   1.118 -+ *  Create a file descriptor that can be used to poll the event queue.
   1.119 -+ *  Based and piggybacked on the excellent epoll code.
   1.120 -+ */
   1.121 -+
   1.122 -+static int make_aio_fd(struct kioctx *ioctx)
   1.123 -+{
   1.124 -+	int error, fd;
   1.125 -+	struct inode *inode;
   1.126 -+	struct file *file;
   1.127 -+
   1.128 -+	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
   1.129 -+	if (error)
   1.130 -+		return error;
   1.131 -+
   1.132 -+	/* associate the file with the IO context */
   1.133 -+	file->private_data = ioctx;
   1.134 -+	ioctx->file = file;
   1.135 -+	init_waitqueue_head(&ioctx->poll_wait);
   1.136 -+	return fd;
   1.137 -+}
   1.138 -+#endif
   1.139 -+
   1.140 -+
   1.141 - /* sys_io_setup:
   1.142 -  *	Create an aio_context capable of receiving at least nr_events.
   1.143 -  *	ctxp must not point to an aio_context that already exists, and
   1.144 -@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
   1.145 -  *	resources are available.  May fail with -EFAULT if an invalid
   1.146 -  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
   1.147 -  *	implemented.
   1.148 -+ *
   1.149 -+ *	To request a selectable fd, the user context has to be initialized
   1.150 -+ *	to 1, instead of 0, and the return value is the fd.
   1.151 -+ *	This keeps the system call compatible, since a non-zero value
   1.152 -+ *	was not allowed so far.
   1.153 -  */
   1.154 - asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
   1.155 - {
   1.156 - 	struct kioctx *ioctx = NULL;
   1.157 - 	unsigned long ctx;
   1.158 - 	long ret;
   1.159 -+	int make_fd = 0;
   1.160 - 
   1.161 - 	ret = get_user(ctx, ctxp);
   1.162 - 	if (unlikely(ret))
   1.163 - 		goto out;
   1.164 - 
   1.165 - 	ret = -EINVAL;
   1.166 -+#ifdef CONFIG_EPOLL
   1.167 -+	if (ctx == 1) {
   1.168 -+		make_fd = 1;
   1.169 -+		ctx = 0;
   1.170 -+	}
   1.171 -+#endif
   1.172 - 	if (unlikely(ctx || nr_events == 0)) {
   1.173 - 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
   1.174 - 		         ctx, nr_events);
   1.175 -@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
   1.176 - 	ret = PTR_ERR(ioctx);
   1.177 - 	if (!IS_ERR(ioctx)) {
   1.178 - 		ret = put_user(ioctx->user_id, ctxp);
   1.179 --		if (!ret)
   1.180 --			return 0;
   1.181 -+#ifdef CONFIG_EPOLL
   1.182 -+		if (make_fd && ret >= 0)
   1.183 -+			ret = make_aio_fd(ioctx);
   1.184 -+#endif
   1.185 -+		if (ret >= 0)
   1.186 -+			return ret;
   1.187 - 
   1.188 - 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
   1.189 - 		io_destroy(ioctx);
   1.190 -
   1.191 -diff -pruN ../pristine-linux-2.6.16-rc5/fs/eventpoll.c ./fs/eventpoll.c
   1.192 ---- ../pristine-linux-2.6.16-rc5/fs/eventpoll.c	2006-01-03 03:21:10.000000000 +0000
   1.193 -+++ ./fs/eventpoll.c	2006-03-16 10:04:35.469956167 +0000
   1.194 -@@ -235,8 +235,6 @@ struct ep_pqueue {
   1.195 - 
   1.196 - static void ep_poll_safewake_init(struct poll_safewake *psw);
   1.197 - static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
   1.198 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   1.199 --		    struct eventpoll *ep);
   1.200 - static int ep_alloc(struct eventpoll **pep);
   1.201 - static void ep_free(struct eventpoll *ep);
   1.202 - static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
   1.203 -@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
   1.204 - static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
   1.205 - 		   int maxevents, long timeout);
   1.206 - static int eventpollfs_delete_dentry(struct dentry *dentry);
   1.207 --static struct inode *ep_eventpoll_inode(void);
   1.208 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops);
   1.209 - static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
   1.210 - 					      int flags, const char *dev_name,
   1.211 - 					      void *data);
   1.212 -@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
   1.213 - 	 * Creates all the items needed to setup an eventpoll file. That is,
   1.214 - 	 * a file structure, and inode and a free file descriptor.
   1.215 - 	 */
   1.216 --	error = ep_getfd(&fd, &inode, &file, ep);
   1.217 -+	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
   1.218 - 	if (error)
   1.219 - 		goto eexit_2;
   1.220 - 
   1.221 -@@ -710,8 +708,8 @@ eexit_1:
   1.222 - /*
   1.223 -  * Creates the file descriptor to be used by the epoll interface.
   1.224 -  */
   1.225 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   1.226 --		    struct eventpoll *ep)
   1.227 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   1.228 -+		    struct eventpoll *ep, struct file_operations *fops)
   1.229 - {
   1.230 - 	struct qstr this;
   1.231 - 	char name[32];
   1.232 -@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
   1.233 - 		goto eexit_1;
   1.234 - 
   1.235 - 	/* Allocates an inode from the eventpoll file system */
   1.236 --	inode = ep_eventpoll_inode();
   1.237 -+	inode = ep_eventpoll_inode(fops);
   1.238 - 	error = PTR_ERR(inode);
   1.239 - 	if (IS_ERR(inode))
   1.240 - 		goto eexit_2;
   1.241 -@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
   1.242 - 
   1.243 - 	file->f_pos = 0;
   1.244 - 	file->f_flags = O_RDONLY;
   1.245 --	file->f_op = &eventpoll_fops;
   1.246 -+	file->f_op = fops;
   1.247 - 	file->f_mode = FMODE_READ;
   1.248 - 	file->f_version = 0;
   1.249 - 	file->private_data = ep;
   1.250 -@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
   1.251 - }
   1.252 - 
   1.253 - 
   1.254 --static struct inode *ep_eventpoll_inode(void)
   1.255 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops)
   1.256 - {
   1.257 - 	int error = -ENOMEM;
   1.258 - 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
   1.259 -@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
   1.260 - 	if (!inode)
   1.261 - 		goto eexit_1;
   1.262 - 
   1.263 --	inode->i_fop = &eventpoll_fops;
   1.264 -+	inode->i_fop = fops;
   1.265 - 
   1.266 - 	/*
   1.267 - 	 * Mark the inode dirty from the very beginning,
   1.268 -
   1.269 -diff -pruN ../pristine-linux-2.6.16-rc5/include/linux/aio.h ./include/linux/aio.h
   1.270 ---- ../pristine-linux-2.6.16-rc5/include/linux/aio.h	2006-03-14 14:10:21.597916731 +0000
   1.271 -+++ ./include/linux/aio.h	2006-03-16 10:05:39.848833028 +0000
   1.272 -@@ -191,6 +191,11 @@ struct kioctx {
   1.273 - 	struct aio_ring_info	ring_info;
   1.274 - 
   1.275 - 	struct work_struct	wq;
   1.276 -+#ifdef CONFIG_EPOLL
   1.277 -+	// poll integration
   1.278 -+	wait_queue_head_t       poll_wait;
   1.279 -+	struct file		*file;
   1.280 -+#endif
   1.281 - };
   1.282 - 
   1.283 - /* prototypes */
   1.284 -
   1.285 -diff -pruN ../pristine-linux-2.6.16-rc5/include/linux/eventpoll.h ./include/linux/eventpoll.h
   1.286 ---- ../pristine-linux-2.6.16-rc5/include/linux/eventpoll.h	2006-01-03 03:21:10.000000000 +0000
   1.287 -+++ ./include/linux/eventpoll.h	2006-03-16 10:08:51.577809317 +0000
   1.288 -@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
   1.289 - }
   1.290 - 
   1.291 - 
   1.292 -+/*
   1.293 -+ * called by aio code to create fd that can poll the  aio event queueQ
   1.294 -+ */
   1.295 -+struct eventpoll;
   1.296 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   1.297 -+             struct eventpoll *ep, struct file_operations *fops);
   1.298 - #else
   1.299 - 
   1.300 - static inline void eventpoll_init_file(struct file *file) {}
     2.1 --- a/patches/linux-2.6.16.13/device_bind.patch	Tue Sep 19 14:26:22 2006 +0100
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,15 +0,0 @@
     2.4 -diff -pruN ../pristine-linux-2.6.16.13/drivers/base/bus.c ./drivers/base/bus.c
     2.5 ---- ../pristine-linux-2.6.16.13/drivers/base/bus.c	2006-05-02 22:38:44.000000000 +0100
     2.6 -+++ ./drivers/base/bus.c	2006-05-04 17:41:30.000000000 +0100
     2.7 -@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
     2.8 - 		up(&dev->sem);
     2.9 - 		if (dev->parent)
    2.10 - 			up(&dev->parent->sem);
    2.11 -+
    2.12 -+		if (err > 0) 		/* success */
    2.13 -+			err = count;
    2.14 -+		else if (err == 0)	/* driver didn't accept device */
    2.15 -+			err = -ENODEV;
    2.16 - 	}
    2.17 - 	put_device(dev);
    2.18 - 	put_bus(bus);
     3.1 --- a/patches/linux-2.6.16.13/fix-hz-suspend.patch	Tue Sep 19 14:26:22 2006 +0100
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,26 +0,0 @@
     3.4 -diff -pruN ../pristine-linux-2.6.16.13/kernel/timer.c ./kernel/timer.c
     3.5 ---- ../pristine-linux-2.6.16.13/kernel/timer.c	2006-05-02 22:38:44.000000000 +0100
     3.6 -+++ ./kernel/timer.c	2006-06-29 14:34:12.788957720 +0100
     3.7 -@@ -555,6 +555,22 @@ found:
     3.8 - 	}
     3.9 - 	spin_unlock(&base->t_base.lock);
    3.10 - 
    3.11 -+	/*
    3.12 -+	 * It can happen that other CPUs service timer IRQs and increment
    3.13 -+	 * jiffies, but we have not yet got a local timer tick to process
    3.14 -+	 * the timer wheels.  In that case, the expiry time can be before
    3.15 -+	 * jiffies, but since the high-resolution timer here is relative to
    3.16 -+	 * jiffies, the default expression when high-resolution timers are
    3.17 -+	 * not active,
    3.18 -+	 *
    3.19 -+	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
    3.20 -+	 *
    3.21 -+	 * would falsely evaluate to true.  If that is the case, just
    3.22 -+	 * return jiffies so that we can immediately fire the local timer
    3.23 -+	 */
    3.24 -+	if (time_before(expires, jiffies))
    3.25 -+		return jiffies;
    3.26 -+
    3.27 - 	if (time_before(hr_expires, expires))
    3.28 - 		return hr_expires;
    3.29 - 
     4.1 --- a/patches/linux-2.6.16.13/fix-ide-cd-pio-mode.patch	Tue Sep 19 14:26:22 2006 +0100
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,18 +0,0 @@
     4.4 -diff -ru ../pristine-linux-2.6.16.13/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
     4.5 ---- ../pristine-linux-2.6.16.13/drivers/ide/ide-lib.c	2006-05-02 22:38:44.000000000 +0100
     4.6 -+++ ./drivers/ide/ide-lib.c	2006-05-24 18:37:05.000000000 +0100
     4.7 -@@ -410,10 +410,10 @@
     4.8 - {
     4.9 - 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
    4.10 - 
    4.11 --	if (!PCI_DMA_BUS_IS_PHYS) {
    4.12 --		addr = BLK_BOUNCE_ANY;
    4.13 --	} else if (on && drive->media == ide_disk) {
    4.14 --		if (HWIF(drive)->pci_dev)
    4.15 -+	if (on && drive->media == ide_disk) {
    4.16 -+		if (!PCI_DMA_BUS_IS_PHYS)
    4.17 -+			addr = BLK_BOUNCE_ANY;
    4.18 -+		else if (HWIF(drive)->pci_dev)
    4.19 - 			addr = HWIF(drive)->pci_dev->dma_mask;
    4.20 - 	}
    4.21 - 
     5.1 --- a/patches/linux-2.6.16.13/i386-mach-io-check-nmi.patch	Tue Sep 19 14:26:22 2006 +0100
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,45 +0,0 @@
     5.4 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
     5.5 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/traps.c	2006-05-02 22:38:44.000000000 +0100
     5.6 -+++ ./arch/i386/kernel/traps.c	2006-05-04 17:41:34.000000000 +0100
     5.7 -@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
     5.8 - 
     5.9 - static void io_check_error(unsigned char reason, struct pt_regs * regs)
    5.10 - {
    5.11 --	unsigned long i;
    5.12 --
    5.13 - 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
    5.14 - 	show_registers(regs);
    5.15 - 
    5.16 - 	/* Re-enable the IOCK line, wait for a few seconds */
    5.17 --	reason = (reason & 0xf) | 8;
    5.18 --	outb(reason, 0x61);
    5.19 --	i = 2000;
    5.20 --	while (--i) udelay(1000);
    5.21 --	reason &= ~8;
    5.22 --	outb(reason, 0x61);
    5.23 -+	clear_io_check_error(reason);
    5.24 - }
    5.25 - 
    5.26 - static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
    5.27 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
    5.28 ---- ../pristine-linux-2.6.16.13/include/asm-i386/mach-default/mach_traps.h	2006-05-02 22:38:44.000000000 +0100
    5.29 -+++ ./include/asm-i386/mach-default/mach_traps.h	2006-05-04 17:41:34.000000000 +0100
    5.30 -@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
    5.31 - 	outb(reason, 0x61);
    5.32 - }
    5.33 - 
    5.34 -+static inline void clear_io_check_error(unsigned char reason)
    5.35 -+{
    5.36 -+	unsigned long i;
    5.37 -+
    5.38 -+	reason = (reason & 0xf) | 8;
    5.39 -+	outb(reason, 0x61);
    5.40 -+	i = 2000;
    5.41 -+	while (--i) udelay(1000);
    5.42 -+	reason &= ~8;
    5.43 -+	outb(reason, 0x61);
    5.44 -+}
    5.45 -+
    5.46 - static inline unsigned char get_nmi_reason(void)
    5.47 - {
    5.48 - 	return inb(0x61);
     6.1 --- a/patches/linux-2.6.16.13/ipv6-no-autoconf.patch	Tue Sep 19 14:26:22 2006 +0100
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,23 +0,0 @@
     6.4 - net/ipv6/addrconf.c |    2 ++
     6.5 - 1 files changed, 2 insertions(+)
     6.6 -
     6.7 -Index: build/net/ipv6/addrconf.c
     6.8 -===================================================================
     6.9 ---- build.orig/net/ipv6/addrconf.c
    6.10 -+++ build/net/ipv6/addrconf.c
    6.11 -@@ -2462,6 +2462,7 @@ static void addrconf_dad_start(struct in
    6.12 - 	spin_lock_bh(&ifp->lock);
    6.13 - 
    6.14 - 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
    6.15 -+	    !(dev->flags&IFF_MULTICAST) ||
    6.16 - 	    !(ifp->flags&IFA_F_TENTATIVE)) {
    6.17 - 		ifp->flags &= ~IFA_F_TENTATIVE;
    6.18 - 		spin_unlock_bh(&ifp->lock);
    6.19 -@@ -2546,6 +2547,7 @@ static void addrconf_dad_completed(struc
    6.20 - 	if (ifp->idev->cnf.forwarding == 0 &&
    6.21 - 	    ifp->idev->cnf.rtr_solicits > 0 &&
    6.22 - 	    (dev->flags&IFF_LOOPBACK) == 0 &&
    6.23 -+	    (dev->flags & IFF_MULTICAST) &&
    6.24 - 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
    6.25 - 		struct in6_addr all_routers;
    6.26 - 
     7.1 --- a/patches/linux-2.6.16.13/net-csum.patch	Tue Sep 19 14:26:22 2006 +0100
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,63 +0,0 @@
     7.4 -diff -pruN ../pristine-linux-2.6.16.13/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
     7.5 ---- ../pristine-linux-2.6.16.13/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-05-02 22:38:44.000000000 +0100
     7.6 -+++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-05-16 13:28:19.000000000 +0100
     7.7 -@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
     7.8 - 	if (hdrsize < sizeof(*hdr))
     7.9 - 		return 1;
    7.10 - 
    7.11 --	hdr->check = ip_nat_cheat_check(~oldip, newip,
    7.12 -+#ifdef CONFIG_XEN
    7.13 -+	if ((*pskb)->proto_csum_blank)
    7.14 -+		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    7.15 -+	else
    7.16 -+#endif
    7.17 -+		hdr->check = ip_nat_cheat_check(~oldip, newip,
    7.18 - 					ip_nat_cheat_check(oldport ^ 0xFFFF,
    7.19 - 							   newport,
    7.20 - 							   hdr->check));
    7.21 -diff -pruN ../pristine-linux-2.6.16.13/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
    7.22 ---- ../pristine-linux-2.6.16.13/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-05-02 22:38:44.000000000 +0100
    7.23 -+++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-05-16 13:30:14.000000000 +0100
    7.24 -@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
    7.25 - 		newport = tuple->dst.u.udp.port;
    7.26 - 		portptr = &hdr->dest;
    7.27 - 	}
    7.28 --	if (hdr->check) /* 0 is a special case meaning no checksum */
    7.29 --		hdr->check = ip_nat_cheat_check(~oldip, newip,
    7.30 -+	if (hdr->check) { /* 0 is a special case meaning no checksum */
    7.31 -+#ifdef CONFIG_XEN
    7.32 -+		if ((*pskb)->proto_csum_blank)
    7.33 -+			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    7.34 -+		else
    7.35 -+#endif
    7.36 -+			hdr->check = ip_nat_cheat_check(~oldip, newip,
    7.37 - 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
    7.38 - 							   newport,
    7.39 - 							   hdr->check));
    7.40 -+	}
    7.41 - 	*portptr = newport;
    7.42 - 	return 1;
    7.43 - }
    7.44 -diff -pruN ../pristine-linux-2.6.16.13/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
    7.45 ---- ../pristine-linux-2.6.16.13/net/ipv4/xfrm4_output.c	2006-05-02 22:38:44.000000000 +0100
    7.46 -+++ ./net/ipv4/xfrm4_output.c	2006-05-04 17:41:37.000000000 +0100
    7.47 -@@ -17,6 +17,8 @@
    7.48 - #include <net/xfrm.h>
    7.49 - #include <net/icmp.h>
    7.50 - 
    7.51 -+extern int skb_checksum_setup(struct sk_buff *skb);
    7.52 -+
    7.53 - /* Add encapsulation header.
    7.54 -  *
    7.55 -  * In transport mode, the IP header will be moved forward to make space
    7.56 -@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
    7.57 - 	struct xfrm_state *x = dst->xfrm;
    7.58 - 	int err;
    7.59 - 	
    7.60 -+	err = skb_checksum_setup(skb);
    7.61 -+	if (err)
    7.62 -+		goto error_nolock;
    7.63 -+
    7.64 - 	if (skb->ip_summed == CHECKSUM_HW) {
    7.65 - 		err = skb_checksum_help(skb, 0);
    7.66 - 		if (err)
     8.1 --- a/patches/linux-2.6.16.13/net-gso-0-base.patch	Tue Sep 19 14:26:22 2006 +0100
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,2907 +0,0 @@
     8.4 -diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
     8.5 -index 3c0a5ba..847cedb 100644
     8.6 ---- a/Documentation/networking/netdevices.txt
     8.7 -+++ b/Documentation/networking/netdevices.txt
     8.8 -@@ -42,9 +42,9 @@ dev->get_stats:
     8.9 - 	Context: nominally process, but don't sleep inside an rwlock
    8.10 - 
    8.11 - dev->hard_start_xmit:
    8.12 --	Synchronization: dev->xmit_lock spinlock.
    8.13 -+	Synchronization: netif_tx_lock spinlock.
    8.14 - 	When the driver sets NETIF_F_LLTX in dev->features this will be
    8.15 --	called without holding xmit_lock. In this case the driver 
    8.16 -+	called without holding netif_tx_lock. In this case the driver
    8.17 - 	has to lock by itself when needed. It is recommended to use a try lock
    8.18 - 	for this and return -1 when the spin lock fails. 
    8.19 - 	The locking there should also properly protect against 
    8.20 -@@ -62,12 +62,12 @@ dev->hard_start_xmit:
    8.21 - 	  Only valid when NETIF_F_LLTX is set.
    8.22 - 
    8.23 - dev->tx_timeout:
    8.24 --	Synchronization: dev->xmit_lock spinlock.
    8.25 -+	Synchronization: netif_tx_lock spinlock.
    8.26 - 	Context: BHs disabled
    8.27 - 	Notes: netif_queue_stopped() is guaranteed true
    8.28 - 
    8.29 - dev->set_multicast_list:
    8.30 --	Synchronization: dev->xmit_lock spinlock.
    8.31 -+	Synchronization: netif_tx_lock spinlock.
    8.32 - 	Context: BHs disabled
    8.33 - 
    8.34 - dev->poll:
    8.35 -diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
    8.36 -index 4be9769..2e7cac7 100644
    8.37 ---- a/drivers/block/aoe/aoenet.c
    8.38 -+++ b/drivers/block/aoe/aoenet.c
    8.39 -@@ -95,9 +95,8 @@ mac_addr(char addr[6])
    8.40 - static struct sk_buff *
    8.41 - skb_check(struct sk_buff *skb)
    8.42 - {
    8.43 --	if (skb_is_nonlinear(skb))
    8.44 - 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
    8.45 --	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
    8.46 -+	if (skb_linearize(skb)) {
    8.47 - 		dev_kfree_skb(skb);
    8.48 - 		return NULL;
    8.49 - 	}
    8.50 -diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    8.51 -index a2408d7..c90e620 100644
    8.52 ---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    8.53 -+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    8.54 -@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
    8.55 - 
    8.56 - 	ipoib_mcast_stop_thread(dev, 0);
    8.57 - 
    8.58 --	spin_lock_irqsave(&dev->xmit_lock, flags);
    8.59 -+	local_irq_save(flags);
    8.60 -+	netif_tx_lock(dev);
    8.61 - 	spin_lock(&priv->lock);
    8.62 - 
    8.63 - 	/*
    8.64 -@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
    8.65 - 	}
    8.66 - 
    8.67 - 	spin_unlock(&priv->lock);
    8.68 --	spin_unlock_irqrestore(&dev->xmit_lock, flags);
    8.69 -+	netif_tx_unlock(dev);
    8.70 -+	local_irq_restore(flags);
    8.71 - 
    8.72 - 	/* We have to cancel outside of the spinlock */
    8.73 - 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
    8.74 -diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
    8.75 -index 6711eb6..8d2351f 100644
    8.76 ---- a/drivers/media/dvb/dvb-core/dvb_net.c
    8.77 -+++ b/drivers/media/dvb/dvb-core/dvb_net.c
    8.78 -@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
    8.79 - 
    8.80 - 	dvb_net_feed_stop(dev);
    8.81 - 	priv->rx_mode = RX_MODE_UNI;
    8.82 --	spin_lock_bh(&dev->xmit_lock);
    8.83 -+	netif_tx_lock_bh(dev);
    8.84 - 
    8.85 - 	if (dev->flags & IFF_PROMISC) {
    8.86 - 		dprintk("%s: promiscuous mode\n", dev->name);
    8.87 -@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
    8.88 - 		}
    8.89 - 	}
    8.90 - 
    8.91 --	spin_unlock_bh(&dev->xmit_lock);
    8.92 -+	netif_tx_unlock_bh(dev);
    8.93 - 	dvb_net_feed_start(dev);
    8.94 - }
    8.95 - 
    8.96 -diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
    8.97 -index dd41049..6615583 100644
    8.98 ---- a/drivers/net/8139cp.c
    8.99 -+++ b/drivers/net/8139cp.c
   8.100 -@@ -794,7 +794,7 @@ #endif
   8.101 - 	entry = cp->tx_head;
   8.102 - 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   8.103 - 	if (dev->features & NETIF_F_TSO)
   8.104 --		mss = skb_shinfo(skb)->tso_size;
   8.105 -+		mss = skb_shinfo(skb)->gso_size;
   8.106 - 
   8.107 - 	if (skb_shinfo(skb)->nr_frags == 0) {
   8.108 - 		struct cp_desc *txd = &cp->tx_ring[entry];
   8.109 -diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
   8.110 -index a24200d..b5e39a1 100644
   8.111 ---- a/drivers/net/bnx2.c
   8.112 -+++ b/drivers/net/bnx2.c
   8.113 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
   8.114 - 		skb = tx_buf->skb;
   8.115 - #ifdef BCM_TSO 
   8.116 - 		/* partial BD completions possible with TSO packets */
   8.117 --		if (skb_shinfo(skb)->tso_size) {
   8.118 -+		if (skb_shinfo(skb)->gso_size) {
   8.119 - 			u16 last_idx, last_ring_idx;
   8.120 - 
   8.121 - 			last_idx = sw_cons +
   8.122 -@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
   8.123 - 	return 1;
   8.124 - }
   8.125 - 
   8.126 --/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
   8.127 -+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
   8.128 -  * from set_multicast.
   8.129 -  */
   8.130 - static void
   8.131 -@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
   8.132 - }
   8.133 - #endif
   8.134 - 
   8.135 --/* Called with dev->xmit_lock.
   8.136 -+/* Called with netif_tx_lock.
   8.137 -  * hard_start_xmit is pseudo-lockless - a lock is only required when
   8.138 -  * the tx queue is full. This way, we get the benefit of lockless
   8.139 -  * operations most of the time without the complexities to handle
   8.140 -@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
   8.141 - 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
   8.142 - 	}
   8.143 - #ifdef BCM_TSO 
   8.144 --	if ((mss = skb_shinfo(skb)->tso_size) &&
   8.145 -+	if ((mss = skb_shinfo(skb)->gso_size) &&
   8.146 - 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
   8.147 - 		u32 tcp_opt_len, ip_tcp_len;
   8.148 - 
   8.149 -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
   8.150 -index bcf9f17..e970921 100644
   8.151 ---- a/drivers/net/bonding/bond_main.c
   8.152 -+++ b/drivers/net/bonding/bond_main.c
   8.153 -@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
   8.154 - }
   8.155 - 
   8.156 - #define BOND_INTERSECT_FEATURES \
   8.157 --	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
   8.158 --	NETIF_F_TSO|NETIF_F_UFO)
   8.159 -+	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
   8.160 - 
   8.161 - /* 
   8.162 -  * Compute the common dev->feature set available to all slaves.  Some
   8.163 -@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
   8.164 - 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
   8.165 - 
   8.166 - 	if ((features & NETIF_F_SG) && 
   8.167 --	    !(features & (NETIF_F_IP_CSUM |
   8.168 --			  NETIF_F_NO_CSUM |
   8.169 --			  NETIF_F_HW_CSUM)))
   8.170 -+	    !(features & NETIF_F_ALL_CSUM))
   8.171 - 		features &= ~NETIF_F_SG;
   8.172 - 
   8.173 - 	/* 
   8.174 -@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
   8.175 - 	 */
   8.176 - 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
   8.177 - 
   8.178 --	/* don't acquire bond device's xmit_lock when 
   8.179 -+	/* don't acquire bond device's netif_tx_lock when
   8.180 - 	 * transmitting */
   8.181 - 	bond_dev->features |= NETIF_F_LLTX;
   8.182 - 
   8.183 -diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
   8.184 -index 30ff8ea..7b7d360 100644
   8.185 ---- a/drivers/net/chelsio/sge.c
   8.186 -+++ b/drivers/net/chelsio/sge.c
   8.187 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   8.188 - 	struct cpl_tx_pkt *cpl;
   8.189 - 
   8.190 - #ifdef NETIF_F_TSO
   8.191 --	if (skb_shinfo(skb)->tso_size) {
   8.192 -+	if (skb_shinfo(skb)->gso_size) {
   8.193 - 		int eth_type;
   8.194 - 		struct cpl_tx_pkt_lso *hdr;
   8.195 - 
   8.196 -@@ -1434,7 +1434,7 @@ #ifdef NETIF_F_TSO
   8.197 - 		hdr->ip_hdr_words = skb->nh.iph->ihl;
   8.198 - 		hdr->tcp_hdr_words = skb->h.th->doff;
   8.199 - 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
   8.200 --						skb_shinfo(skb)->tso_size));
   8.201 -+						skb_shinfo(skb)->gso_size));
   8.202 - 		hdr->len = htonl(skb->len - sizeof(*hdr));
   8.203 - 		cpl = (struct cpl_tx_pkt *)hdr;
   8.204 - 		sge->stats.tx_lso_pkts++;
   8.205 -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   8.206 -index fa29402..681d284 100644
   8.207 ---- a/drivers/net/e1000/e1000_main.c
   8.208 -+++ b/drivers/net/e1000/e1000_main.c
   8.209 -@@ -2526,7 +2526,7 @@ #ifdef NETIF_F_TSO
   8.210 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   8.211 - 	int err;
   8.212 - 
   8.213 --	if (skb_shinfo(skb)->tso_size) {
   8.214 -+	if (skb_shinfo(skb)->gso_size) {
   8.215 - 		if (skb_header_cloned(skb)) {
   8.216 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   8.217 - 			if (err)
   8.218 -@@ -2534,7 +2534,7 @@ #ifdef NETIF_F_TSO
   8.219 - 		}
   8.220 - 
   8.221 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   8.222 --		mss = skb_shinfo(skb)->tso_size;
   8.223 -+		mss = skb_shinfo(skb)->gso_size;
   8.224 - 		if (skb->protocol == ntohs(ETH_P_IP)) {
   8.225 - 			skb->nh.iph->tot_len = 0;
   8.226 - 			skb->nh.iph->check = 0;
   8.227 -@@ -2651,7 +2651,7 @@ #ifdef NETIF_F_TSO
   8.228 - 		 * tso gets written back prematurely before the data is fully
   8.229 - 		 * DMAd to the controller */
   8.230 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
   8.231 --				!skb_shinfo(skb)->tso_size) {
   8.232 -+				!skb_shinfo(skb)->gso_size) {
   8.233 - 			tx_ring->last_tx_tso = 0;
   8.234 - 			size -= 4;
   8.235 - 		}
   8.236 -@@ -2893,7 +2893,7 @@ #endif
   8.237 - 	}
   8.238 - 
   8.239 - #ifdef NETIF_F_TSO
   8.240 --	mss = skb_shinfo(skb)->tso_size;
   8.241 -+	mss = skb_shinfo(skb)->gso_size;
   8.242 - 	/* The controller does a simple calculation to 
   8.243 - 	 * make sure there is enough room in the FIFO before
   8.244 - 	 * initiating the DMA for each buffer.  The calc is:
   8.245 -@@ -2935,7 +2935,7 @@ #endif
   8.246 - #ifdef NETIF_F_TSO
   8.247 - 	/* Controller Erratum workaround */
   8.248 - 	if (!skb->data_len && tx_ring->last_tx_tso &&
   8.249 --		!skb_shinfo(skb)->tso_size)
   8.250 -+		!skb_shinfo(skb)->gso_size)
   8.251 - 		count++;
   8.252 - #endif
   8.253 - 
   8.254 -diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
   8.255 -index 3682ec6..c35f16e 100644
   8.256 ---- a/drivers/net/forcedeth.c
   8.257 -+++ b/drivers/net/forcedeth.c
   8.258 -@@ -482,9 +482,9 @@ #define LPA_1000HALF	0x0400
   8.259 -  * critical parts:
   8.260 -  * - rx is (pseudo-) lockless: it relies on the single-threading provided
   8.261 -  *	by the arch code for interrupts.
   8.262 -- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
   8.263 -+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
   8.264 -  *	needs dev->priv->lock :-(
   8.265 -- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
   8.266 -+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
   8.267 -  */
   8.268 - 
   8.269 - /* in dev: base, irq */
   8.270 -@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
   8.271 - 
   8.272 - /*
   8.273 -  * nv_start_xmit: dev->hard_start_xmit function
   8.274 -- * Called with dev->xmit_lock held.
   8.275 -+ * Called with netif_tx_lock held.
   8.276 -  */
   8.277 - static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
   8.278 - {
   8.279 -@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
   8.280 - 	np->tx_skbuff[nr] = skb;
   8.281 - 
   8.282 - #ifdef NETIF_F_TSO
   8.283 --	if (skb_shinfo(skb)->tso_size)
   8.284 --		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
   8.285 -+	if (skb_shinfo(skb)->gso_size)
   8.286 -+		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   8.287 - 	else
   8.288 - #endif
   8.289 - 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
   8.290 -@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
   8.291 - 
   8.292 - /*
   8.293 -  * nv_tx_timeout: dev->tx_timeout function
   8.294 -- * Called with dev->xmit_lock held.
   8.295 -+ * Called with netif_tx_lock held.
   8.296 -  */
   8.297 - static void nv_tx_timeout(struct net_device *dev)
   8.298 - {
   8.299 -@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
   8.300 - 		 * Changing the MTU is a rare event, it shouldn't matter.
   8.301 - 		 */
   8.302 - 		disable_irq(dev->irq);
   8.303 --		spin_lock_bh(&dev->xmit_lock);
   8.304 -+		netif_tx_lock_bh(dev);
   8.305 - 		spin_lock(&np->lock);
   8.306 - 		/* stop engines */
   8.307 - 		nv_stop_rx(dev);
   8.308 -@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
   8.309 - 		nv_start_rx(dev);
   8.310 - 		nv_start_tx(dev);
   8.311 - 		spin_unlock(&np->lock);
   8.312 --		spin_unlock_bh(&dev->xmit_lock);
   8.313 -+		netif_tx_unlock_bh(dev);
   8.314 - 		enable_irq(dev->irq);
   8.315 - 	}
   8.316 - 	return 0;
   8.317 -@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
   8.318 - 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
   8.319 - 
   8.320 - 	if (netif_running(dev)) {
   8.321 --		spin_lock_bh(&dev->xmit_lock);
   8.322 -+		netif_tx_lock_bh(dev);
   8.323 - 		spin_lock_irq(&np->lock);
   8.324 - 
   8.325 - 		/* stop rx engine */
   8.326 -@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
   8.327 - 		/* restart rx engine */
   8.328 - 		nv_start_rx(dev);
   8.329 - 		spin_unlock_irq(&np->lock);
   8.330 --		spin_unlock_bh(&dev->xmit_lock);
   8.331 -+		netif_tx_unlock_bh(dev);
   8.332 - 	} else {
   8.333 - 		nv_copy_mac_to_hw(dev);
   8.334 - 	}
   8.335 -@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
   8.336 - 
   8.337 - /*
   8.338 -  * nv_set_multicast: dev->set_multicast function
   8.339 -- * Called with dev->xmit_lock held.
   8.340 -+ * Called with netif_tx_lock held.
   8.341 -  */
   8.342 - static void nv_set_multicast(struct net_device *dev)
   8.343 - {
   8.344 -diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
   8.345 -index 102c1f0..d12605f 100644
   8.346 ---- a/drivers/net/hamradio/6pack.c
   8.347 -+++ b/drivers/net/hamradio/6pack.c
   8.348 -@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
   8.349 - {
   8.350 - 	struct sockaddr_ax25 *sa = addr;
   8.351 - 
   8.352 --	spin_lock_irq(&dev->xmit_lock);
   8.353 -+	netif_tx_lock_bh(dev);
   8.354 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   8.355 --	spin_unlock_irq(&dev->xmit_lock);
   8.356 -+	netif_tx_unlock_bh(dev);
   8.357 - 
   8.358 - 	return 0;
   8.359 - }
   8.360 -@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
   8.361 - 			break;
   8.362 - 		}
   8.363 - 
   8.364 --		spin_lock_irq(&dev->xmit_lock);
   8.365 -+		netif_tx_lock_bh(dev);
   8.366 - 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
   8.367 --		spin_unlock_irq(&dev->xmit_lock);
   8.368 -+		netif_tx_unlock_bh(dev);
   8.369 - 
   8.370 - 		err = 0;
   8.371 - 		break;
   8.372 -diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
   8.373 -index dc5e9d5..5c66f5a 100644
   8.374 ---- a/drivers/net/hamradio/mkiss.c
   8.375 -+++ b/drivers/net/hamradio/mkiss.c
   8.376 -@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
   8.377 - {
   8.378 - 	struct sockaddr_ax25 *sa = addr;
   8.379 - 
   8.380 --	spin_lock_irq(&dev->xmit_lock);
   8.381 -+	netif_tx_lock_bh(dev);
   8.382 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   8.383 --	spin_unlock_irq(&dev->xmit_lock);
   8.384 -+	netif_tx_unlock_bh(dev);
   8.385 - 
   8.386 - 	return 0;
   8.387 - }
   8.388 -@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
   8.389 - 			break;
   8.390 - 		}
   8.391 - 
   8.392 --		spin_lock_irq(&dev->xmit_lock);
   8.393 -+		netif_tx_lock_bh(dev);
   8.394 - 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
   8.395 --		spin_unlock_irq(&dev->xmit_lock);
   8.396 -+		netif_tx_unlock_bh(dev);
   8.397 - 
   8.398 - 		err = 0;
   8.399 - 		break;
   8.400 -diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
   8.401 -index 31fb2d7..2e222ef 100644
   8.402 ---- a/drivers/net/ifb.c
   8.403 -+++ b/drivers/net/ifb.c
   8.404 -@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
   8.405 - 	dp->st_task_enter++;
   8.406 - 	if ((skb = skb_peek(&dp->tq)) == NULL) {
   8.407 - 		dp->st_txq_refl_try++;
   8.408 --		if (spin_trylock(&_dev->xmit_lock)) {
   8.409 -+		if (netif_tx_trylock(_dev)) {
   8.410 - 			dp->st_rxq_enter++;
   8.411 - 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
   8.412 - 				skb_queue_tail(&dp->tq, skb);
   8.413 - 				dp->st_rx2tx_tran++;
   8.414 - 			}
   8.415 --			spin_unlock(&_dev->xmit_lock);
   8.416 -+			netif_tx_unlock(_dev);
   8.417 - 		} else {
   8.418 - 			/* reschedule */
   8.419 - 			dp->st_rxq_notenter++;
   8.420 -@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
   8.421 - 		}
   8.422 - 	}
   8.423 - 
   8.424 --	if (spin_trylock(&_dev->xmit_lock)) {
   8.425 -+	if (netif_tx_trylock(_dev)) {
   8.426 - 		dp->st_rxq_check++;
   8.427 - 		if ((skb = skb_peek(&dp->rq)) == NULL) {
   8.428 - 			dp->tasklet_pending = 0;
   8.429 -@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
   8.430 - 				netif_wake_queue(_dev);
   8.431 - 		} else {
   8.432 - 			dp->st_rxq_rsch++;
   8.433 --			spin_unlock(&_dev->xmit_lock);
   8.434 -+			netif_tx_unlock(_dev);
   8.435 - 			goto resched;
   8.436 - 		}
   8.437 --		spin_unlock(&_dev->xmit_lock);
   8.438 -+		netif_tx_unlock(_dev);
   8.439 - 	} else {
   8.440 - resched:
   8.441 - 		dp->tasklet_pending = 1;
   8.442 -diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
   8.443 -index a9f49f0..339d4a7 100644
   8.444 ---- a/drivers/net/irda/vlsi_ir.c
   8.445 -+++ b/drivers/net/irda/vlsi_ir.c
   8.446 -@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
   8.447 - 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
   8.448 - 			    	break;
   8.449 - 			udelay(100);
   8.450 --			/* must not sleep here - we are called under xmit_lock! */
   8.451 -+			/* must not sleep here - called under netif_tx_lock! */
   8.452 - 		}
   8.453 - 	}
   8.454 - 
   8.455 -diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
   8.456 -index f9f77e4..bdab369 100644
   8.457 ---- a/drivers/net/ixgb/ixgb_main.c
   8.458 -+++ b/drivers/net/ixgb/ixgb_main.c
   8.459 -@@ -1163,7 +1163,7 @@ #ifdef NETIF_F_TSO
   8.460 - 	uint16_t ipcse, tucse, mss;
   8.461 - 	int err;
   8.462 - 
   8.463 --	if(likely(skb_shinfo(skb)->tso_size)) {
   8.464 -+	if(likely(skb_shinfo(skb)->gso_size)) {
   8.465 - 		if (skb_header_cloned(skb)) {
   8.466 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   8.467 - 			if (err)
   8.468 -@@ -1171,7 +1171,7 @@ #ifdef NETIF_F_TSO
   8.469 - 		}
   8.470 - 
   8.471 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   8.472 --		mss = skb_shinfo(skb)->tso_size;
   8.473 -+		mss = skb_shinfo(skb)->gso_size;
   8.474 - 		skb->nh.iph->tot_len = 0;
   8.475 - 		skb->nh.iph->check = 0;
   8.476 - 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
   8.477 -diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
   8.478 -index 690a1aa..9bcaa80 100644
   8.479 ---- a/drivers/net/loopback.c
   8.480 -+++ b/drivers/net/loopback.c
   8.481 -@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
   8.482 - 	struct iphdr *iph = skb->nh.iph;
   8.483 - 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
   8.484 - 	unsigned int doffset = (iph->ihl + th->doff) * 4;
   8.485 --	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
   8.486 -+	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
   8.487 - 	unsigned int offset = 0;
   8.488 - 	u32 seq = ntohl(th->seq);
   8.489 - 	u16 id  = ntohs(iph->id);
   8.490 -@@ -139,7 +139,7 @@ #ifndef LOOPBACK_MUST_CHECKSUM
   8.491 - #endif
   8.492 - 
   8.493 - #ifdef LOOPBACK_TSO
   8.494 --	if (skb_shinfo(skb)->tso_size) {
   8.495 -+	if (skb_shinfo(skb)->gso_size) {
   8.496 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   8.497 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   8.498 - 
   8.499 -diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
   8.500 -index c0998ef..0fac9d5 100644
   8.501 ---- a/drivers/net/mv643xx_eth.c
   8.502 -+++ b/drivers/net/mv643xx_eth.c
   8.503 -@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
   8.504 - 
   8.505 - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
   8.506 - 	if (has_tiny_unaligned_frags(skb)) {
   8.507 --		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
   8.508 -+		if (__skb_linearize(skb)) {
   8.509 - 			stats->tx_dropped++;
   8.510 - 			printk(KERN_DEBUG "%s: failed to linearize tiny "
   8.511 - 					"unaligned fragment\n", dev->name);
   8.512 -diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
   8.513 -index 9d6d254..c9ed624 100644
   8.514 ---- a/drivers/net/natsemi.c
   8.515 -+++ b/drivers/net/natsemi.c
   8.516 -@@ -323,12 +323,12 @@ performance critical codepaths:
   8.517 - The rx process only runs in the interrupt handler. Access from outside
   8.518 - the interrupt handler is only permitted after disable_irq().
   8.519 - 
   8.520 --The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
   8.521 -+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
   8.522 - is set, then access is permitted under spin_lock_irq(&np->lock).
   8.523 - 
   8.524 - Thus configuration functions that want to access everything must call
   8.525 - 	disable_irq(dev->irq);
   8.526 --	spin_lock_bh(dev->xmit_lock);
   8.527 -+	netif_tx_lock_bh(dev);
   8.528 - 	spin_lock_irq(&np->lock);
   8.529 - 
   8.530 - IV. Notes
   8.531 -diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
   8.532 -index 8cc0d0b..e53b313 100644
   8.533 ---- a/drivers/net/r8169.c
   8.534 -+++ b/drivers/net/r8169.c
   8.535 -@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
   8.536 - static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
   8.537 - {
   8.538 - 	if (dev->features & NETIF_F_TSO) {
   8.539 --		u32 mss = skb_shinfo(skb)->tso_size;
   8.540 -+		u32 mss = skb_shinfo(skb)->gso_size;
   8.541 - 
   8.542 - 		if (mss)
   8.543 - 			return LargeSend | ((mss & MSSMask) << MSSShift);
   8.544 -diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
   8.545 -index b7f00d6..439f45f 100644
   8.546 ---- a/drivers/net/s2io.c
   8.547 -+++ b/drivers/net/s2io.c
   8.548 -@@ -3522,8 +3522,8 @@ #endif
   8.549 - 	txdp->Control_1 = 0;
   8.550 - 	txdp->Control_2 = 0;
   8.551 - #ifdef NETIF_F_TSO
   8.552 --	mss = skb_shinfo(skb)->tso_size;
   8.553 --	if (mss) {
   8.554 -+	mss = skb_shinfo(skb)->gso_size;
   8.555 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
   8.556 - 		txdp->Control_1 |= TXD_TCP_LSO_EN;
   8.557 - 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
   8.558 - 	}
   8.559 -@@ -3543,10 +3543,10 @@ #endif
   8.560 - 	}
   8.561 - 
   8.562 - 	frg_len = skb->len - skb->data_len;
   8.563 --	if (skb_shinfo(skb)->ufo_size) {
   8.564 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
   8.565 - 		int ufo_size;
   8.566 - 
   8.567 --		ufo_size = skb_shinfo(skb)->ufo_size;
   8.568 -+		ufo_size = skb_shinfo(skb)->gso_size;
   8.569 - 		ufo_size &= ~7;
   8.570 - 		txdp->Control_1 |= TXD_UFO_EN;
   8.571 - 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
   8.572 -@@ -3572,7 +3572,7 @@ #endif
   8.573 - 	txdp->Host_Control = (unsigned long) skb;
   8.574 - 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
   8.575 - 
   8.576 --	if (skb_shinfo(skb)->ufo_size)
   8.577 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   8.578 - 		txdp->Control_1 |= TXD_UFO_EN;
   8.579 - 
   8.580 - 	frg_cnt = skb_shinfo(skb)->nr_frags;
   8.581 -@@ -3587,12 +3587,12 @@ #endif
   8.582 - 		    (sp->pdev, frag->page, frag->page_offset,
   8.583 - 		     frag->size, PCI_DMA_TODEVICE);
   8.584 - 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
   8.585 --		if (skb_shinfo(skb)->ufo_size)
   8.586 -+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   8.587 - 			txdp->Control_1 |= TXD_UFO_EN;
   8.588 - 	}
   8.589 - 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
   8.590 - 
   8.591 --	if (skb_shinfo(skb)->ufo_size)
   8.592 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   8.593 - 		frg_cnt++; /* as Txd0 was used for inband header */
   8.594 - 
   8.595 - 	tx_fifo = mac_control->tx_FIFO_start[queue];
   8.596 -@@ -3606,7 +3606,7 @@ #ifdef NETIF_F_TSO
   8.597 - 	if (mss)
   8.598 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
   8.599 - #endif
   8.600 --	if (skb_shinfo(skb)->ufo_size)
   8.601 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   8.602 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
   8.603 - 	writeq(val64, &tx_fifo->List_Control);
   8.604 - 
   8.605 -diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
   8.606 -index 0618cd5..2a55eb3 100644
   8.607 ---- a/drivers/net/sky2.c
   8.608 -+++ b/drivers/net/sky2.c
   8.609 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   8.610 - 	count = sizeof(dma_addr_t) / sizeof(u32);
   8.611 - 	count += skb_shinfo(skb)->nr_frags * count;
   8.612 - 
   8.613 --	if (skb_shinfo(skb)->tso_size)
   8.614 -+	if (skb_shinfo(skb)->gso_size)
   8.615 - 		++count;
   8.616 - 
   8.617 - 	if (skb->ip_summed == CHECKSUM_HW)
   8.618 -@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
   8.619 - 	}
   8.620 - 
   8.621 - 	/* Check for TCP Segmentation Offload */
   8.622 --	mss = skb_shinfo(skb)->tso_size;
   8.623 -+	mss = skb_shinfo(skb)->gso_size;
   8.624 - 	if (mss != 0) {
   8.625 - 		/* just drop the packet if non-linear expansion fails */
   8.626 - 		if (skb_header_cloned(skb) &&
   8.627 -diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
   8.628 -index caf4102..fc9164a 100644
   8.629 ---- a/drivers/net/tg3.c
   8.630 -+++ b/drivers/net/tg3.c
   8.631 -@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
   8.632 - #if TG3_TSO_SUPPORT != 0
   8.633 - 	mss = 0;
   8.634 - 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
   8.635 --	    (mss = skb_shinfo(skb)->tso_size) != 0) {
   8.636 -+	    (mss = skb_shinfo(skb)->gso_size) != 0) {
   8.637 - 		int tcp_opt_len, ip_tcp_len;
   8.638 - 
   8.639 - 		if (skb_header_cloned(skb) &&
   8.640 -diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
   8.641 -index 5b1af39..11de5af 100644
   8.642 ---- a/drivers/net/tulip/winbond-840.c
   8.643 -+++ b/drivers/net/tulip/winbond-840.c
   8.644 -@@ -1605,11 +1605,11 @@ #ifdef CONFIG_PM
   8.645 -  * - get_stats:
   8.646 -  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
   8.647 -  * - hard_start_xmit:
   8.648 -- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
   8.649 -+ * 	synchronize_irq + netif_tx_disable;
   8.650 -  * - tx_timeout:
   8.651 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   8.652 -+ * 	netif_device_detach + netif_tx_disable;
   8.653 -  * - set_multicast_list
   8.654 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   8.655 -+ * 	netif_device_detach + netif_tx_disable;
   8.656 -  * - interrupt handler
   8.657 -  * 	doesn't touch hw if not present, synchronize_irq waits for
   8.658 -  * 	running instances of the interrupt handler.
   8.659 -@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
   8.660 - 		netif_device_detach(dev);
   8.661 - 		update_csr6(dev, 0);
   8.662 - 		iowrite32(0, ioaddr + IntrEnable);
   8.663 --		netif_stop_queue(dev);
   8.664 - 		spin_unlock_irq(&np->lock);
   8.665 - 
   8.666 --		spin_unlock_wait(&dev->xmit_lock);
   8.667 - 		synchronize_irq(dev->irq);
   8.668 -+		netif_tx_disable(dev);
   8.669 - 	
   8.670 - 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
   8.671 - 
   8.672 -diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
   8.673 -index 4c76cb7..30c48c9 100644
   8.674 ---- a/drivers/net/typhoon.c
   8.675 -+++ b/drivers/net/typhoon.c
   8.676 -@@ -340,7 +340,7 @@ #define typhoon_synchronize_irq(x) synch
   8.677 - #endif
   8.678 - 
   8.679 - #if defined(NETIF_F_TSO)
   8.680 --#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
   8.681 -+#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
   8.682 - #define TSO_NUM_DESCRIPTORS	2
   8.683 - #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
   8.684 - #else
   8.685 -diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
   8.686 -index ed1f837..2eb6b5f 100644
   8.687 ---- a/drivers/net/via-velocity.c
   8.688 -+++ b/drivers/net/via-velocity.c
   8.689 -@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
   8.690 - 
   8.691 - 	int pktlen = skb->len;
   8.692 - 
   8.693 -+#ifdef VELOCITY_ZERO_COPY_SUPPORT
   8.694 -+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
   8.695 -+		kfree_skb(skb);
   8.696 -+		return 0;
   8.697 -+	}
   8.698 -+#endif
   8.699 -+
   8.700 - 	spin_lock_irqsave(&vptr->lock, flags);
   8.701 - 
   8.702 - 	index = vptr->td_curr[qnum];
   8.703 -@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
   8.704 - 	 */
   8.705 - 	if (pktlen < ETH_ZLEN) {
   8.706 - 		/* Cannot occur until ZC support */
   8.707 --		if(skb_linearize(skb, GFP_ATOMIC))
   8.708 --			return 0; 
   8.709 - 		pktlen = ETH_ZLEN;
   8.710 - 		memcpy(tdinfo->buf, skb->data, skb->len);
   8.711 - 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
   8.712 -@@ -1933,7 +1938,6 @@ #ifdef VELOCITY_ZERO_COPY_SUPPORT
   8.713 - 		int nfrags = skb_shinfo(skb)->nr_frags;
   8.714 - 		tdinfo->skb = skb;
   8.715 - 		if (nfrags > 6) {
   8.716 --			skb_linearize(skb, GFP_ATOMIC);
   8.717 - 			memcpy(tdinfo->buf, skb->data, skb->len);
   8.718 - 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
   8.719 - 			td_ptr->tdesc0.pktsize = 
   8.720 -diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
   8.721 -index 6fd0bf7..75237c1 100644
   8.722 ---- a/drivers/net/wireless/orinoco.c
   8.723 -+++ b/drivers/net/wireless/orinoco.c
   8.724 -@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
   8.725 - 	/* Set promiscuity / multicast*/
   8.726 - 	priv->promiscuous = 0;
   8.727 - 	priv->mc_count = 0;
   8.728 --	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
   8.729 -+
   8.730 -+	/* FIXME: what about netif_tx_lock */
   8.731 -+	__orinoco_set_multicast_list(dev);
   8.732 - 
   8.733 - 	return 0;
   8.734 - }
   8.735 -diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
   8.736 -index 82cb4af..57cec40 100644
   8.737 ---- a/drivers/s390/net/qeth_eddp.c
   8.738 -+++ b/drivers/s390/net/qeth_eddp.c
   8.739 -@@ -421,7 +421,7 @@ #endif /* CONFIG_QETH_VLAN */
   8.740 -        }
   8.741 - 	tcph = eddp->skb->h.th;
   8.742 - 	while (eddp->skb_offset < eddp->skb->len) {
   8.743 --		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
   8.744 -+		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
   8.745 - 			       (int)(eddp->skb->len - eddp->skb_offset));
   8.746 - 		/* prepare qdio hdr */
   8.747 - 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
   8.748 -@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
   8.749 - 	
   8.750 - 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
   8.751 - 	/* can we put multiple skbs in one page? */
   8.752 --	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
   8.753 -+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
   8.754 - 	if (skbs_per_page > 1){
   8.755 --		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
   8.756 -+		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
   8.757 - 				 skbs_per_page + 1;
   8.758 - 		ctx->elements_per_skb = 1;
   8.759 - 	} else {
   8.760 - 		/* no -> how many elements per skb? */
   8.761 --		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
   8.762 -+		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
   8.763 - 				     PAGE_SIZE) >> PAGE_SHIFT;
   8.764 - 		ctx->num_pages = ctx->elements_per_skb *
   8.765 --				 (skb_shinfo(skb)->tso_segs + 1);
   8.766 -+				 (skb_shinfo(skb)->gso_segs + 1);
   8.767 - 	}
   8.768 - 	ctx->num_elements = ctx->elements_per_skb *
   8.769 --			    (skb_shinfo(skb)->tso_segs + 1);
   8.770 -+			    (skb_shinfo(skb)->gso_segs + 1);
   8.771 - }
   8.772 - 
   8.773 - static inline struct qeth_eddp_context *
   8.774 -diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
   8.775 -index dba7f7f..d9cc997 100644
   8.776 ---- a/drivers/s390/net/qeth_main.c
   8.777 -+++ b/drivers/s390/net/qeth_main.c
   8.778 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
   8.779 - 	queue = card->qdio.out_qs
   8.780 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
   8.781 - 
   8.782 --	if (skb_shinfo(skb)->tso_size)
   8.783 -+	if (skb_shinfo(skb)->gso_size)
   8.784 - 		large_send = card->options.large_send;
   8.785 - 
   8.786 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
   8.787 -@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
   8.788 - 		card->stats.tx_packets++;
   8.789 - 		card->stats.tx_bytes += skb->len;
   8.790 - #ifdef CONFIG_QETH_PERF_STATS
   8.791 --		if (skb_shinfo(skb)->tso_size &&
   8.792 -+		if (skb_shinfo(skb)->gso_size &&
   8.793 - 		   !(large_send == QETH_LARGE_SEND_NO)) {
   8.794 - 			card->perf_stats.large_send_bytes += skb->len;
   8.795 - 			card->perf_stats.large_send_cnt++;
   8.796 -diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
   8.797 -index 1286dde..89cbf34 100644
   8.798 ---- a/drivers/s390/net/qeth_tso.h
   8.799 -+++ b/drivers/s390/net/qeth_tso.h
   8.800 -@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
   8.801 - 	hdr->ext.hdr_version = 1;
   8.802 - 	hdr->ext.hdr_len     = 28;
   8.803 - 	/*insert non-fix values */
   8.804 --	hdr->ext.mss = skb_shinfo(skb)->tso_size;
   8.805 -+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
   8.806 - 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
   8.807 - 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
   8.808 - 				       sizeof(struct qeth_hdr_tso));
   8.809 -diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
   8.810 -index 93535f0..9269df7 100644
   8.811 ---- a/include/linux/ethtool.h
   8.812 -+++ b/include/linux/ethtool.h
   8.813 -@@ -408,6 +408,8 @@ #define ETHTOOL_STSO		0x0000001f /* Set 
   8.814 - #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
   8.815 - #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
   8.816 - #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
   8.817 -+#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
   8.818 -+#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
   8.819 - 
   8.820 - /* compatibility with older code */
   8.821 - #define SPARC_ETH_GSET		ETHTOOL_GSET
   8.822 -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
   8.823 -index 7fda03d..47b0965 100644
   8.824 ---- a/include/linux/netdevice.h
   8.825 -+++ b/include/linux/netdevice.h
   8.826 -@@ -230,7 +230,8 @@ enum netdev_state_t
   8.827 - 	__LINK_STATE_SCHED,
   8.828 - 	__LINK_STATE_NOCARRIER,
   8.829 - 	__LINK_STATE_RX_SCHED,
   8.830 --	__LINK_STATE_LINKWATCH_PENDING
   8.831 -+	__LINK_STATE_LINKWATCH_PENDING,
   8.832 -+	__LINK_STATE_QDISC_RUNNING,
   8.833 - };
   8.834 - 
   8.835 - 
   8.836 -@@ -306,9 +307,17 @@ #define NETIF_F_HW_VLAN_TX	128	/* Transm
   8.837 - #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
   8.838 - #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
   8.839 - #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
   8.840 --#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
   8.841 -+#define NETIF_F_GSO		2048	/* Enable software GSO. */
   8.842 - #define NETIF_F_LLTX		4096	/* LockLess TX */
   8.843 --#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
   8.844 -+
   8.845 -+	/* Segmentation offload features */
   8.846 -+#define NETIF_F_GSO_SHIFT	16
   8.847 -+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
   8.848 -+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
   8.849 -+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
   8.850 -+
   8.851 -+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
   8.852 -+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
   8.853 - 
   8.854 - 	struct net_device	*next_sched;
   8.855 - 
   8.856 -@@ -394,6 +403,9 @@ #define NETIF_F_UFO             8192    
   8.857 - 	struct list_head	qdisc_list;
   8.858 - 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
   8.859 - 
   8.860 -+	/* Partially transmitted GSO packet. */
   8.861 -+	struct sk_buff		*gso_skb;
   8.862 -+
   8.863 - 	/* ingress path synchronizer */
   8.864 - 	spinlock_t		ingress_lock;
   8.865 - 	struct Qdisc		*qdisc_ingress;
   8.866 -@@ -402,7 +414,7 @@ #define NETIF_F_UFO             8192    
   8.867 -  * One part is mostly used on xmit path (device)
   8.868 -  */
   8.869 - 	/* hard_start_xmit synchronizer */
   8.870 --	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
   8.871 -+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
   8.872 - 	/* cpu id of processor entered to hard_start_xmit or -1,
   8.873 - 	   if nobody entered there.
   8.874 - 	 */
   8.875 -@@ -527,6 +539,8 @@ struct packet_type {
   8.876 - 					 struct net_device *,
   8.877 - 					 struct packet_type *,
   8.878 - 					 struct net_device *);
   8.879 -+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
   8.880 -+						int features);
   8.881 - 	void			*af_packet_priv;
   8.882 - 	struct list_head	list;
   8.883 - };
   8.884 -@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
   8.885 - extern int		dev_set_mtu(struct net_device *, int);
   8.886 - extern int		dev_set_mac_address(struct net_device *,
   8.887 - 					    struct sockaddr *);
   8.888 --extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
   8.889 -+extern int		dev_hard_start_xmit(struct sk_buff *skb,
   8.890 -+					    struct net_device *dev);
   8.891 - 
   8.892 - extern void		dev_init(void);
   8.893 - 
   8.894 -@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
   8.895 - 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
   8.896 - }
   8.897 - 
   8.898 -+static inline void netif_tx_lock(struct net_device *dev)
   8.899 -+{
   8.900 -+	spin_lock(&dev->_xmit_lock);
   8.901 -+	dev->xmit_lock_owner = smp_processor_id();
   8.902 -+}
   8.903 -+
   8.904 -+static inline void netif_tx_lock_bh(struct net_device *dev)
   8.905 -+{
   8.906 -+	spin_lock_bh(&dev->_xmit_lock);
   8.907 -+	dev->xmit_lock_owner = smp_processor_id();
   8.908 -+}
   8.909 -+
   8.910 -+static inline int netif_tx_trylock(struct net_device *dev)
   8.911 -+{
   8.912 -+	int err = spin_trylock(&dev->_xmit_lock);
   8.913 -+	if (!err)
   8.914 -+		dev->xmit_lock_owner = smp_processor_id();
   8.915 -+	return err;
   8.916 -+}
   8.917 -+
   8.918 -+static inline void netif_tx_unlock(struct net_device *dev)
   8.919 -+{
   8.920 -+	dev->xmit_lock_owner = -1;
   8.921 -+	spin_unlock(&dev->_xmit_lock);
   8.922 -+}
   8.923 -+
   8.924 -+static inline void netif_tx_unlock_bh(struct net_device *dev)
   8.925 -+{
   8.926 -+	dev->xmit_lock_owner = -1;
   8.927 -+	spin_unlock_bh(&dev->_xmit_lock);
   8.928 -+}
   8.929 -+
   8.930 - static inline void netif_tx_disable(struct net_device *dev)
   8.931 - {
   8.932 --	spin_lock_bh(&dev->xmit_lock);
   8.933 -+	netif_tx_lock_bh(dev);
   8.934 - 	netif_stop_queue(dev);
   8.935 --	spin_unlock_bh(&dev->xmit_lock);
   8.936 -+	netif_tx_unlock_bh(dev);
   8.937 - }
   8.938 - 
   8.939 - /* These functions live elsewhere (drivers/net/net_init.c, but related) */
   8.940 -@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
   8.941 - extern int		weight_p;
   8.942 - extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
   8.943 - extern int skb_checksum_help(struct sk_buff *skb, int inward);
   8.944 -+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
   8.945 - #ifdef CONFIG_BUG
   8.946 - extern void netdev_rx_csum_fault(struct net_device *dev);
   8.947 - #else
   8.948 -@@ -951,6 +999,18 @@ #endif
   8.949 - 
   8.950 - extern void linkwatch_run_queue(void);
   8.951 - 
   8.952 -+static inline int skb_gso_ok(struct sk_buff *skb, int features)
   8.953 -+{
   8.954 -+	int feature = skb_shinfo(skb)->gso_size ?
   8.955 -+		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
   8.956 -+	return (features & feature) == feature;
   8.957 -+}
   8.958 -+
   8.959 -+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
   8.960 -+{
   8.961 -+	return !skb_gso_ok(skb, dev->features);
   8.962 -+}
   8.963 -+
   8.964 - #endif /* __KERNEL__ */
   8.965 - 
   8.966 - #endif	/* _LINUX_DEV_H */
   8.967 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
   8.968 -index ad7cc22..b19d45d 100644
   8.969 ---- a/include/linux/skbuff.h
   8.970 -+++ b/include/linux/skbuff.h
   8.971 -@@ -134,9 +134,10 @@ struct skb_frag_struct {
   8.972 - struct skb_shared_info {
   8.973 - 	atomic_t	dataref;
   8.974 - 	unsigned short	nr_frags;
   8.975 --	unsigned short	tso_size;
   8.976 --	unsigned short	tso_segs;
   8.977 --	unsigned short  ufo_size;
   8.978 -+	unsigned short	gso_size;
   8.979 -+	/* Warning: this field is not always filled in (UFO)! */
   8.980 -+	unsigned short	gso_segs;
   8.981 -+	unsigned short  gso_type;
   8.982 - 	unsigned int    ip6_frag_id;
   8.983 - 	struct sk_buff	*frag_list;
   8.984 - 	skb_frag_t	frags[MAX_SKB_FRAGS];
   8.985 -@@ -168,6 +169,14 @@ enum {
   8.986 - 	SKB_FCLONE_CLONE,
   8.987 - };
   8.988 - 
   8.989 -+enum {
   8.990 -+	SKB_GSO_TCPV4 = 1 << 0,
   8.991 -+	SKB_GSO_UDPV4 = 1 << 1,
   8.992 -+
   8.993 -+	/* This indicates the skb is from an untrusted source. */
   8.994 -+	SKB_GSO_DODGY = 1 << 2,
   8.995 -+};
   8.996 -+
   8.997 - /** 
   8.998 -  *	struct sk_buff - socket buffer
   8.999 -  *	@next: Next buffer in list
  8.1000 -@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  8.1001 - 	return 0;
  8.1002 - }
  8.1003 - 
  8.1004 -+static inline int __skb_linearize(struct sk_buff *skb)
  8.1005 -+{
  8.1006 -+	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  8.1007 -+}
  8.1008 -+
  8.1009 - /**
  8.1010 -  *	skb_linearize - convert paged skb to linear one
  8.1011 -  *	@skb: buffer to linarize
  8.1012 -- *	@gfp: allocation mode
  8.1013 -  *
  8.1014 -  *	If there is no free memory -ENOMEM is returned, otherwise zero
  8.1015 -  *	is returned and the old skb data released.
  8.1016 -  */
  8.1017 --extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  8.1018 --static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  8.1019 -+static inline int skb_linearize(struct sk_buff *skb)
  8.1020 -+{
  8.1021 -+	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  8.1022 -+}
  8.1023 -+
  8.1024 -+/**
  8.1025 -+ *	skb_linearize_cow - make sure skb is linear and writable
  8.1026 -+ *	@skb: buffer to process
  8.1027 -+ *
  8.1028 -+ *	If there is no free memory -ENOMEM is returned, otherwise zero
  8.1029 -+ *	is returned and the old skb data released.
  8.1030 -+ */
  8.1031 -+static inline int skb_linearize_cow(struct sk_buff *skb)
  8.1032 - {
  8.1033 --	return __skb_linearize(skb, gfp);
  8.1034 -+	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
  8.1035 -+	       __skb_linearize(skb) : 0;
  8.1036 - }
  8.1037 - 
  8.1038 - /**
  8.1039 -@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
  8.1040 - 				 struct sk_buff *skb1, const u32 len);
  8.1041 - 
  8.1042 - extern void	       skb_release_data(struct sk_buff *skb);
  8.1043 -+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
  8.1044 - 
  8.1045 - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
  8.1046 - 				       int len, void *buffer)
  8.1047 -diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
  8.1048 -index b94d1ad..75b5b93 100644
  8.1049 ---- a/include/net/pkt_sched.h
  8.1050 -+++ b/include/net/pkt_sched.h
  8.1051 -@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
  8.1052 - 		struct rtattr *tab);
  8.1053 - extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
  8.1054 - 
  8.1055 --extern int qdisc_restart(struct net_device *dev);
  8.1056 -+extern void __qdisc_run(struct net_device *dev);
  8.1057 - 
  8.1058 - static inline void qdisc_run(struct net_device *dev)
  8.1059 - {
  8.1060 --	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
  8.1061 --		/* NOTHING */;
  8.1062 -+	if (!netif_queue_stopped(dev) &&
  8.1063 -+	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  8.1064 -+		__qdisc_run(dev);
  8.1065 - }
  8.1066 - 
  8.1067 - extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
  8.1068 -diff --git a/include/net/protocol.h b/include/net/protocol.h
  8.1069 -index 6dc5970..0d2dcdb 100644
  8.1070 ---- a/include/net/protocol.h
  8.1071 -+++ b/include/net/protocol.h
  8.1072 -@@ -37,6 +37,8 @@ #define MAX_INET_PROTOS	256		/* Must be 
  8.1073 - struct net_protocol {
  8.1074 - 	int			(*handler)(struct sk_buff *skb);
  8.1075 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  8.1076 -+	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  8.1077 -+					       int features);
  8.1078 - 	int			no_policy;
  8.1079 - };
  8.1080 - 
  8.1081 -diff --git a/include/net/sock.h b/include/net/sock.h
  8.1082 -index f63d0d5..a8e8d21 100644
  8.1083 ---- a/include/net/sock.h
  8.1084 -+++ b/include/net/sock.h
  8.1085 -@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
  8.1086 - {
  8.1087 - 	__sk_dst_set(sk, dst);
  8.1088 - 	sk->sk_route_caps = dst->dev->features;
  8.1089 -+	if (sk->sk_route_caps & NETIF_F_GSO)
  8.1090 -+		sk->sk_route_caps |= NETIF_F_TSO;
  8.1091 - 	if (sk->sk_route_caps & NETIF_F_TSO) {
  8.1092 - 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
  8.1093 - 			sk->sk_route_caps &= ~NETIF_F_TSO;
  8.1094 -+		else 
  8.1095 -+			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
  8.1096 - 	}
  8.1097 - }
  8.1098 - 
  8.1099 -diff --git a/include/net/tcp.h b/include/net/tcp.h
  8.1100 -index 77f21c6..70e1d5f 100644
  8.1101 ---- a/include/net/tcp.h
  8.1102 -+++ b/include/net/tcp.h
  8.1103 -@@ -552,13 +552,13 @@ #include <net/tcp_ecn.h>
  8.1104 -  */
  8.1105 - static inline int tcp_skb_pcount(const struct sk_buff *skb)
  8.1106 - {
  8.1107 --	return skb_shinfo(skb)->tso_segs;
  8.1108 -+	return skb_shinfo(skb)->gso_segs;
  8.1109 - }
  8.1110 - 
  8.1111 - /* This is valid iff tcp_skb_pcount() > 1. */
  8.1112 - static inline int tcp_skb_mss(const struct sk_buff *skb)
  8.1113 - {
  8.1114 --	return skb_shinfo(skb)->tso_size;
  8.1115 -+	return skb_shinfo(skb)->gso_size;
  8.1116 - }
  8.1117 - 
  8.1118 - static inline void tcp_dec_pcount_approx(__u32 *count,
  8.1119 -@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
  8.1120 - 
  8.1121 - extern int tcp_v4_destroy_sock(struct sock *sk);
  8.1122 - 
  8.1123 -+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  8.1124 -+
  8.1125 - #ifdef CONFIG_PROC_FS
  8.1126 - extern int  tcp4_proc_init(void);
  8.1127 - extern void tcp4_proc_exit(void);
  8.1128 -diff --git a/net/atm/clip.c b/net/atm/clip.c
  8.1129 -index 1842a4e..6dc21a7 100644
  8.1130 ---- a/net/atm/clip.c
  8.1131 -+++ b/net/atm/clip.c
  8.1132 -@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
  8.1133 - 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
  8.1134 - 		return;
  8.1135 - 	}
  8.1136 --	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
  8.1137 -+	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
  8.1138 - 	entry->neigh->used = jiffies;
  8.1139 - 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
  8.1140 - 		if (*walk == clip_vcc) {
  8.1141 -@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
  8.1142 - 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
  8.1143 - 	  "0x%p)\n",entry,clip_vcc);
  8.1144 - out:
  8.1145 --	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
  8.1146 -+	netif_tx_unlock_bh(entry->neigh->dev);
  8.1147 - }
  8.1148 - 
  8.1149 - /* The neighbour entry n->lock is held. */
  8.1150 -diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
  8.1151 -index 0b33a7b..180e79b 100644
  8.1152 ---- a/net/bridge/br_device.c
  8.1153 -+++ b/net/bridge/br_device.c
  8.1154 -@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
  8.1155 - 	struct net_bridge *br = netdev_priv(dev);
  8.1156 - 
  8.1157 - 	if (data)
  8.1158 --		br->feature_mask |= NETIF_F_IP_CSUM;
  8.1159 -+		br->feature_mask |= NETIF_F_NO_CSUM;
  8.1160 - 	else
  8.1161 --		br->feature_mask &= ~NETIF_F_IP_CSUM;
  8.1162 -+		br->feature_mask &= ~NETIF_F_ALL_CSUM;
  8.1163 - 
  8.1164 - 	br_features_recompute(br);
  8.1165 - 	return 0;
  8.1166 -@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
  8.1167 - 	dev->set_mac_address = br_set_mac_address;
  8.1168 - 	dev->priv_flags = IFF_EBRIDGE;
  8.1169 - 
  8.1170 -- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
  8.1171 -- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
  8.1172 -+ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
  8.1173 -+ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
  8.1174 - }
  8.1175 -diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
  8.1176 -index 2d24fb4..00b1128 100644
  8.1177 ---- a/net/bridge/br_forward.c
  8.1178 -+++ b/net/bridge/br_forward.c
  8.1179 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  8.1180 - int br_dev_queue_push_xmit(struct sk_buff *skb)
  8.1181 - {
  8.1182 - 	/* drop mtu oversized packets except tso */
  8.1183 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
  8.1184 -+	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  8.1185 - 		kfree_skb(skb);
  8.1186 - 	else {
  8.1187 - #ifdef CONFIG_BRIDGE_NETFILTER
  8.1188 -diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
  8.1189 -index f36b35e..0617146 100644
  8.1190 ---- a/net/bridge/br_if.c
  8.1191 -+++ b/net/bridge/br_if.c
  8.1192 -@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
  8.1193 - 	struct net_bridge_port *p;
  8.1194 - 	unsigned long features, checksum;
  8.1195 - 
  8.1196 --	features = br->feature_mask &~ NETIF_F_IP_CSUM;
  8.1197 --	checksum = br->feature_mask & NETIF_F_IP_CSUM;
  8.1198 -+	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
  8.1199 -+	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
  8.1200 - 
  8.1201 - 	list_for_each_entry(p, &br->port_list, list) {
  8.1202 --		if (!(p->dev->features 
  8.1203 --		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
  8.1204 -+		unsigned long feature = p->dev->features;
  8.1205 -+
  8.1206 -+		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
  8.1207 -+			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
  8.1208 -+		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
  8.1209 -+			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
  8.1210 -+		if (!(feature & NETIF_F_IP_CSUM))
  8.1211 - 			checksum = 0;
  8.1212 --		features &= p->dev->features;
  8.1213 -+
  8.1214 -+		if (feature & NETIF_F_GSO)
  8.1215 -+			feature |= NETIF_F_TSO;
  8.1216 -+		feature |= NETIF_F_GSO;
  8.1217 -+
  8.1218 -+		features &= feature;
  8.1219 - 	}
  8.1220 - 
  8.1221 --	br->dev->features = features | checksum | NETIF_F_LLTX;
  8.1222 -+	br->dev->features = features | checksum | NETIF_F_LLTX |
  8.1223 -+			    NETIF_F_GSO_ROBUST;
  8.1224 - }
  8.1225 - 
  8.1226 - /* called with RTNL */
  8.1227 -diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
  8.1228 -index 9e27373..588207f 100644
  8.1229 ---- a/net/bridge/br_netfilter.c
  8.1230 -+++ b/net/bridge/br_netfilter.c
  8.1231 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  8.1232 - {
  8.1233 - 	if (skb->protocol == htons(ETH_P_IP) &&
  8.1234 - 	    skb->len > skb->dev->mtu &&
  8.1235 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  8.1236 -+	    !skb_shinfo(skb)->gso_size)
  8.1237 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
  8.1238 - 	else
  8.1239 - 		return br_dev_queue_push_xmit(skb);
  8.1240 -diff --git a/net/core/dev.c b/net/core/dev.c
  8.1241 -index 12a214c..32e1056 100644
  8.1242 ---- a/net/core/dev.c
  8.1243 -+++ b/net/core/dev.c
  8.1244 -@@ -115,6 +115,7 @@ #include <linux/wireless.h>		/* Note : w
  8.1245 - #include <net/iw_handler.h>
  8.1246 - #endif	/* CONFIG_NET_RADIO */
  8.1247 - #include <asm/current.h>
  8.1248 -+#include <linux/err.h>
  8.1249 - 
  8.1250 - /*
  8.1251 -  *	The list of packet types we will receive (as opposed to discard)
  8.1252 -@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
  8.1253 -  *	taps currently in use.
  8.1254 -  */
  8.1255 - 
  8.1256 --void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  8.1257 -+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  8.1258 - {
  8.1259 - 	struct packet_type *ptype;
  8.1260 - 
  8.1261 -@@ -1106,6 +1107,45 @@ out:	
  8.1262 - 	return ret;
  8.1263 - }
  8.1264 - 
  8.1265 -+/**
  8.1266 -+ *	skb_gso_segment - Perform segmentation on skb.
  8.1267 -+ *	@skb: buffer to segment
  8.1268 -+ *	@features: features for the output path (see dev->features)
  8.1269 -+ *
  8.1270 -+ *	This function segments the given skb and returns a list of segments.
  8.1271 -+ *
  8.1272 -+ *	It may return NULL if the skb requires no segmentation.  This is
  8.1273 -+ *	only possible when GSO is used for verifying header integrity.
  8.1274 -+ */
  8.1275 -+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
  8.1276 -+{
  8.1277 -+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  8.1278 -+	struct packet_type *ptype;
  8.1279 -+	int type = skb->protocol;
  8.1280 -+
  8.1281 -+	BUG_ON(skb_shinfo(skb)->frag_list);
  8.1282 -+	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  8.1283 -+
  8.1284 -+	skb->mac.raw = skb->data;
  8.1285 -+	skb->mac_len = skb->nh.raw - skb->data;
  8.1286 -+	__skb_pull(skb, skb->mac_len);
  8.1287 -+
  8.1288 -+	rcu_read_lock();
  8.1289 -+	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  8.1290 -+		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  8.1291 -+			segs = ptype->gso_segment(skb, features);
  8.1292 -+			break;
  8.1293 -+		}
  8.1294 -+	}
  8.1295 -+	rcu_read_unlock();
  8.1296 -+
  8.1297 -+	__skb_push(skb, skb->data - skb->mac.raw);
  8.1298 -+
  8.1299 -+	return segs;
  8.1300 -+}
  8.1301 -+
  8.1302 -+EXPORT_SYMBOL(skb_gso_segment);
  8.1303 -+
  8.1304 - /* Take action when hardware reception checksum errors are detected. */
  8.1305 - #ifdef CONFIG_BUG
  8.1306 - void netdev_rx_csum_fault(struct net_device *dev)
  8.1307 -@@ -1142,75 +1182,108 @@ #else
  8.1308 - #define illegal_highdma(dev, skb)	(0)
  8.1309 - #endif
  8.1310 - 
  8.1311 --/* Keep head the same: replace data */
  8.1312 --int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
  8.1313 --{
  8.1314 --	unsigned int size;
  8.1315 --	u8 *data;
  8.1316 --	long offset;
  8.1317 --	struct skb_shared_info *ninfo;
  8.1318 --	int headerlen = skb->data - skb->head;
  8.1319 --	int expand = (skb->tail + skb->data_len) - skb->end;
  8.1320 --
  8.1321 --	if (skb_shared(skb))
  8.1322 --		BUG();
  8.1323 --
  8.1324 --	if (expand <= 0)
  8.1325 --		expand = 0;
  8.1326 --
  8.1327 --	size = skb->end - skb->head + expand;
  8.1328 --	size = SKB_DATA_ALIGN(size);
  8.1329 --	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
  8.1330 --	if (!data)
  8.1331 --		return -ENOMEM;
  8.1332 --
  8.1333 --	/* Copy entire thing */
  8.1334 --	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
  8.1335 --		BUG();
  8.1336 --
  8.1337 --	/* Set up shinfo */
  8.1338 --	ninfo = (struct skb_shared_info*)(data + size);
  8.1339 --	atomic_set(&ninfo->dataref, 1);
  8.1340 --	ninfo->tso_size = skb_shinfo(skb)->tso_size;
  8.1341 --	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
  8.1342 --	ninfo->nr_frags = 0;
  8.1343 --	ninfo->frag_list = NULL;
  8.1344 --
  8.1345 --	/* Offset between the two in bytes */
  8.1346 --	offset = data - skb->head;
  8.1347 --
  8.1348 --	/* Free old data. */
  8.1349 --	skb_release_data(skb);
  8.1350 --
  8.1351 --	skb->head = data;
  8.1352 --	skb->end  = data + size;
  8.1353 --
  8.1354 --	/* Set up new pointers */
  8.1355 --	skb->h.raw   += offset;
  8.1356 --	skb->nh.raw  += offset;
  8.1357 --	skb->mac.raw += offset;
  8.1358 --	skb->tail    += offset;
  8.1359 --	skb->data    += offset;
  8.1360 --
  8.1361 --	/* We are no longer a clone, even if we were. */
  8.1362 --	skb->cloned    = 0;
  8.1363 --
  8.1364 --	skb->tail     += skb->data_len;
  8.1365 --	skb->data_len  = 0;
  8.1366 -+struct dev_gso_cb {
  8.1367 -+	void (*destructor)(struct sk_buff *skb);
  8.1368 -+};
  8.1369 -+
  8.1370 -+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
  8.1371 -+
  8.1372 -+static void dev_gso_skb_destructor(struct sk_buff *skb)
  8.1373 -+{
  8.1374 -+	struct dev_gso_cb *cb;
  8.1375 -+
  8.1376 -+	do {
  8.1377 -+		struct sk_buff *nskb = skb->next;
  8.1378 -+
  8.1379 -+		skb->next = nskb->next;
  8.1380 -+		nskb->next = NULL;
  8.1381 -+		kfree_skb(nskb);
  8.1382 -+	} while (skb->next);
  8.1383 -+
  8.1384 -+	cb = DEV_GSO_CB(skb);
  8.1385 -+	if (cb->destructor)
  8.1386 -+		cb->destructor(skb);
  8.1387 -+}
  8.1388 -+
  8.1389 -+/**
  8.1390 -+ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  8.1391 -+ *	@skb: buffer to segment
  8.1392 -+ *
  8.1393 -+ *	This function segments the given skb and stores the list of segments
  8.1394 -+ *	in skb->next.
  8.1395 -+ */
  8.1396 -+static int dev_gso_segment(struct sk_buff *skb)
  8.1397 -+{
  8.1398 -+	struct net_device *dev = skb->dev;
  8.1399 -+	struct sk_buff *segs;
  8.1400 -+	int features = dev->features & ~(illegal_highdma(dev, skb) ?
  8.1401 -+					 NETIF_F_SG : 0);
  8.1402 -+
  8.1403 -+	segs = skb_gso_segment(skb, features);
  8.1404 -+
  8.1405 -+	/* Verifying header integrity only. */
  8.1406 -+	if (!segs)
  8.1407 -+		return 0;
  8.1408 -+
  8.1409 -+	if (unlikely(IS_ERR(segs)))
  8.1410 -+		return PTR_ERR(segs);
  8.1411 -+
  8.1412 -+	skb->next = segs;
  8.1413 -+	DEV_GSO_CB(skb)->destructor = skb->destructor;
  8.1414 -+	skb->destructor = dev_gso_skb_destructor;
  8.1415 -+
  8.1416 -+	return 0;
  8.1417 -+}
  8.1418 -+
  8.1419 -+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
  8.1420 -+{
  8.1421 -+	if (likely(!skb->next)) {
  8.1422 -+		if (netdev_nit)
  8.1423 -+			dev_queue_xmit_nit(skb, dev);
  8.1424 -+
  8.1425 -+		if (netif_needs_gso(dev, skb)) {
  8.1426 -+			if (unlikely(dev_gso_segment(skb)))
  8.1427 -+				goto out_kfree_skb;
  8.1428 -+			if (skb->next)
  8.1429 -+				goto gso;
  8.1430 -+		}
  8.1431 -+
  8.1432 -+		return dev->hard_start_xmit(skb, dev);
  8.1433 -+	}
  8.1434 -+
  8.1435 -+gso:
  8.1436 -+	do {
  8.1437 -+		struct sk_buff *nskb = skb->next;
  8.1438 -+		int rc;
  8.1439 -+
  8.1440 -+		skb->next = nskb->next;
  8.1441 -+		nskb->next = NULL;
  8.1442 -+		rc = dev->hard_start_xmit(nskb, dev);
  8.1443 -+		if (unlikely(rc)) {
  8.1444 -+			nskb->next = skb->next;
  8.1445 -+			skb->next = nskb;
  8.1446 -+			return rc;
  8.1447 -+		}
  8.1448 -+		if (unlikely(netif_queue_stopped(dev) && skb->next))
  8.1449 -+			return NETDEV_TX_BUSY;
  8.1450 -+	} while (skb->next);
  8.1451 -+	
  8.1452 -+	skb->destructor = DEV_GSO_CB(skb)->destructor;
  8.1453 -+
  8.1454 -+out_kfree_skb:
  8.1455 -+	kfree_skb(skb);
  8.1456 - 	return 0;
  8.1457 - }
  8.1458 - 
  8.1459 - #define HARD_TX_LOCK(dev, cpu) {			\
  8.1460 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  8.1461 --		spin_lock(&dev->xmit_lock);		\
  8.1462 --		dev->xmit_lock_owner = cpu;		\
  8.1463 -+		netif_tx_lock(dev);			\
  8.1464 - 	}						\
  8.1465 - }
  8.1466 - 
  8.1467 - #define HARD_TX_UNLOCK(dev) {				\
  8.1468 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  8.1469 --		dev->xmit_lock_owner = -1;		\
  8.1470 --		spin_unlock(&dev->xmit_lock);		\
  8.1471 -+		netif_tx_unlock(dev);			\
  8.1472 - 	}						\
  8.1473 - }
  8.1474 - 
  8.1475 -@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
  8.1476 - 	struct Qdisc *q;
  8.1477 - 	int rc = -ENOMEM;
  8.1478 - 
  8.1479 -+	/* GSO will handle the following emulations directly. */
  8.1480 -+	if (netif_needs_gso(dev, skb))
  8.1481 -+		goto gso;
  8.1482 -+
  8.1483 - 	if (skb_shinfo(skb)->frag_list &&
  8.1484 - 	    !(dev->features & NETIF_F_FRAGLIST) &&
  8.1485 --	    __skb_linearize(skb, GFP_ATOMIC))
  8.1486 -+	    __skb_linearize(skb))
  8.1487 - 		goto out_kfree_skb;
  8.1488 - 
  8.1489 - 	/* Fragmented skb is linearized if device does not support SG,
  8.1490 -@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
  8.1491 - 	 */
  8.1492 - 	if (skb_shinfo(skb)->nr_frags &&
  8.1493 - 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
  8.1494 --	    __skb_linearize(skb, GFP_ATOMIC))
  8.1495 -+	    __skb_linearize(skb))
  8.1496 - 		goto out_kfree_skb;
  8.1497 - 
  8.1498 - 	/* If packet is not checksummed and device does not support
  8.1499 - 	 * checksumming for this protocol, complete checksumming here.
  8.1500 - 	 */
  8.1501 - 	if (skb->ip_summed == CHECKSUM_HW &&
  8.1502 --	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
  8.1503 -+	    (!(dev->features & NETIF_F_GEN_CSUM) &&
  8.1504 - 	     (!(dev->features & NETIF_F_IP_CSUM) ||
  8.1505 - 	      skb->protocol != htons(ETH_P_IP))))
  8.1506 - 	      	if (skb_checksum_help(skb, 0))
  8.1507 - 	      		goto out_kfree_skb;
  8.1508 - 
  8.1509 -+gso:
  8.1510 - 	spin_lock_prefetch(&dev->queue_lock);
  8.1511 - 
  8.1512 - 	/* Disable soft irqs for various locks below. Also 
  8.1513 - 	 * stops preemption for RCU. 
  8.1514 - 	 */
  8.1515 --	local_bh_disable(); 
  8.1516 -+	rcu_read_lock_bh(); 
  8.1517 - 
  8.1518 - 	/* Updates of qdisc are serialized by queue_lock. 
  8.1519 - 	 * The struct Qdisc which is pointed to by qdisc is now a 
  8.1520 -@@ -1309,8 +1387,8 @@ #endif
  8.1521 - 	/* The device has no queue. Common case for software devices:
  8.1522 - 	   loopback, all the sorts of tunnels...
  8.1523 - 
  8.1524 --	   Really, it is unlikely that xmit_lock protection is necessary here.
  8.1525 --	   (f.e. loopback and IP tunnels are clean ignoring statistics
  8.1526 -+	   Really, it is unlikely that netif_tx_lock protection is necessary
  8.1527 -+	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
  8.1528 - 	   counters.)
  8.1529 - 	   However, it is possible, that they rely on protection
  8.1530 - 	   made by us here.
  8.1531 -@@ -1326,11 +1404,8 @@ #endif
  8.1532 - 			HARD_TX_LOCK(dev, cpu);
  8.1533 - 
  8.1534 - 			if (!netif_queue_stopped(dev)) {
  8.1535 --				if (netdev_nit)
  8.1536 --					dev_queue_xmit_nit(skb, dev);
  8.1537 --
  8.1538 - 				rc = 0;
  8.1539 --				if (!dev->hard_start_xmit(skb, dev)) {
  8.1540 -+				if (!dev_hard_start_xmit(skb, dev)) {
  8.1541 - 					HARD_TX_UNLOCK(dev);
  8.1542 - 					goto out;
  8.1543 - 				}
  8.1544 -@@ -1349,13 +1424,13 @@ #endif
  8.1545 - 	}
  8.1546 - 
  8.1547 - 	rc = -ENETDOWN;
  8.1548 --	local_bh_enable();
  8.1549 -+	rcu_read_unlock_bh();
  8.1550 - 
  8.1551 - out_kfree_skb:
  8.1552 - 	kfree_skb(skb);
  8.1553 - 	return rc;
  8.1554 - out:
  8.1555 --	local_bh_enable();
  8.1556 -+	rcu_read_unlock_bh();
  8.1557 - 	return rc;
  8.1558 - }
  8.1559 - 
  8.1560 -@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
  8.1561 - 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  8.1562 - 
  8.1563 - 	spin_lock_init(&dev->queue_lock);
  8.1564 --	spin_lock_init(&dev->xmit_lock);
  8.1565 -+	spin_lock_init(&dev->_xmit_lock);
  8.1566 - 	dev->xmit_lock_owner = -1;
  8.1567 - #ifdef CONFIG_NET_CLS_ACT
  8.1568 - 	spin_lock_init(&dev->ingress_lock);
  8.1569 -@@ -2714,9 +2789,7 @@ #endif
  8.1570 - 
  8.1571 - 	/* Fix illegal SG+CSUM combinations. */
  8.1572 - 	if ((dev->features & NETIF_F_SG) &&
  8.1573 --	    !(dev->features & (NETIF_F_IP_CSUM |
  8.1574 --			       NETIF_F_NO_CSUM |
  8.1575 --			       NETIF_F_HW_CSUM))) {
  8.1576 -+	    !(dev->features & NETIF_F_ALL_CSUM)) {
  8.1577 - 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
  8.1578 - 		       dev->name);
  8.1579 - 		dev->features &= ~NETIF_F_SG;
  8.1580 -@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
  8.1581 - EXPORT_SYMBOL(__dev_get_by_index);
  8.1582 - EXPORT_SYMBOL(__dev_get_by_name);
  8.1583 - EXPORT_SYMBOL(__dev_remove_pack);
  8.1584 --EXPORT_SYMBOL(__skb_linearize);
  8.1585 - EXPORT_SYMBOL(dev_valid_name);
  8.1586 - EXPORT_SYMBOL(dev_add_pack);
  8.1587 - EXPORT_SYMBOL(dev_alloc_name);
  8.1588 -diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
  8.1589 -index 05d6085..c57d887 100644
  8.1590 ---- a/net/core/dev_mcast.c
  8.1591 -+++ b/net/core/dev_mcast.c
  8.1592 -@@ -62,7 +62,7 @@ #include <net/arp.h>
  8.1593 -  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  8.1594 -  *	so that it must be bh protected.
  8.1595 -  *
  8.1596 -- *	We block accesses to device mc filters with dev->xmit_lock.
  8.1597 -+ *	We block accesses to device mc filters with netif_tx_lock.
  8.1598 -  */
  8.1599 - 
  8.1600 - /*
  8.1601 -@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
  8.1602 - 
  8.1603 - void dev_mc_upload(struct net_device *dev)
  8.1604 - {
  8.1605 --	spin_lock_bh(&dev->xmit_lock);
  8.1606 -+	netif_tx_lock_bh(dev);
  8.1607 - 	__dev_mc_upload(dev);
  8.1608 --	spin_unlock_bh(&dev->xmit_lock);
  8.1609 -+	netif_tx_unlock_bh(dev);
  8.1610 - }
  8.1611 - 
  8.1612 - /*
  8.1613 -@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
  8.1614 - 	int err = 0;
  8.1615 - 	struct dev_mc_list *dmi, **dmip;
  8.1616 - 
  8.1617 --	spin_lock_bh(&dev->xmit_lock);
  8.1618 -+	netif_tx_lock_bh(dev);
  8.1619 - 
  8.1620 - 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
  8.1621 - 		/*
  8.1622 -@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
  8.1623 - 			 */
  8.1624 - 			__dev_mc_upload(dev);
  8.1625 - 			
  8.1626 --			spin_unlock_bh(&dev->xmit_lock);
  8.1627 -+			netif_tx_unlock_bh(dev);
  8.1628 - 			return 0;
  8.1629 - 		}
  8.1630 - 	}
  8.1631 - 	err = -ENOENT;
  8.1632 - done:
  8.1633 --	spin_unlock_bh(&dev->xmit_lock);
  8.1634 -+	netif_tx_unlock_bh(dev);
  8.1635 - 	return err;
  8.1636 - }
  8.1637 - 
  8.1638 -@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
  8.1639 - 
  8.1640 - 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
  8.1641 - 
  8.1642 --	spin_lock_bh(&dev->xmit_lock);
  8.1643 -+	netif_tx_lock_bh(dev);
  8.1644 - 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
  8.1645 - 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
  8.1646 - 		    dmi->dmi_addrlen == alen) {
  8.1647 -@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
  8.1648 - 	}
  8.1649 - 
  8.1650 - 	if ((dmi = dmi1) == NULL) {
  8.1651 --		spin_unlock_bh(&dev->xmit_lock);
  8.1652 -+		netif_tx_unlock_bh(dev);
  8.1653 - 		return -ENOMEM;
  8.1654 - 	}
  8.1655 - 	memcpy(dmi->dmi_addr, addr, alen);
  8.1656 -@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
  8.1657 - 
  8.1658 - 	__dev_mc_upload(dev);
  8.1659 - 	
  8.1660 --	spin_unlock_bh(&dev->xmit_lock);
  8.1661 -+	netif_tx_unlock_bh(dev);
  8.1662 - 	return 0;
  8.1663 - 
  8.1664 - done:
  8.1665 --	spin_unlock_bh(&dev->xmit_lock);
  8.1666 -+	netif_tx_unlock_bh(dev);
  8.1667 - 	kfree(dmi1);
  8.1668 - 	return err;
  8.1669 - }
  8.1670 -@@ -204,7 +204,7 @@ done:
  8.1671 - 
  8.1672 - void dev_mc_discard(struct net_device *dev)
  8.1673 - {
  8.1674 --	spin_lock_bh(&dev->xmit_lock);
  8.1675 -+	netif_tx_lock_bh(dev);
  8.1676 - 	
  8.1677 - 	while (dev->mc_list != NULL) {
  8.1678 - 		struct dev_mc_list *tmp = dev->mc_list;
  8.1679 -@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
  8.1680 - 	}
  8.1681 - 	dev->mc_count = 0;
  8.1682 - 
  8.1683 --	spin_unlock_bh(&dev->xmit_lock);
  8.1684 -+	netif_tx_unlock_bh(dev);
  8.1685 - }
  8.1686 - 
  8.1687 - #ifdef CONFIG_PROC_FS
  8.1688 -@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
  8.1689 - 	struct dev_mc_list *m;
  8.1690 - 	struct net_device *dev = v;
  8.1691 - 
  8.1692 --	spin_lock_bh(&dev->xmit_lock);
  8.1693 -+	netif_tx_lock_bh(dev);
  8.1694 - 	for (m = dev->mc_list; m; m = m->next) {
  8.1695 - 		int i;
  8.1696 - 
  8.1697 -@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
  8.1698 - 
  8.1699 - 		seq_putc(seq, '\n');
  8.1700 - 	}
  8.1701 --	spin_unlock_bh(&dev->xmit_lock);
  8.1702 -+	netif_tx_unlock_bh(dev);
  8.1703 - 	return 0;
  8.1704 - }
  8.1705 - 
  8.1706 -diff --git a/net/core/ethtool.c b/net/core/ethtool.c
  8.1707 -index e6f7610..27ce168 100644
  8.1708 ---- a/net/core/ethtool.c
  8.1709 -+++ b/net/core/ethtool.c
  8.1710 -@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
  8.1711 - 
  8.1712 - u32 ethtool_op_get_tx_csum(struct net_device *dev)
  8.1713 - {
  8.1714 --	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
  8.1715 -+	return (dev->features & NETIF_F_ALL_CSUM) != 0;
  8.1716 - }
  8.1717 - 
  8.1718 - int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
  8.1719 -@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev
  8.1720 - 		return -EFAULT;
  8.1721 - 
  8.1722 - 	if (edata.data && 
  8.1723 --	    !(dev->features & (NETIF_F_IP_CSUM |
  8.1724 --			       NETIF_F_NO_CSUM |
  8.1725 --			       NETIF_F_HW_CSUM)))
  8.1726 -+	    !(dev->features & NETIF_F_ALL_CSUM))
  8.1727 - 		return -EINVAL;
  8.1728 - 
  8.1729 - 	return __ethtool_set_sg(dev, edata.data);
  8.1730 -@@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_de
  8.1731 - 
  8.1732 - static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
  8.1733 - {
  8.1734 --	struct ethtool_value edata = { ETHTOOL_GTSO };
  8.1735 -+	struct ethtool_value edata = { ETHTOOL_GUFO };
  8.1736 - 
  8.1737 - 	if (!dev->ethtool_ops->get_ufo)
  8.1738 - 		return -EOPNOTSUPP;
  8.1739 -@@ -600,6 +598,7 @@ static int ethtool_get_ufo(struct net_de
  8.1740 - 		 return -EFAULT;
  8.1741 - 	return 0;
  8.1742 - }
  8.1743 -+
  8.1744 - static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
  8.1745 - {
  8.1746 - 	struct ethtool_value edata;
  8.1747 -@@ -615,6 +614,29 @@ static int ethtool_set_ufo(struct net_de
  8.1748 - 	return dev->ethtool_ops->set_ufo(dev, edata.data);
  8.1749 - }
  8.1750 - 
  8.1751 -+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
  8.1752 -+{
  8.1753 -+	struct ethtool_value edata = { ETHTOOL_GGSO };
  8.1754 -+
  8.1755 -+	edata.data = dev->features & NETIF_F_GSO;
  8.1756 -+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
  8.1757 -+		 return -EFAULT;
  8.1758 -+	return 0;
  8.1759 -+}
  8.1760 -+
  8.1761 -+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
  8.1762 -+{
  8.1763 -+	struct ethtool_value edata;
  8.1764 -+
  8.1765 -+	if (copy_from_user(&edata, useraddr, sizeof(edata)))
  8.1766 -+		return -EFAULT;
  8.1767 -+	if (edata.data)
  8.1768 -+		dev->features |= NETIF_F_GSO;
  8.1769 -+	else
  8.1770 -+		dev->features &= ~NETIF_F_GSO;
  8.1771 -+	return 0;
  8.1772 -+}
  8.1773 -+
  8.1774 - static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
  8.1775 - {
  8.1776 - 	struct ethtool_test test;
  8.1777 -@@ -906,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
  8.1778 - 	case ETHTOOL_SUFO:
  8.1779 - 		rc = ethtool_set_ufo(dev, useraddr);
  8.1780 - 		break;
  8.1781 -+	case ETHTOOL_GGSO:
  8.1782 -+		rc = ethtool_get_gso(dev, useraddr);
  8.1783 -+		break;
  8.1784 -+	case ETHTOOL_SGSO:
  8.1785 -+		rc = ethtool_set_gso(dev, useraddr);
  8.1786 -+		break;
  8.1787 - 	default:
  8.1788 - 		rc =  -EOPNOTSUPP;
  8.1789 - 	}
  8.1790 -diff --git a/net/core/netpoll.c b/net/core/netpoll.c
  8.1791 -index ea51f8d..ec28d3b 100644
  8.1792 ---- a/net/core/netpoll.c
  8.1793 -+++ b/net/core/netpoll.c
  8.1794 -@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp
  8.1795 - 
  8.1796 - 	do {
  8.1797 - 		npinfo->tries--;
  8.1798 --		spin_lock(&np->dev->xmit_lock);
  8.1799 --		np->dev->xmit_lock_owner = smp_processor_id();
  8.1800 -+		netif_tx_lock(np->dev);
  8.1801 - 
  8.1802 - 		/*
  8.1803 - 		 * network drivers do not expect to be called if the queue is
  8.1804 - 		 * stopped.
  8.1805 - 		 */
  8.1806 - 		if (netif_queue_stopped(np->dev)) {
  8.1807 --			np->dev->xmit_lock_owner = -1;
  8.1808 --			spin_unlock(&np->dev->xmit_lock);
  8.1809 -+			netif_tx_unlock(np->dev);
  8.1810 - 			netpoll_poll(np);
  8.1811 - 			udelay(50);
  8.1812 - 			continue;
  8.1813 - 		}
  8.1814 - 
  8.1815 - 		status = np->dev->hard_start_xmit(skb, np->dev);
  8.1816 --		np->dev->xmit_lock_owner = -1;
  8.1817 --		spin_unlock(&np->dev->xmit_lock);
  8.1818 -+		netif_tx_unlock(np->dev);
  8.1819 - 
  8.1820 - 		/* success */
  8.1821 - 		if(!status) {
  8.1822 -diff --git a/net/core/pktgen.c b/net/core/pktgen.c
  8.1823 -index da16f8f..2380347 100644
  8.1824 ---- a/net/core/pktgen.c
  8.1825 -+++ b/net/core/pktgen.c
  8.1826 -@@ -2582,7 +2582,7 @@ static __inline__ void pktgen_xmit(struc
  8.1827 - 		}
  8.1828 - 	}
  8.1829 - 	
  8.1830 --	spin_lock_bh(&odev->xmit_lock);
  8.1831 -+	netif_tx_lock_bh(odev);
  8.1832 - 	if (!netif_queue_stopped(odev)) {
  8.1833 - 
  8.1834 - 		atomic_inc(&(pkt_dev->skb->users));
  8.1835 -@@ -2627,7 +2627,7 @@ retry_now:
  8.1836 - 		pkt_dev->next_tx_ns = 0;
  8.1837 -         }
  8.1838 - 
  8.1839 --	spin_unlock_bh(&odev->xmit_lock);
  8.1840 -+	netif_tx_unlock_bh(odev);
  8.1841 - 	
  8.1842 - 	/* If pkt_dev->count is zero, then run forever */
  8.1843 - 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
  8.1844 -diff --git a/net/core/skbuff.c b/net/core/skbuff.c
  8.1845 -index 2144952..46f56af 100644
  8.1846 ---- a/net/core/skbuff.c
  8.1847 -+++ b/net/core/skbuff.c
  8.1848 -@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int
  8.1849 - 	shinfo = skb_shinfo(skb);
  8.1850 - 	atomic_set(&shinfo->dataref, 1);
  8.1851 - 	shinfo->nr_frags  = 0;
  8.1852 --	shinfo->tso_size = 0;
  8.1853 --	shinfo->tso_segs = 0;
  8.1854 --	shinfo->ufo_size = 0;
  8.1855 -+	shinfo->gso_size = 0;
  8.1856 -+	shinfo->gso_segs = 0;
  8.1857 -+	shinfo->gso_type = 0;
  8.1858 - 	shinfo->ip6_frag_id = 0;
  8.1859 - 	shinfo->frag_list = NULL;
  8.1860 - 
  8.1861 -@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme
  8.1862 - 
  8.1863 - 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
  8.1864 - 	skb_shinfo(skb)->nr_frags  = 0;
  8.1865 --	skb_shinfo(skb)->tso_size = 0;
  8.1866 --	skb_shinfo(skb)->tso_segs = 0;
  8.1867 -+	skb_shinfo(skb)->gso_size = 0;
  8.1868 -+	skb_shinfo(skb)->gso_segs = 0;
  8.1869 -+	skb_shinfo(skb)->gso_type = 0;
  8.1870 - 	skb_shinfo(skb)->frag_list = NULL;
  8.1871 - out:
  8.1872 - 	return skb;
  8.1873 -@@ -501,8 +502,9 @@ #endif
  8.1874 - 	new->tc_index	= old->tc_index;
  8.1875 - #endif
  8.1876 - 	atomic_set(&new->users, 1);
  8.1877 --	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
  8.1878 --	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
  8.1879 -+	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
  8.1880 -+	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
  8.1881 -+	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
  8.1882 - }
  8.1883 - 
  8.1884 - /**
  8.1885 -@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 
  8.1886 - 	return 0;
  8.1887 - }
  8.1888 - 
  8.1889 -+/**
  8.1890 -+ *	skb_segment - Perform protocol segmentation on skb.
  8.1891 -+ *	@skb: buffer to segment
  8.1892 -+ *	@features: features for the output path (see dev->features)
  8.1893 -+ *
  8.1894 -+ *	This function performs segmentation on the given skb.  It returns
  8.1895 -+ *	the segment at the given position.  It returns NULL if there are
  8.1896 -+ *	no more segments to generate, or when an error is encountered.
  8.1897 -+ */
  8.1898 -+struct sk_buff *skb_segment(struct sk_buff *skb, int features)
  8.1899 -+{
  8.1900 -+	struct sk_buff *segs = NULL;
  8.1901 -+	struct sk_buff *tail = NULL;
  8.1902 -+	unsigned int mss = skb_shinfo(skb)->gso_size;
  8.1903 -+	unsigned int doffset = skb->data - skb->mac.raw;
  8.1904 -+	unsigned int offset = doffset;
  8.1905 -+	unsigned int headroom;
  8.1906 -+	unsigned int len;
  8.1907 -+	int sg = features & NETIF_F_SG;
  8.1908 -+	int nfrags = skb_shinfo(skb)->nr_frags;
  8.1909 -+	int err = -ENOMEM;
  8.1910 -+	int i = 0;
  8.1911 -+	int pos;
  8.1912 -+
  8.1913 -+	__skb_push(skb, doffset);
  8.1914 -+	headroom = skb_headroom(skb);
  8.1915 -+	pos = skb_headlen(skb);
  8.1916 -+
  8.1917 -+	do {
  8.1918 -+		struct sk_buff *nskb;
  8.1919 -+		skb_frag_t *frag;
  8.1920 -+		int hsize, nsize;
  8.1921 -+		int k;
  8.1922 -+		int size;
  8.1923 -+
  8.1924 -+		len = skb->len - offset;
  8.1925 -+		if (len > mss)
  8.1926 -+			len = mss;
  8.1927 -+
  8.1928 -+		hsize = skb_headlen(skb) - offset;
  8.1929 -+		if (hsize < 0)
  8.1930 -+			hsize = 0;
  8.1931 -+		nsize = hsize + doffset;
  8.1932 -+		if (nsize > len + doffset || !sg)
  8.1933 -+			nsize = len + doffset;
  8.1934 -+
  8.1935 -+		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
  8.1936 -+		if (unlikely(!nskb))
  8.1937 -+			goto err;
  8.1938 -+
  8.1939 -+		if (segs)
  8.1940 -+			tail->next = nskb;
  8.1941 -+		else
  8.1942 -+			segs = nskb;
  8.1943 -+		tail = nskb;
  8.1944 -+
  8.1945 -+		nskb->dev = skb->dev;
  8.1946 -+		nskb->priority = skb->priority;
  8.1947 -+		nskb->protocol = skb->protocol;
  8.1948 -+		nskb->dst = dst_clone(skb->dst);
  8.1949 -+		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
  8.1950 -+		nskb->pkt_type = skb->pkt_type;
  8.1951 -+		nskb->mac_len = skb->mac_len;
  8.1952 -+
  8.1953 -+		skb_reserve(nskb, headroom);
  8.1954 -+		nskb->mac.raw = nskb->data;
  8.1955 -+		nskb->nh.raw = nskb->data + skb->mac_len;
  8.1956 -+		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
  8.1957 -+		memcpy(skb_put(nskb, doffset), skb->data, doffset);
  8.1958 -+
  8.1959 -+		if (!sg) {
  8.1960 -+			nskb->csum = skb_copy_and_csum_bits(skb, offset,
  8.1961 -+							    skb_put(nskb, len),
  8.1962 -+							    len, 0);
  8.1963 -+			continue;
  8.1964 -+		}
  8.1965 -+
  8.1966 -+		frag = skb_shinfo(nskb)->frags;
  8.1967 -+		k = 0;
  8.1968 -+
  8.1969 -+		nskb->ip_summed = CHECKSUM_HW;
  8.1970 -+		nskb->csum = skb->csum;
  8.1971 -+		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
  8.1972 -+
  8.1973 -+		while (pos < offset + len) {
  8.1974 -+			BUG_ON(i >= nfrags);
  8.1975 -+
  8.1976 -+			*frag = skb_shinfo(skb)->frags[i];
  8.1977 -+			get_page(frag->page);
  8.1978 -+			size = frag->size;
  8.1979 -+
  8.1980 -+			if (pos < offset) {
  8.1981 -+				frag->page_offset += offset - pos;
  8.1982 -+				frag->size -= offset - pos;
  8.1983 -+			}
  8.1984 -+
  8.1985 -+			k++;
  8.1986 -+
  8.1987 -+			if (pos + size <= offset + len) {
  8.1988 -+				i++;
  8.1989 -+				pos += size;
  8.1990 -+			} else {
  8.1991 -+				frag->size -= pos + size - (offset + len);
  8.1992 -+				break;
  8.1993 -+			}
  8.1994 -+
  8.1995 -+			frag++;
  8.1996 -+		}
  8.1997 -+
  8.1998 -+		skb_shinfo(nskb)->nr_frags = k;
  8.1999 -+		nskb->data_len = len - hsize;
  8.2000 -+		nskb->len += nskb->data_len;
  8.2001 -+		nskb->truesize += nskb->data_len;
  8.2002 -+	} while ((offset += len) < skb->len);
  8.2003 -+
  8.2004 -+	return segs;
  8.2005 -+
  8.2006 -+err:
  8.2007 -+	while ((skb = segs)) {
  8.2008 -+		segs = skb->next;
  8.2009 -+		kfree(skb);
  8.2010 -+	}
  8.2011 -+	return ERR_PTR(err);
  8.2012 -+}
  8.2013 -+
  8.2014 -+EXPORT_SYMBOL_GPL(skb_segment);
  8.2015 -+
  8.2016 - void __init skb_init(void)
  8.2017 - {
  8.2018 - 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
  8.2019 -diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
  8.2020 -index 44bda85..2e3323a 100644
  8.2021 ---- a/net/decnet/dn_nsp_in.c
  8.2022 -+++ b/net/decnet/dn_nsp_in.c
  8.2023 -@@ -801,8 +801,7 @@ got_it:
  8.2024 - 		 * We linearize everything except data segments here.
  8.2025 - 		 */
  8.2026 - 		if (cb->nsp_flags & ~0x60) {
  8.2027 --			if (unlikely(skb_is_nonlinear(skb)) &&
  8.2028 --			    skb_linearize(skb, GFP_ATOMIC) != 0)
  8.2029 -+			if (unlikely(skb_linearize(skb)))
  8.2030 - 				goto free_out;
  8.2031 - 		}
  8.2032 - 
  8.2033 -diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
  8.2034 -index 3407f19..a0a25e0 100644
  8.2035 ---- a/net/decnet/dn_route.c
  8.2036 -+++ b/net/decnet/dn_route.c
  8.2037 -@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st
  8.2038 - 			padlen);
  8.2039 - 
  8.2040 -         if (flags & DN_RT_PKT_CNTL) {
  8.2041 --		if (unlikely(skb_is_nonlinear(skb)) &&
  8.2042 --		    skb_linearize(skb, GFP_ATOMIC) != 0)
  8.2043 -+		if (unlikely(skb_linearize(skb)))
  8.2044 - 			goto dump_it;
  8.2045 - 
  8.2046 -                 switch(flags & DN_RT_CNTL_MSK) {
  8.2047 -diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
  8.2048 -index 97c276f..5ba719e 100644
  8.2049 ---- a/net/ipv4/af_inet.c
  8.2050 -+++ b/net/ipv4/af_inet.c
  8.2051 -@@ -68,6 +68,7 @@
  8.2052 -  */
  8.2053 - 
  8.2054 - #include <linux/config.h>
  8.2055 -+#include <linux/err.h>
  8.2056 - #include <linux/errno.h>
  8.2057 - #include <linux/types.h>
  8.2058 - #include <linux/socket.h>
  8.2059 -@@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock *
  8.2060 - 
  8.2061 - EXPORT_SYMBOL(inet_sk_rebuild_header);
  8.2062 - 
  8.2063 -+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  8.2064 -+{
  8.2065 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
  8.2066 -+	struct iphdr *iph;
  8.2067 -+	struct net_protocol *ops;
  8.2068 -+	int proto;
  8.2069 -+	int ihl;
  8.2070 -+	int id;
  8.2071 -+
  8.2072 -+	if (!pskb_may_pull(skb, sizeof(*iph)))
  8.2073 -+		goto out;
  8.2074 -+
  8.2075 -+	iph = skb->nh.iph;
  8.2076 -+	ihl = iph->ihl * 4;
  8.2077 -+	if (ihl < sizeof(*iph))
  8.2078 -+		goto out;
  8.2079 -+
  8.2080 -+	if (!pskb_may_pull(skb, ihl))
  8.2081 -+		goto out;
  8.2082 -+
  8.2083 -+	skb->h.raw = __skb_pull(skb, ihl);
  8.2084 -+	iph = skb->nh.iph;
  8.2085 -+	id = ntohs(iph->id);
  8.2086 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  8.2087 -+	segs = ERR_PTR(-EPROTONOSUPPORT);
  8.2088 -+
  8.2089 -+	rcu_read_lock();
  8.2090 -+	ops = rcu_dereference(inet_protos[proto]);
  8.2091 -+	if (ops && ops->gso_segment)
  8.2092 -+		segs = ops->gso_segment(skb, features);
  8.2093 -+	rcu_read_unlock();
  8.2094 -+
  8.2095 -+	if (!segs || unlikely(IS_ERR(segs)))
  8.2096 -+		goto out;
  8.2097 -+
  8.2098 -+	skb = segs;
  8.2099 -+	do {
  8.2100 -+		iph = skb->nh.iph;
  8.2101 -+		iph->id = htons(id++);
  8.2102 -+		iph->tot_len = htons(skb->len - skb->mac_len);
  8.2103 -+		iph->check = 0;
  8.2104 -+		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
  8.2105 -+	} while ((skb = skb->next));
  8.2106 -+
  8.2107 -+out:
  8.2108 -+	return segs;
  8.2109 -+}
  8.2110 -+
  8.2111 - #ifdef CONFIG_IP_MULTICAST
  8.2112 - static struct net_protocol igmp_protocol = {
  8.2113 - 	.handler =	igmp_rcv,
  8.2114 -@@ -1093,6 +1142,7 @@ #endif
  8.2115 - static struct net_protocol tcp_protocol = {
  8.2116 - 	.handler =	tcp_v4_rcv,
  8.2117 - 	.err_handler =	tcp_v4_err,
  8.2118 -+	.gso_segment =	tcp_tso_segment,
  8.2119 - 	.no_policy =	1,
  8.2120 - };
  8.2121 - 
  8.2122 -@@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void);
  8.2123 - static struct packet_type ip_packet_type = {
  8.2124 - 	.type = __constant_htons(ETH_P_IP),
  8.2125 - 	.func = ip_rcv,
  8.2126 -+	.gso_segment = inet_gso_segment,
  8.2127 - };
  8.2128 - 
  8.2129 - static int __init inet_init(void)
  8.2130 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  8.2131 -index 8dcba38..19c3c73 100644
  8.2132 ---- a/net/ipv4/ip_output.c
  8.2133 -+++ b/net/ipv4/ip_output.c
  8.2134 -@@ -210,8 +210,7 @@ #if defined(CONFIG_NETFILTER) && defined
  8.2135 - 		return dst_output(skb);
  8.2136 - 	}
  8.2137 - #endif
  8.2138 --	if (skb->len > dst_mtu(skb->dst) &&
  8.2139 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  8.2140 -+	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  8.2141 - 		return ip_fragment(skb, ip_finish_output2);
  8.2142 - 	else
  8.2143 - 		return ip_finish_output2(skb);
  8.2144 -@@ -362,7 +361,7 @@ packet_routed:
  8.2145 - 	}
  8.2146 - 
  8.2147 - 	ip_select_ident_more(iph, &rt->u.dst, sk,
  8.2148 --			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);
  8.2149 -+			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
  8.2150 - 
  8.2151 - 	/* Add an IP checksum. */
  8.2152 - 	ip_send_check(iph);
  8.2153 -@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str
  8.2154 - 			       (length - transhdrlen));
  8.2155 - 	if (!err) {
  8.2156 - 		/* specify the length of each IP datagram fragment*/
  8.2157 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  8.2158 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  8.2159 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  8.2160 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
  8.2161 - 
  8.2162 - 		return 0;
  8.2163 -@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk,
  8.2164 - 	 */
  8.2165 - 	if (transhdrlen &&
  8.2166 - 	    length + fragheaderlen <= mtu &&
  8.2167 --	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
  8.2168 -+	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
  8.2169 - 	    !exthdrlen)
  8.2170 - 		csummode = CHECKSUM_HW;
  8.2171 - 
  8.2172 -@@ -1086,14 +1086,16 @@ ssize_t	ip_append_page(struct sock *sk, 
  8.2173 - 
  8.2174 - 	inet->cork.length += size;
  8.2175 - 	if ((sk->sk_protocol == IPPROTO_UDP) &&
  8.2176 --	    (rt->u.dst.dev->features & NETIF_F_UFO))
  8.2177 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  8.2178 -+	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
  8.2179 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  8.2180 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  8.2181 -+	}
  8.2182 - 
  8.2183 - 
  8.2184 - 	while (size > 0) {
  8.2185 - 		int i;
  8.2186 - 
  8.2187 --		if (skb_shinfo(skb)->ufo_size)
  8.2188 -+		if (skb_shinfo(skb)->gso_size)
  8.2189 - 			len = size;
  8.2190 - 		else {
  8.2191 - 
  8.2192 -diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
  8.2193 -index d64e2ec..7494823 100644
  8.2194 ---- a/net/ipv4/ipcomp.c
  8.2195 -+++ b/net/ipv4/ipcomp.c
  8.2196 -@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat
  8.2197 -                         struct xfrm_decap_state *decap, struct sk_buff *skb)
  8.2198 - {
  8.2199 - 	u8 nexthdr;
  8.2200 --	int err = 0;
  8.2201 -+	int err = -ENOMEM;
  8.2202 - 	struct iphdr *iph;
  8.2203 - 	union {
  8.2204 - 		struct iphdr	iph;
  8.2205 -@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat
  8.2206 - 	} tmp_iph;
  8.2207 - 
  8.2208 - 
  8.2209 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  8.2210 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  8.2211 --	    	err = -ENOMEM;
  8.2212 -+	if (skb_linearize_cow(skb))
  8.2213 - 	    	goto out;
  8.2214 --	}
  8.2215 - 
  8.2216 - 	skb->ip_summed = CHECKSUM_NONE;
  8.2217 - 
  8.2218 -@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta
  8.2219 - 		goto out_ok;
  8.2220 - 	}
  8.2221 - 
  8.2222 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  8.2223 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  8.2224 -+	if (skb_linearize_cow(skb))
  8.2225 - 		goto out_ok;
  8.2226 --	}
  8.2227 - 	
  8.2228 - 	err = ipcomp_compress(x, skb);
  8.2229 - 	iph = skb->nh.iph;
  8.2230 -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
  8.2231 -index 00aa80e..84130c9 100644
  8.2232 ---- a/net/ipv4/tcp.c
  8.2233 -+++ b/net/ipv4/tcp.c
  8.2234 -@@ -257,6 +257,7 @@ #include <linux/smp_lock.h>
  8.2235 - #include <linux/fs.h>
  8.2236 - #include <linux/random.h>
  8.2237 - #include <linux/bootmem.h>
  8.2238 -+#include <linux/err.h>
  8.2239 - 
  8.2240 - #include <net/icmp.h>
  8.2241 - #include <net/tcp.h>
  8.2242 -@@ -570,7 +571,7 @@ new_segment:
  8.2243 - 		skb->ip_summed = CHECKSUM_HW;
  8.2244 - 		tp->write_seq += copy;
  8.2245 - 		TCP_SKB_CB(skb)->end_seq += copy;
  8.2246 --		skb_shinfo(skb)->tso_segs = 0;
  8.2247 -+		skb_shinfo(skb)->gso_segs = 0;
  8.2248 - 
  8.2249 - 		if (!copied)
  8.2250 - 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
  8.2251 -@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock
  8.2252 - 	ssize_t res;
  8.2253 - 	struct sock *sk = sock->sk;
  8.2254 - 
  8.2255 --#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  8.2256 --
  8.2257 - 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
  8.2258 --	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
  8.2259 -+	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
  8.2260 - 		return sock_no_sendpage(sock, page, offset, size, flags);
  8.2261 - 
  8.2262 --#undef TCP_ZC_CSUM_FLAGS
  8.2263 --
  8.2264 - 	lock_sock(sk);
  8.2265 - 	TCP_CHECK_TIMER(sk);
  8.2266 - 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
  8.2267 -@@ -725,9 +722,7 @@ new_segment:
  8.2268 - 				/*
  8.2269 - 				 * Check whether we can use HW checksum.
  8.2270 - 				 */
  8.2271 --				if (sk->sk_route_caps &
  8.2272 --				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
  8.2273 --				     NETIF_F_HW_CSUM))
  8.2274 -+				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
  8.2275 - 					skb->ip_summed = CHECKSUM_HW;
  8.2276 - 
  8.2277 - 				skb_entail(sk, tp, skb);
  8.2278 -@@ -823,7 +818,7 @@ new_segment:
  8.2279 - 
  8.2280 - 			tp->write_seq += copy;
  8.2281 - 			TCP_SKB_CB(skb)->end_seq += copy;
  8.2282 --			skb_shinfo(skb)->tso_segs = 0;
  8.2283 -+			skb_shinfo(skb)->gso_segs = 0;
  8.2284 - 
  8.2285 - 			from += copy;
  8.2286 - 			copied += copy;
  8.2287 -@@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int 
  8.2288 - }
  8.2289 - 
  8.2290 - 
  8.2291 -+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
  8.2292 -+{
  8.2293 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
  8.2294 -+	struct tcphdr *th;
  8.2295 -+	unsigned thlen;
  8.2296 -+	unsigned int seq;
  8.2297 -+	unsigned int delta;
  8.2298 -+	unsigned int oldlen;
  8.2299 -+	unsigned int len;
  8.2300 -+
  8.2301 -+	if (!pskb_may_pull(skb, sizeof(*th)))
  8.2302 -+		goto out;
  8.2303 -+
  8.2304 -+	th = skb->h.th;
  8.2305 -+	thlen = th->doff * 4;
  8.2306 -+	if (thlen < sizeof(*th))
  8.2307 -+		goto out;
  8.2308 -+
  8.2309 -+	if (!pskb_may_pull(skb, thlen))
  8.2310 -+		goto out;
  8.2311 -+
  8.2312 -+	segs = NULL;
  8.2313 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
  8.2314 -+		goto out;
  8.2315 -+
  8.2316 -+	oldlen = (u16)~skb->len;
  8.2317 -+	__skb_pull(skb, thlen);
  8.2318 -+
  8.2319 -+	segs = skb_segment(skb, features);
  8.2320 -+	if (IS_ERR(segs))
  8.2321 -+		goto out;
  8.2322 -+
  8.2323 -+	len = skb_shinfo(skb)->gso_size;
  8.2324 -+	delta = htonl(oldlen + (thlen + len));
  8.2325 -+
  8.2326 -+	skb = segs;
  8.2327 -+	th = skb->h.th;
  8.2328 -+	seq = ntohl(th->seq);
  8.2329 -+
  8.2330 -+	do {
  8.2331 -+		th->fin = th->psh = 0;
  8.2332 -+
  8.2333 -+		th->check = ~csum_fold(th->check + delta);
  8.2334 -+		if (skb->ip_summed != CHECKSUM_HW)
  8.2335 -+			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  8.2336 -+							   skb->csum));
  8.2337 -+
  8.2338 -+		seq += len;
  8.2339 -+		skb = skb->next;
  8.2340 -+		th = skb->h.th;
  8.2341 -+
  8.2342 -+		th->seq = htonl(seq);
  8.2343 -+		th->cwr = 0;
  8.2344 -+	} while (skb->next);
  8.2345 -+
  8.2346 -+	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
  8.2347 -+	th->check = ~csum_fold(th->check + delta);
  8.2348 -+	if (skb->ip_summed != CHECKSUM_HW)
  8.2349 -+		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  8.2350 -+						   skb->csum));
  8.2351 -+
  8.2352 -+out:
  8.2353 -+	return segs;
  8.2354 -+}
  8.2355 -+
  8.2356 - extern void __skb_cb_too_small_for_tcp(int, int);
  8.2357 - extern struct tcp_congestion_ops tcp_reno;
  8.2358 - 
  8.2359 -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
  8.2360 -index e9a54ae..defe77a 100644
  8.2361 ---- a/net/ipv4/tcp_input.c
  8.2362 -+++ b/net/ipv4/tcp_input.c
  8.2363 -@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk,
  8.2364 - 				else
  8.2365 - 					pkt_len = (end_seq -
  8.2366 - 						   TCP_SKB_CB(skb)->seq);
  8.2367 --				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
  8.2368 -+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
  8.2369 - 					break;
  8.2370 - 				pcount = tcp_skb_pcount(skb);
  8.2371 - 			}
  8.2372 -diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
  8.2373 -index 310f2e6..ee01f69 100644
  8.2374 ---- a/net/ipv4/tcp_output.c
  8.2375 -+++ b/net/ipv4/tcp_output.c
  8.2376 -@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 
  8.2377 - 		/* Avoid the costly divide in the normal
  8.2378 - 		 * non-TSO case.
  8.2379 - 		 */
  8.2380 --		skb_shinfo(skb)->tso_segs = 1;
  8.2381 --		skb_shinfo(skb)->tso_size = 0;
  8.2382 -+		skb_shinfo(skb)->gso_segs = 1;
  8.2383 -+		skb_shinfo(skb)->gso_size = 0;
  8.2384 -+		skb_shinfo(skb)->gso_type = 0;
  8.2385 - 	} else {
  8.2386 - 		unsigned int factor;
  8.2387 - 
  8.2388 - 		factor = skb->len + (mss_now - 1);
  8.2389 - 		factor /= mss_now;
  8.2390 --		skb_shinfo(skb)->tso_segs = factor;
  8.2391 --		skb_shinfo(skb)->tso_size = mss_now;
  8.2392 -+		skb_shinfo(skb)->gso_segs = factor;
  8.2393 -+		skb_shinfo(skb)->gso_size = mss_now;
  8.2394 -+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  8.2395 - 	}
  8.2396 - }
  8.2397 - 
  8.2398 -@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock
  8.2399 - 
  8.2400 - 	if (!tso_segs ||
  8.2401 - 	    (tso_segs > 1 &&
  8.2402 --	     skb_shinfo(skb)->tso_size != mss_now)) {
  8.2403 -+	     tcp_skb_mss(skb) != mss_now)) {
  8.2404 - 		tcp_set_skb_tso_segs(sk, skb, mss_now);
  8.2405 - 		tso_segs = tcp_skb_pcount(skb);
  8.2406 - 	}
  8.2407 -@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 
  8.2408 - 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
  8.2409 - 		if (!pskb_trim(skb, 0)) {
  8.2410 - 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
  8.2411 --			skb_shinfo(skb)->tso_segs = 1;
  8.2412 --			skb_shinfo(skb)->tso_size = 0;
  8.2413 -+			skb_shinfo(skb)->gso_segs = 1;
  8.2414 -+			skb_shinfo(skb)->gso_size = 0;
  8.2415 -+			skb_shinfo(skb)->gso_type = 0;
  8.2416 - 			skb->ip_summed = CHECKSUM_NONE;
  8.2417 - 			skb->csum = 0;
  8.2418 - 		}
  8.2419 -@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk)
  8.2420 - 		skb->csum = 0;
  8.2421 - 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
  8.2422 - 		TCP_SKB_CB(skb)->sacked = 0;
  8.2423 --		skb_shinfo(skb)->tso_segs = 1;
  8.2424 --		skb_shinfo(skb)->tso_size = 0;
  8.2425 -+		skb_shinfo(skb)->gso_segs = 1;
  8.2426 -+		skb_shinfo(skb)->gso_size = 0;
  8.2427 -+		skb_shinfo(skb)->gso_type = 0;
  8.2428 - 
  8.2429 - 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
  8.2430 - 		TCP_SKB_CB(skb)->seq = tp->write_seq;
  8.2431 -@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock *
  8.2432 - 	skb->csum = 0;
  8.2433 - 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
  8.2434 - 	TCP_SKB_CB(skb)->sacked = 0;
  8.2435 --	skb_shinfo(skb)->tso_segs = 1;
  8.2436 --	skb_shinfo(skb)->tso_size = 0;
  8.2437 -+	skb_shinfo(skb)->gso_segs = 1;
  8.2438 -+	skb_shinfo(skb)->gso_size = 0;
  8.2439 -+	skb_shinfo(skb)->gso_type = 0;
  8.2440 - 
  8.2441 - 	/* Send it off. */
  8.2442 - 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
  8.2443 -@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 
  8.2444 - 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
  8.2445 - 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
  8.2446 - 	TCP_SKB_CB(skb)->sacked = 0;
  8.2447 --	skb_shinfo(skb)->tso_segs = 1;
  8.2448 --	skb_shinfo(skb)->tso_size = 0;
  8.2449 -+	skb_shinfo(skb)->gso_segs = 1;
  8.2450 -+	skb_shinfo(skb)->gso_size = 0;
  8.2451 -+	skb_shinfo(skb)->gso_type = 0;
  8.2452 - 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
  8.2453 - 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
  8.2454 - 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
  8.2455 -@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk)
  8.2456 - 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
  8.2457 - 	TCP_ECN_send_syn(sk, tp, buff);
  8.2458 - 	TCP_SKB_CB(buff)->sacked = 0;
  8.2459 --	skb_shinfo(buff)->tso_segs = 1;
  8.2460 --	skb_shinfo(buff)->tso_size = 0;
  8.2461 -+	skb_shinfo(buff)->gso_segs = 1;
  8.2462 -+	skb_shinfo(buff)->gso_size = 0;
  8.2463 -+	skb_shinfo(buff)->gso_type = 0;
  8.2464 - 	buff->csum = 0;
  8.2465 - 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
  8.2466 - 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
  8.2467 -@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk)
  8.2468 - 		buff->csum = 0;
  8.2469 - 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
  8.2470 - 		TCP_SKB_CB(buff)->sacked = 0;
  8.2471 --		skb_shinfo(buff)->tso_segs = 1;
  8.2472 --		skb_shinfo(buff)->tso_size = 0;
  8.2473 -+		skb_shinfo(buff)->gso_segs = 1;
  8.2474 -+		skb_shinfo(buff)->gso_size = 0;
  8.2475 -+		skb_shinfo(buff)->gso_type = 0;
  8.2476 - 
  8.2477 - 		/* Send it off, this clears delayed acks for us. */
  8.2478 - 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
  8.2479 -@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc
  8.2480 - 	skb->csum = 0;
  8.2481 - 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
  8.2482 - 	TCP_SKB_CB(skb)->sacked = urgent;
  8.2483 --	skb_shinfo(skb)->tso_segs = 1;
  8.2484 --	skb_shinfo(skb)->tso_size = 0;
  8.2485 -+	skb_shinfo(skb)->gso_segs = 1;
  8.2486 -+	skb_shinfo(skb)->gso_size = 0;
  8.2487 -+	skb_shinfo(skb)->gso_type = 0;
  8.2488 - 
  8.2489 - 	/* Use a previous sequence.  This should cause the other
  8.2490 - 	 * end to send an ack.  Don't queue or clone SKB, just
  8.2491 -diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
  8.2492 -index 32ad229..737c1db 100644
  8.2493 ---- a/net/ipv4/xfrm4_output.c
  8.2494 -+++ b/net/ipv4/xfrm4_output.c
  8.2495 -@@ -9,6 +9,8 @@
  8.2496 -  */
  8.2497 - 
  8.2498 - #include <linux/compiler.h>
  8.2499 -+#include <linux/if_ether.h>
  8.2500 -+#include <linux/kernel.h>
  8.2501 - #include <linux/skbuff.h>
  8.2502 - #include <linux/spinlock.h>
  8.2503 - #include <linux/netfilter_ipv4.h>
  8.2504 -@@ -152,16 +154,10 @@ error_nolock:
  8.2505 - 	goto out_exit;
  8.2506 - }
  8.2507 - 
  8.2508 --static int xfrm4_output_finish(struct sk_buff *skb)
  8.2509 -+static int xfrm4_output_finish2(struct sk_buff *skb)
  8.2510 - {
  8.2511 - 	int err;
  8.2512 - 
  8.2513 --#ifdef CONFIG_NETFILTER
  8.2514 --	if (!skb->dst->xfrm) {
  8.2515 --		IPCB(skb)->flags |= IPSKB_REROUTED;
  8.2516 --		return dst_output(skb);
  8.2517 --	}
  8.2518 --#endif
  8.2519 - 	while (likely((err = xfrm4_output_one(skb)) == 0)) {
  8.2520 - 		nf_reset(skb);
  8.2521 - 
  8.2522 -@@ -174,7 +170,7 @@ #endif
  8.2523 - 			return dst_output(skb);
  8.2524 - 
  8.2525 - 		err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
  8.2526 --			      skb->dst->dev, xfrm4_output_finish);
  8.2527 -+			      skb->dst->dev, xfrm4_output_finish2);
  8.2528 - 		if (unlikely(err != 1))
  8.2529 - 			break;
  8.2530 - 	}
  8.2531 -@@ -182,6 +178,48 @@ #endif
  8.2532 - 	return err;
  8.2533 - }
  8.2534 - 
  8.2535 -+static int xfrm4_output_finish(struct sk_buff *skb)
  8.2536 -+{
  8.2537 -+	struct sk_buff *segs;
  8.2538 -+
  8.2539 -+#ifdef CONFIG_NETFILTER
  8.2540 -+	if (!skb->dst->xfrm) {
  8.2541 -+		IPCB(skb)->flags |= IPSKB_REROUTED;
  8.2542 -+		return dst_output(skb);
  8.2543 -+	}
  8.2544 -+#endif
  8.2545 -+
  8.2546 -+	if (!skb_shinfo(skb)->gso_size)
  8.2547 -+		return xfrm4_output_finish2(skb);
  8.2548 -+
  8.2549 -+	skb->protocol = htons(ETH_P_IP);
  8.2550 -+	segs = skb_gso_segment(skb, 0);
  8.2551 -+	kfree_skb(skb);
  8.2552 -+	if (unlikely(IS_ERR(segs)))
  8.2553 -+		return PTR_ERR(segs);
  8.2554 -+
  8.2555 -+	do {
  8.2556 -+		struct sk_buff *nskb = segs->next;
  8.2557 -+		int err;
  8.2558 -+
  8.2559 -+		segs->next = NULL;
  8.2560 -+		err = xfrm4_output_finish2(segs);
  8.2561 -+
  8.2562 -+		if (unlikely(err)) {
  8.2563 -+			while ((segs = nskb)) {
  8.2564 -+				nskb = segs->next;
  8.2565 -+				segs->next = NULL;
  8.2566 -+				kfree_skb(segs);
  8.2567 -+			}
  8.2568 -+			return err;
  8.2569 -+		}
  8.2570 -+
  8.2571 -+		segs = nskb;
  8.2572 -+	} while (segs);
  8.2573 -+
  8.2574 -+	return 0;
  8.2575 -+}
  8.2576 -+
  8.2577 - int xfrm4_output(struct sk_buff *skb)
  8.2578 - {
  8.2579 - 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
  8.2580 -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
  8.2581 -index 5bf70b1..cf5d17e 100644
  8.2582 ---- a/net/ipv6/ip6_output.c
  8.2583 -+++ b/net/ipv6/ip6_output.c
  8.2584 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  8.2585 - 
  8.2586 - int ip6_output(struct sk_buff *skb)
  8.2587 - {
  8.2588 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
  8.2589 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  8.2590 - 				dst_allfrag(skb->dst))
  8.2591 - 		return ip6_fragment(skb, ip6_output2);
  8.2592 - 	else
  8.2593 -@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st
  8.2594 - 		struct frag_hdr fhdr;
  8.2595 - 
  8.2596 - 		/* specify the length of each IP datagram fragment*/
  8.2597 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 
  8.2598 --						sizeof(struct frag_hdr);
  8.2599 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 
  8.2600 -+					    sizeof(struct frag_hdr);
  8.2601 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  8.2602 - 		ipv6_select_ident(skb, &fhdr);
  8.2603 - 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
  8.2604 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
  8.2605 -diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
  8.2606 -index d511a88..ef56d5d 100644
  8.2607 ---- a/net/ipv6/ipcomp6.c
  8.2608 -+++ b/net/ipv6/ipcomp6.c
  8.2609 -@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
  8.2610 - 
  8.2611 - static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
  8.2612 - {
  8.2613 --	int err = 0;
  8.2614 -+	int err = -ENOMEM;
  8.2615 - 	u8 nexthdr = 0;
  8.2616 - 	int hdr_len = skb->h.raw - skb->nh.raw;
  8.2617 - 	unsigned char *tmp_hdr = NULL;
  8.2618 -@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta
  8.2619 - 	struct crypto_tfm *tfm;
  8.2620 - 	int cpu;
  8.2621 - 
  8.2622 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  8.2623 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
  8.2624 --		err = -ENOMEM;
  8.2625 -+	if (skb_linearize_cow(skb))
  8.2626 - 		goto out;
  8.2627 --	}
  8.2628 - 
  8.2629 - 	skb->ip_summed = CHECKSUM_NONE;
  8.2630 - 
  8.2631 -@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st
  8.2632 - 		goto out_ok;
  8.2633 - 	}
  8.2634 - 
  8.2635 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  8.2636 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
  8.2637 -+	if (skb_linearize_cow(skb))
  8.2638 - 		goto out_ok;
  8.2639 --	}
  8.2640 - 
  8.2641 - 	/* compression */
  8.2642 - 	plen = skb->len - hdr_len;
  8.2643 -diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
  8.2644 -index 8024217..39bdeec 100644
  8.2645 ---- a/net/ipv6/xfrm6_output.c
  8.2646 -+++ b/net/ipv6/xfrm6_output.c
  8.2647 -@@ -151,7 +151,7 @@ error_nolock:
  8.2648 - 	goto out_exit;
  8.2649 - }
  8.2650 - 
  8.2651 --static int xfrm6_output_finish(struct sk_buff *skb)
  8.2652 -+static int xfrm6_output_finish2(struct sk_buff *skb)
  8.2653 - {
  8.2654 - 	int err;
  8.2655 - 
  8.2656 -@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk
  8.2657 - 			return dst_output(skb);
  8.2658 - 
  8.2659 - 		err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
  8.2660 --			      skb->dst->dev, xfrm6_output_finish);
  8.2661 -+			      skb->dst->dev, xfrm6_output_finish2);
  8.2662 - 		if (unlikely(err != 1))
  8.2663 - 			break;
  8.2664 - 	}
  8.2665 -@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk
  8.2666 - 	return err;
  8.2667 - }
  8.2668 - 
  8.2669 -+static int xfrm6_output_finish(struct sk_buff *skb)
  8.2670 -+{
  8.2671 -+	struct sk_buff *segs;
  8.2672 -+
  8.2673 -+	if (!skb_shinfo(skb)->gso_size)
  8.2674 -+		return xfrm6_output_finish2(skb);
  8.2675 -+
  8.2676 -+	skb->protocol = htons(ETH_P_IP);
  8.2677 -+	segs = skb_gso_segment(skb, 0);
  8.2678 -+	kfree_skb(skb);
  8.2679 -+	if (unlikely(IS_ERR(segs)))
  8.2680 -+		return PTR_ERR(segs);
  8.2681 -+
  8.2682 -+	do {
  8.2683 -+		struct sk_buff *nskb = segs->next;
  8.2684 -+		int err;
  8.2685 -+
  8.2686 -+		segs->next = NULL;
  8.2687 -+		err = xfrm6_output_finish2(segs);
  8.2688 -+
  8.2689 -+		if (unlikely(err)) {
  8.2690 -+			while ((segs = nskb)) {
  8.2691 -+				nskb = segs->next;
  8.2692 -+				segs->next = NULL;
  8.2693 -+				kfree_skb(segs);
  8.2694 -+			}
  8.2695 -+			return err;
  8.2696 -+		}
  8.2697 -+
  8.2698 -+		segs = nskb;
  8.2699 -+	} while (segs);
  8.2700 -+
  8.2701 -+	return 0;
  8.2702 -+}
  8.2703 -+
  8.2704 - int xfrm6_output(struct sk_buff *skb)
  8.2705 - {
  8.2706 - 	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
  8.2707 -diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
  8.2708 -index 99ceb91..28c9efd 100644
  8.2709 ---- a/net/sched/sch_generic.c
  8.2710 -+++ b/net/sched/sch_generic.c
  8.2711 -@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device
  8.2712 -    dev->queue_lock serializes queue accesses for this device
  8.2713 -    AND dev->qdisc pointer itself.
  8.2714 - 
  8.2715 --   dev->xmit_lock serializes accesses to device driver.
  8.2716 -+   netif_tx_lock serializes accesses to device driver.
  8.2717 - 
  8.2718 --   dev->queue_lock and dev->xmit_lock are mutually exclusive,
  8.2719 -+   dev->queue_lock and netif_tx_lock are mutually exclusive,
  8.2720 -    if one is grabbed, another must be free.
  8.2721 -  */
  8.2722 - 
  8.2723 -@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device
  8.2724 -    NOTE: Called under dev->queue_lock with locally disabled BH.
  8.2725 - */
  8.2726 - 
  8.2727 --int qdisc_restart(struct net_device *dev)
  8.2728 -+static inline int qdisc_restart(struct net_device *dev)
  8.2729 - {
  8.2730 - 	struct Qdisc *q = dev->qdisc;
  8.2731 - 	struct sk_buff *skb;
  8.2732 - 
  8.2733 - 	/* Dequeue packet */
  8.2734 --	if ((skb = q->dequeue(q)) != NULL) {
  8.2735 -+	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
  8.2736 - 		unsigned nolock = (dev->features & NETIF_F_LLTX);
  8.2737 -+
  8.2738 -+		dev->gso_skb = NULL;
  8.2739 -+
  8.2740 - 		/*
  8.2741 - 		 * When the driver has LLTX set it does its own locking
  8.2742 - 		 * in start_xmit. No need to add additional overhead by
  8.2743 -@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev
  8.2744 - 		 * will be requeued.
  8.2745 - 		 */
  8.2746 - 		if (!nolock) {
  8.2747 --			if (!spin_trylock(&dev->xmit_lock)) {
  8.2748 -+			if (!netif_tx_trylock(dev)) {
  8.2749 - 			collision:
  8.2750 - 				/* So, someone grabbed the driver. */
  8.2751 - 				
  8.2752 -@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev
  8.2753 - 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
  8.2754 - 				goto requeue;
  8.2755 - 			}
  8.2756 --			/* Remember that the driver is grabbed by us. */
  8.2757 --			dev->xmit_lock_owner = smp_processor_id();
  8.2758 - 		}
  8.2759 - 		
  8.2760 - 		{
  8.2761 -@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev
  8.2762 - 
  8.2763 - 			if (!netif_queue_stopped(dev)) {
  8.2764 - 				int ret;
  8.2765 --				if (netdev_nit)
  8.2766 --					dev_queue_xmit_nit(skb, dev);
  8.2767 - 
  8.2768 --				ret = dev->hard_start_xmit(skb, dev);
  8.2769 -+				ret = dev_hard_start_xmit(skb, dev);
  8.2770 - 				if (ret == NETDEV_TX_OK) { 
  8.2771 - 					if (!nolock) {
  8.2772 --						dev->xmit_lock_owner = -1;
  8.2773 --						spin_unlock(&dev->xmit_lock);
  8.2774 -+						netif_tx_unlock(dev);
  8.2775 - 					}
  8.2776 - 					spin_lock(&dev->queue_lock);
  8.2777 - 					return -1;
  8.2778 -@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev
  8.2779 - 			/* NETDEV_TX_BUSY - we need to requeue */
  8.2780 - 			/* Release the driver */
  8.2781 - 			if (!nolock) { 
  8.2782 --				dev->xmit_lock_owner = -1;
  8.2783 --				spin_unlock(&dev->xmit_lock);
  8.2784 -+				netif_tx_unlock(dev);
  8.2785 - 			} 
  8.2786 - 			spin_lock(&dev->queue_lock);
  8.2787 - 			q = dev->qdisc;
  8.2788 -@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev
  8.2789 - 		 */
  8.2790 - 
  8.2791 - requeue:
  8.2792 --		q->ops->requeue(skb, q);
  8.2793 -+		if (skb->next)
  8.2794 -+			dev->gso_skb = skb;
  8.2795 -+		else
  8.2796 -+			q->ops->requeue(skb, q);
  8.2797 - 		netif_schedule(dev);
  8.2798 - 		return 1;
  8.2799 - 	}
  8.2800 -@@ -183,11 +183,23 @@ requeue:
  8.2801 - 	return q->q.qlen;
  8.2802 - }
  8.2803 - 
  8.2804 -+void __qdisc_run(struct net_device *dev)
  8.2805 -+{
  8.2806 -+	if (unlikely(dev->qdisc == &noop_qdisc))
  8.2807 -+		goto out;
  8.2808 -+
  8.2809 -+	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
  8.2810 -+		/* NOTHING */;
  8.2811 -+
  8.2812 -+out:
  8.2813 -+	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
  8.2814 -+}
  8.2815 -+
  8.2816 - static void dev_watchdog(unsigned long arg)
  8.2817 - {
  8.2818 - 	struct net_device *dev = (struct net_device *)arg;
  8.2819 - 
  8.2820 --	spin_lock(&dev->xmit_lock);
  8.2821 -+	netif_tx_lock(dev);
  8.2822 - 	if (dev->qdisc != &noop_qdisc) {
  8.2823 - 		if (netif_device_present(dev) &&
  8.2824 - 		    netif_running(dev) &&
  8.2825 -@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a
  8.2826 - 				dev_hold(dev);
  8.2827 - 		}
  8.2828 - 	}
  8.2829 --	spin_unlock(&dev->xmit_lock);
  8.2830 -+	netif_tx_unlock(dev);
  8.2831 - 
  8.2832 - 	dev_put(dev);
  8.2833 - }
  8.2834 -@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev
  8.2835 - 
  8.2836 - static void dev_watchdog_up(struct net_device *dev)
  8.2837 - {
  8.2838 --	spin_lock_bh(&dev->xmit_lock);
  8.2839 -+	netif_tx_lock_bh(dev);
  8.2840 - 	__netdev_watchdog_up(dev);
  8.2841 --	spin_unlock_bh(&dev->xmit_lock);
  8.2842 -+	netif_tx_unlock_bh(dev);
  8.2843 - }
  8.2844 - 
  8.2845 - static void dev_watchdog_down(struct net_device *dev)
  8.2846 - {
  8.2847 --	spin_lock_bh(&dev->xmit_lock);
  8.2848 -+	netif_tx_lock_bh(dev);
  8.2849 - 	if (del_timer(&dev->watchdog_timer))
  8.2850 - 		__dev_put(dev);
  8.2851 --	spin_unlock_bh(&dev->xmit_lock);
  8.2852 -+	netif_tx_unlock_bh(dev);
  8.2853 - }
  8.2854 - 
  8.2855 - void netif_carrier_on(struct net_device *dev)
  8.2856 -@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d
  8.2857 - 
  8.2858 - 	dev_watchdog_down(dev);
  8.2859 - 
  8.2860 --	while (test_bit(__LINK_STATE_SCHED, &dev->state))
  8.2861 -+	/* Wait for outstanding dev_queue_xmit calls. */
  8.2862 -+	synchronize_rcu();
  8.2863 -+
  8.2864 -+	/* Wait for outstanding qdisc_run calls. */
  8.2865 -+	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  8.2866 - 		yield();
  8.2867 - 
  8.2868 --	spin_unlock_wait(&dev->xmit_lock);
  8.2869 -+	if (dev->gso_skb) {
  8.2870 -+		kfree_skb(dev->gso_skb);
  8.2871 -+		dev->gso_skb = NULL;
  8.2872 -+	}
  8.2873 - }
  8.2874 - 
  8.2875 - void dev_init_scheduler(struct net_device *dev)
  8.2876 -@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
  8.2877 - EXPORT_SYMBOL(qdisc_alloc);
  8.2878 - EXPORT_SYMBOL(qdisc_destroy);
  8.2879 - EXPORT_SYMBOL(qdisc_reset);
  8.2880 --EXPORT_SYMBOL(qdisc_restart);
  8.2881 - EXPORT_SYMBOL(qdisc_lock_tree);
  8.2882 - EXPORT_SYMBOL(qdisc_unlock_tree);
  8.2883 -diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
  8.2884 -index 79b8ef3..4c16ad5 100644
  8.2885 ---- a/net/sched/sch_teql.c
  8.2886 -+++ b/net/sched/sch_teql.c
  8.2887 -@@ -302,20 +302,17 @@ restart:
  8.2888 - 
  8.2889 - 		switch (teql_resolve(skb, skb_res, slave)) {
  8.2890 - 		case 0:
  8.2891 --			if (spin_trylock(&slave->xmit_lock)) {
  8.2892 --				slave->xmit_lock_owner = smp_processor_id();
  8.2893 -+			if (netif_tx_trylock(slave)) {
  8.2894 - 				if (!netif_queue_stopped(slave) &&
  8.2895 - 				    slave->hard_start_xmit(skb, slave) == 0) {
  8.2896 --					slave->xmit_lock_owner = -1;
  8.2897 --					spin_unlock(&slave->xmit_lock);
  8.2898 -+					netif_tx_unlock(slave);
  8.2899 - 					master->slaves = NEXT_SLAVE(q);
  8.2900 - 					netif_wake_queue(dev);
  8.2901 - 					master->stats.tx_packets++;
  8.2902 - 					master->stats.tx_bytes += len;
  8.2903 - 					return 0;
  8.2904 - 				}
  8.2905 --				slave->xmit_lock_owner = -1;
  8.2906 --				spin_unlock(&slave->xmit_lock);
  8.2907 -+				netif_tx_unlock(slave);
  8.2908 - 			}
  8.2909 - 			if (netif_queue_stopped(dev))
  8.2910 - 				busy = 1;
     9.1 --- a/patches/linux-2.6.16.13/net-gso-1-check-dodgy.patch	Tue Sep 19 14:26:22 2006 +0100
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,27 +0,0 @@
     9.4 -diff -urp a/net/ipv4/tcp.c b/net/ipv4/tcp.c
     9.5 ---- a/net/ipv4/tcp.c	2006-07-25 14:42:53.194910626 +0100
     9.6 -+++ b/net/ipv4/tcp.c	2006-07-25 14:41:00.955501910 +0100
     9.7 -@@ -2042,13 +2042,19 @@ struct sk_buff *tcp_tso_segment(struct s
     9.8 - 	if (!pskb_may_pull(skb, thlen))
     9.9 - 		goto out;
    9.10 - 
    9.11 --	segs = NULL;
    9.12 --	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
    9.13 --		goto out;
    9.14 --
    9.15 - 	oldlen = (u16)~skb->len;
    9.16 - 	__skb_pull(skb, thlen);
    9.17 - 
    9.18 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
    9.19 -+		/* Packet is from an untrusted source, reset gso_segs. */
    9.20 -+		int mss = skb_shinfo(skb)->gso_size;
    9.21 -+
    9.22 -+		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
    9.23 -+
    9.24 -+		segs = NULL;
    9.25 -+		goto out;
    9.26 -+	}
    9.27 -+
    9.28 - 	segs = skb_segment(skb, features);
    9.29 - 	if (IS_ERR(segs))
    9.30 - 		goto out;
    10.1 --- a/patches/linux-2.6.16.13/net-gso-2-checksum-fix.patch	Tue Sep 19 14:26:22 2006 +0100
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,451 +0,0 @@
    10.4 -diff -urp a/drivers/net/bnx2.c b/drivers/net/bnx2.c
    10.5 ---- a/drivers/net/bnx2.c	2006-07-25 14:41:00.905507519 +0100
    10.6 -+++ b/drivers/net/bnx2.c	2006-07-25 14:36:00.288561400 +0100
    10.7 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
    10.8 - 		skb = tx_buf->skb;
    10.9 - #ifdef BCM_TSO 
   10.10 - 		/* partial BD completions possible with TSO packets */
   10.11 --		if (skb_shinfo(skb)->gso_size) {
   10.12 -+		if (skb_is_gso(skb)) {
   10.13 - 			u16 last_idx, last_ring_idx;
   10.14 - 
   10.15 - 			last_idx = sw_cons +
   10.16 -diff -urp a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
   10.17 ---- a/drivers/net/chelsio/sge.c	2006-07-25 14:41:00.908507183 +0100
   10.18 -+++ b/drivers/net/chelsio/sge.c	2006-07-25 14:36:00.291561087 +0100
   10.19 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   10.20 - 	struct cpl_tx_pkt *cpl;
   10.21 - 
   10.22 - #ifdef NETIF_F_TSO
   10.23 --	if (skb_shinfo(skb)->gso_size) {
   10.24 -+	if (skb_is_gso(skb)) {
   10.25 - 		int eth_type;
   10.26 - 		struct cpl_tx_pkt_lso *hdr;
   10.27 - 
   10.28 -diff -urp a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   10.29 ---- a/drivers/net/e1000/e1000_main.c	2006-07-25 14:41:00.910506958 +0100
   10.30 -+++ b/drivers/net/e1000/e1000_main.c	2006-07-25 14:36:00.293560878 +0100
   10.31 -@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
   10.32 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   10.33 - 	int err;
   10.34 - 
   10.35 --	if (skb_shinfo(skb)->gso_size) {
   10.36 -+	if (skb_is_gso(skb)) {
   10.37 - 		if (skb_header_cloned(skb)) {
   10.38 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   10.39 - 			if (err)
   10.40 -@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
   10.41 - 		 * tso gets written back prematurely before the data is fully
   10.42 - 		 * DMAd to the controller */
   10.43 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
   10.44 --				!skb_shinfo(skb)->gso_size) {
   10.45 -+		    !skb_is_gso(skb)) {
   10.46 - 			tx_ring->last_tx_tso = 0;
   10.47 - 			size -= 4;
   10.48 - 		}
   10.49 -@@ -2934,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   10.50 - 
   10.51 - #ifdef NETIF_F_TSO
   10.52 - 	/* Controller Erratum workaround */
   10.53 --	if (!skb->data_len && tx_ring->last_tx_tso &&
   10.54 --		!skb_shinfo(skb)->gso_size)
   10.55 -+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
   10.56 - 		count++;
   10.57 - #endif
   10.58 - 
   10.59 -diff -urp a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
   10.60 ---- a/drivers/net/forcedeth.c	2006-07-25 14:41:00.912506734 +0100
   10.61 -+++ b/drivers/net/forcedeth.c	2006-07-25 14:36:00.295560669 +0100
   10.62 -@@ -1105,7 +1105,7 @@ static int nv_start_xmit(struct sk_buff 
   10.63 - 	np->tx_skbuff[nr] = skb;
   10.64 - 
   10.65 - #ifdef NETIF_F_TSO
   10.66 --	if (skb_shinfo(skb)->gso_size)
   10.67 -+	if (skb_is_gso(skb))
   10.68 - 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   10.69 - 	else
   10.70 - #endif
   10.71 -diff -urp a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
   10.72 ---- a/drivers/net/ixgb/ixgb_main.c	2006-07-25 14:41:00.915506397 +0100
   10.73 -+++ b/drivers/net/ixgb/ixgb_main.c	2006-07-25 14:36:00.298560355 +0100
   10.74 -@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
   10.75 - 	uint16_t ipcse, tucse, mss;
   10.76 - 	int err;
   10.77 - 
   10.78 --	if(likely(skb_shinfo(skb)->gso_size)) {
   10.79 -+	if (likely(skb_is_gso(skb))) {
   10.80 - 		if (skb_header_cloned(skb)) {
   10.81 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   10.82 - 			if (err)
   10.83 -diff -urp a/drivers/net/loopback.c b/drivers/net/loopback.c
   10.84 ---- a/drivers/net/loopback.c	2006-07-25 14:41:00.915506397 +0100
   10.85 -+++ b/drivers/net/loopback.c	2006-07-25 14:36:00.298560355 +0100
   10.86 -@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
   10.87 - #endif
   10.88 - 
   10.89 - #ifdef LOOPBACK_TSO
   10.90 --	if (skb_shinfo(skb)->gso_size) {
   10.91 -+	if (skb_is_gso(skb)) {
   10.92 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   10.93 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   10.94 - 
   10.95 -diff -urp a/drivers/net/sky2.c b/drivers/net/sky2.c
   10.96 ---- a/drivers/net/sky2.c	2006-07-25 14:41:00.924505388 +0100
   10.97 -+++ b/drivers/net/sky2.c	2006-07-25 14:36:00.306559519 +0100
   10.98 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   10.99 - 	count = sizeof(dma_addr_t) / sizeof(u32);
  10.100 - 	count += skb_shinfo(skb)->nr_frags * count;
  10.101 - 
  10.102 --	if (skb_shinfo(skb)->gso_size)
  10.103 -+	if (skb_is_gso(skb))
  10.104 - 		++count;
  10.105 - 
  10.106 - 	if (skb->ip_summed == CHECKSUM_HW)
  10.107 -diff -urp a/drivers/net/typhoon.c b/drivers/net/typhoon.c
  10.108 ---- a/drivers/net/typhoon.c	2006-07-25 14:41:00.931504603 +0100
  10.109 -+++ b/drivers/net/typhoon.c	2006-07-25 14:36:00.314558683 +0100
  10.110 -@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  10.111 - 	 * If problems develop with TSO, check this first.
  10.112 - 	 */
  10.113 - 	numDesc = skb_shinfo(skb)->nr_frags + 1;
  10.114 --	if(skb_tso_size(skb))
  10.115 -+	if (skb_is_gso(skb))
  10.116 - 		numDesc++;
  10.117 - 
  10.118 - 	/* When checking for free space in the ring, we need to also
  10.119 -@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  10.120 - 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
  10.121 - 	}
  10.122 - 
  10.123 --	if(skb_tso_size(skb)) {
  10.124 -+	if (skb_is_gso(skb)) {
  10.125 - 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
  10.126 - 		first_txd->numDesc++;
  10.127 - 
  10.128 -diff -urp a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
  10.129 ---- a/drivers/s390/net/qeth_main.c	2006-07-25 14:41:00.939503705 +0100
  10.130 -+++ b/drivers/s390/net/qeth_main.c	2006-07-25 14:36:00.321557952 +0100
  10.131 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  10.132 - 	queue = card->qdio.out_qs
  10.133 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  10.134 - 
  10.135 --	if (skb_shinfo(skb)->gso_size)
  10.136 -+	if (skb_is_gso(skb))
  10.137 - 		large_send = card->options.large_send;
  10.138 - 
  10.139 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
  10.140 -@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  10.141 - 		card->stats.tx_packets++;
  10.142 - 		card->stats.tx_bytes += skb->len;
  10.143 - #ifdef CONFIG_QETH_PERF_STATS
  10.144 --		if (skb_shinfo(skb)->gso_size &&
  10.145 --		   !(large_send == QETH_LARGE_SEND_NO)) {
  10.146 -+		if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) {
  10.147 - 			card->perf_stats.large_send_bytes += skb->len;
  10.148 - 			card->perf_stats.large_send_cnt++;
  10.149 - 		}
  10.150 -diff -urp a/include/linux/netdevice.h b/include/linux/netdevice.h
  10.151 ---- a/include/linux/netdevice.h	2006-07-25 14:41:00.940503593 +0100
  10.152 -+++ b/include/linux/netdevice.h	2006-07-25 14:36:00.323557743 +0100
  10.153 -@@ -541,6 +541,7 @@ struct packet_type {
  10.154 - 					 struct net_device *);
  10.155 - 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  10.156 - 						int features);
  10.157 -+	int			(*gso_send_check)(struct sk_buff *skb);
  10.158 - 	void			*af_packet_priv;
  10.159 - 	struct list_head	list;
  10.160 - };
  10.161 -@@ -1001,14 +1002,15 @@ extern void linkwatch_run_queue(void);
  10.162 - 
  10.163 - static inline int skb_gso_ok(struct sk_buff *skb, int features)
  10.164 - {
  10.165 --	int feature = skb_shinfo(skb)->gso_size ?
  10.166 --		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  10.167 -+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
  10.168 - 	return (features & feature) == feature;
  10.169 - }
  10.170 - 
  10.171 - static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  10.172 - {
  10.173 --	return !skb_gso_ok(skb, dev->features);
  10.174 -+	return skb_is_gso(skb) &&
  10.175 -+	       (!skb_gso_ok(skb, dev->features) ||
  10.176 -+		unlikely(skb->ip_summed != CHECKSUM_HW));
  10.177 - }
  10.178 - 
  10.179 - #endif /* __KERNEL__ */
  10.180 -diff -urp a/include/linux/skbuff.h b/include/linux/skbuff.h
  10.181 ---- a/include/linux/skbuff.h	2006-07-25 14:41:00.941503481 +0100
  10.182 -+++ b/include/linux/skbuff.h	2006-07-25 14:36:00.323557743 +0100
  10.183 -@@ -1403,5 +1403,10 @@ static inline void nf_bridge_get(struct 
  10.184 - static inline void nf_reset(struct sk_buff *skb) {}
  10.185 - #endif /* CONFIG_NETFILTER */
  10.186 - 
  10.187 -+static inline int skb_is_gso(const struct sk_buff *skb)
  10.188 -+{
  10.189 -+	return skb_shinfo(skb)->gso_size;
  10.190 -+}
  10.191 -+
  10.192 - #endif	/* __KERNEL__ */
  10.193 - #endif	/* _LINUX_SKBUFF_H */
  10.194 -diff -urp a/include/net/protocol.h b/include/net/protocol.h
  10.195 ---- a/include/net/protocol.h	2006-07-25 14:41:00.942503369 +0100
  10.196 -+++ b/include/net/protocol.h	2006-07-25 14:36:00.324557639 +0100
  10.197 -@@ -37,6 +37,7 @@
  10.198 - struct net_protocol {
  10.199 - 	int			(*handler)(struct sk_buff *skb);
  10.200 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  10.201 -+	int			(*gso_send_check)(struct sk_buff *skb);
  10.202 - 	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  10.203 - 					       int features);
  10.204 - 	int			no_policy;
  10.205 -diff -urp a/include/net/tcp.h b/include/net/tcp.h
  10.206 ---- a/include/net/tcp.h	2006-07-25 14:41:00.943503256 +0100
  10.207 -+++ b/include/net/tcp.h	2006-07-25 14:36:00.325557534 +0100
  10.208 -@@ -1063,6 +1063,7 @@ extern struct request_sock_ops tcp_reque
  10.209 - 
  10.210 - extern int tcp_v4_destroy_sock(struct sock *sk);
  10.211 - 
  10.212 -+extern int tcp_v4_gso_send_check(struct sk_buff *skb);
  10.213 - extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  10.214 - 
  10.215 - #ifdef CONFIG_PROC_FS
  10.216 -diff -urp a/net/bridge/br_forward.c b/net/bridge/br_forward.c
  10.217 ---- a/net/bridge/br_forward.c	2006-07-25 14:41:00.944503144 +0100
  10.218 -+++ b/net/bridge/br_forward.c	2006-07-25 14:36:00.326557430 +0100
  10.219 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  10.220 - int br_dev_queue_push_xmit(struct sk_buff *skb)
  10.221 - {
  10.222 - 	/* drop mtu oversized packets except tso */
  10.223 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  10.224 -+	if (skb->len > skb->dev->mtu && !skb_is_gso(skb))
  10.225 - 		kfree_skb(skb);
  10.226 - 	else {
  10.227 - #ifdef CONFIG_BRIDGE_NETFILTER
  10.228 -diff -urp a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
  10.229 ---- a/net/bridge/br_netfilter.c	2006-07-25 14:41:00.945503032 +0100
  10.230 -+++ b/net/bridge/br_netfilter.c	2006-07-25 14:36:00.327557325 +0100
  10.231 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  10.232 - {
  10.233 - 	if (skb->protocol == htons(ETH_P_IP) &&
  10.234 - 	    skb->len > skb->dev->mtu &&
  10.235 --	    !skb_shinfo(skb)->gso_size)
  10.236 -+	    !skb_is_gso(skb))
  10.237 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
  10.238 - 	else
  10.239 - 		return br_dev_queue_push_xmit(skb);
  10.240 -diff -urp a/net/core/dev.c b/net/core/dev.c
  10.241 ---- a/net/core/dev.c	2006-07-25 14:41:00.947502808 +0100
  10.242 -+++ b/net/core/dev.c	2006-07-25 14:36:00.329557116 +0100
  10.243 -@@ -1083,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk
  10.244 - 	unsigned int csum;
  10.245 - 	int ret = 0, offset = skb->h.raw - skb->data;
  10.246 - 
  10.247 --	if (inward) {
  10.248 --		skb->ip_summed = CHECKSUM_NONE;
  10.249 --		goto out;
  10.250 -+	if (inward)
  10.251 -+		goto out_set_summed;
  10.252 -+
  10.253 -+	if (unlikely(skb_shinfo(skb)->gso_size)) {
  10.254 -+		static int warned;
  10.255 -+
  10.256 -+		WARN_ON(!warned);
  10.257 -+		warned = 1;
  10.258 -+
  10.259 -+		/* Let GSO fix up the checksum. */
  10.260 -+		goto out_set_summed;
  10.261 - 	}
  10.262 - 
  10.263 - 	if (skb_cloned(skb)) {
  10.264 -@@ -1102,6 +1110,8 @@ int skb_checksum_help(struct sk_buff *sk
  10.265 - 	BUG_ON(skb->csum + 2 > offset);
  10.266 - 
  10.267 - 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
  10.268 -+
  10.269 -+out_set_summed:
  10.270 - 	skb->ip_summed = CHECKSUM_NONE;
  10.271 - out:	
  10.272 - 	return ret;
  10.273 -@@ -1122,17 +1132,35 @@ struct sk_buff *skb_gso_segment(struct s
  10.274 - 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  10.275 - 	struct packet_type *ptype;
  10.276 - 	int type = skb->protocol;
  10.277 -+	int err;
  10.278 - 
  10.279 - 	BUG_ON(skb_shinfo(skb)->frag_list);
  10.280 --	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  10.281 - 
  10.282 - 	skb->mac.raw = skb->data;
  10.283 - 	skb->mac_len = skb->nh.raw - skb->data;
  10.284 - 	__skb_pull(skb, skb->mac_len);
  10.285 - 
  10.286 -+	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  10.287 -+		static int warned;
  10.288 -+
  10.289 -+		WARN_ON(!warned);
  10.290 -+		warned = 1;
  10.291 -+
  10.292 -+		if (skb_header_cloned(skb) &&
  10.293 -+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
  10.294 -+			return ERR_PTR(err);
  10.295 -+	}
  10.296 -+
  10.297 - 	rcu_read_lock();
  10.298 - 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  10.299 - 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  10.300 -+			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  10.301 -+				err = ptype->gso_send_check(skb);
  10.302 -+				segs = ERR_PTR(err);
  10.303 -+				if (err || skb_gso_ok(skb, features))
  10.304 -+					break;
  10.305 -+				__skb_push(skb, skb->data - skb->nh.raw);
  10.306 -+			}
  10.307 - 			segs = ptype->gso_segment(skb, features);
  10.308 - 			break;
  10.309 - 		}
  10.310 -diff -urp a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
  10.311 ---- a/net/ipv4/af_inet.c	2006-07-25 14:41:00.952502247 +0100
  10.312 -+++ b/net/ipv4/af_inet.c	2006-07-25 14:36:00.334556594 +0100
  10.313 -@@ -1085,6 +1085,40 @@ int inet_sk_rebuild_header(struct sock *
  10.314 - 
  10.315 - EXPORT_SYMBOL(inet_sk_rebuild_header);
  10.316 - 
  10.317 -+static int inet_gso_send_check(struct sk_buff *skb)
  10.318 -+{
  10.319 -+	struct iphdr *iph;
  10.320 -+	struct net_protocol *ops;
  10.321 -+	int proto;
  10.322 -+	int ihl;
  10.323 -+	int err = -EINVAL;
  10.324 -+
  10.325 -+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
  10.326 -+		goto out;
  10.327 -+
  10.328 -+	iph = skb->nh.iph;
  10.329 -+	ihl = iph->ihl * 4;
  10.330 -+	if (ihl < sizeof(*iph))
  10.331 -+		goto out;
  10.332 -+
  10.333 -+	if (unlikely(!pskb_may_pull(skb, ihl)))
  10.334 -+		goto out;
  10.335 -+
  10.336 -+	skb->h.raw = __skb_pull(skb, ihl);
  10.337 -+	iph = skb->nh.iph;
  10.338 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  10.339 -+	err = -EPROTONOSUPPORT;
  10.340 -+
  10.341 -+	rcu_read_lock();
  10.342 -+	ops = rcu_dereference(inet_protos[proto]);
  10.343 -+	if (likely(ops && ops->gso_send_check))
  10.344 -+		err = ops->gso_send_check(skb);
  10.345 -+	rcu_read_unlock();
  10.346 -+
  10.347 -+out:
  10.348 -+	return err;
  10.349 -+}
  10.350 -+
  10.351 - static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  10.352 - {
  10.353 - 	struct sk_buff *segs = ERR_PTR(-EINVAL);
  10.354 -@@ -1142,6 +1176,7 @@ static struct net_protocol igmp_protocol
  10.355 - static struct net_protocol tcp_protocol = {
  10.356 - 	.handler =	tcp_v4_rcv,
  10.357 - 	.err_handler =	tcp_v4_err,
  10.358 -+	.gso_send_check = tcp_v4_gso_send_check,
  10.359 - 	.gso_segment =	tcp_tso_segment,
  10.360 - 	.no_policy =	1,
  10.361 - };
  10.362 -@@ -1188,6 +1223,7 @@ static int ipv4_proc_init(void);
  10.363 - static struct packet_type ip_packet_type = {
  10.364 - 	.type = __constant_htons(ETH_P_IP),
  10.365 - 	.func = ip_rcv,
  10.366 -+	.gso_send_check = inet_gso_send_check,
  10.367 - 	.gso_segment = inet_gso_segment,
  10.368 - };
  10.369 - 
  10.370 -diff -urp a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  10.371 ---- a/net/ipv4/ip_output.c	2006-07-25 14:41:00.953502135 +0100
  10.372 -+++ b/net/ipv4/ip_output.c	2006-07-25 14:36:00.335556489 +0100
  10.373 -@@ -210,7 +210,7 @@ static inline int ip_finish_output(struc
  10.374 - 		return dst_output(skb);
  10.375 - 	}
  10.376 - #endif
  10.377 --	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  10.378 -+	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
  10.379 - 		return ip_fragment(skb, ip_finish_output2);
  10.380 - 	else
  10.381 - 		return ip_finish_output2(skb);
  10.382 -@@ -1095,7 +1095,7 @@ ssize_t	ip_append_page(struct sock *sk, 
  10.383 - 	while (size > 0) {
  10.384 - 		int i;
  10.385 - 
  10.386 --		if (skb_shinfo(skb)->gso_size)
  10.387 -+		if (skb_is_gso(skb))
  10.388 - 			len = size;
  10.389 - 		else {
  10.390 - 
  10.391 -diff -urp a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
  10.392 ---- a/net/ipv4/tcp_ipv4.c	2006-07-25 14:39:15.985080788 +0100
  10.393 -+++ b/net/ipv4/tcp_ipv4.c	2006-07-25 14:36:00.339556071 +0100
  10.394 -@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 
  10.395 - 	}
  10.396 - }
  10.397 - 
  10.398 -+int tcp_v4_gso_send_check(struct sk_buff *skb)
  10.399 -+{
  10.400 -+	struct iphdr *iph;
  10.401 -+	struct tcphdr *th;
  10.402 -+
  10.403 -+	if (!pskb_may_pull(skb, sizeof(*th)))
  10.404 -+		return -EINVAL;
  10.405 -+
  10.406 -+	iph = skb->nh.iph;
  10.407 -+	th = skb->h.th;
  10.408 -+
  10.409 -+	th->check = 0;
  10.410 -+	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
  10.411 -+	skb->csum = offsetof(struct tcphdr, check);
  10.412 -+	skb->ip_summed = CHECKSUM_HW;
  10.413 -+	return 0;
  10.414 -+}
  10.415 -+
  10.416 - /*
  10.417 -  *	This routine will send an RST to the other tcp.
  10.418 -  *
  10.419 -diff -urp a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
  10.420 ---- a/net/ipv4/xfrm4_output.c	2006-07-25 14:41:00.958501574 +0100
  10.421 -+++ b/net/ipv4/xfrm4_output.c	2006-07-25 14:36:00.341555862 +0100
  10.422 -@@ -189,7 +189,7 @@ static int xfrm4_output_finish(struct sk
  10.423 - 	}
  10.424 - #endif
  10.425 - 
  10.426 --	if (!skb_shinfo(skb)->gso_size)
  10.427 -+	if (!skb_is_gso(skb))
  10.428 - 		return xfrm4_output_finish2(skb);
  10.429 - 
  10.430 - 	skb->protocol = htons(ETH_P_IP);
  10.431 -diff -urp a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
  10.432 ---- a/net/ipv6/ip6_output.c	2006-07-25 14:41:00.959501461 +0100
  10.433 -+++ b/net/ipv6/ip6_output.c	2006-07-25 14:36:00.341555862 +0100
  10.434 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  10.435 - 
  10.436 - int ip6_output(struct sk_buff *skb)
  10.437 - {
  10.438 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  10.439 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
  10.440 - 				dst_allfrag(skb->dst))
  10.441 - 		return ip6_fragment(skb, ip6_output2);
  10.442 - 	else
  10.443 -diff -urp a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
  10.444 ---- a/net/ipv6/xfrm6_output.c	2006-07-25 14:41:00.960501349 +0100
  10.445 -+++ b/net/ipv6/xfrm6_output.c	2006-07-25 14:36:00.342555758 +0100
  10.446 -@@ -179,7 +179,7 @@ static int xfrm6_output_finish(struct sk
  10.447 - {
  10.448 - 	struct sk_buff *segs;
  10.449 - 
  10.450 --	if (!skb_shinfo(skb)->gso_size)
  10.451 -+	if (!skb_is_gso(skb))
  10.452 - 		return xfrm6_output_finish2(skb);
  10.453 - 
  10.454 - 	skb->protocol = htons(ETH_P_IP);
    11.1 --- a/patches/linux-2.6.16.13/net-gso-3-fix-errorcheck.patch	Tue Sep 19 14:26:22 2006 +0100
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,17 +0,0 @@
    11.4 -diff -urp a/include/linux/netdevice.h b/include/linux/netdevice.h
    11.5 ---- a/include/linux/netdevice.h	2006-07-25 15:16:39.314333975 +0100
    11.6 -+++ b/include/linux/netdevice.h	2006-07-25 15:19:37.298320799 +0100
    11.7 -@@ -930,10 +930,10 @@ static inline void netif_tx_lock_bh(stru
    11.8 - 
    11.9 - static inline int netif_tx_trylock(struct net_device *dev)
   11.10 - {
   11.11 --	int err = spin_trylock(&dev->_xmit_lock);
   11.12 --	if (!err)
   11.13 -+	int ok = spin_trylock(&dev->_xmit_lock);
   11.14 -+	if (likely(ok))
   11.15 - 		dev->xmit_lock_owner = smp_processor_id();
   11.16 --	return err;
   11.17 -+	return ok;
   11.18 - }
   11.19 - 
   11.20 - static inline void netif_tx_unlock(struct net_device *dev)
    12.1 --- a/patches/linux-2.6.16.13/net-gso-4-kill-warnon.patch	Tue Sep 19 14:26:22 2006 +0100
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,29 +0,0 @@
    12.4 -508c578140642a641bb9b888369719c510ae2a00
    12.5 -diff --git a/net/core/dev.c b/net/core/dev.c
    12.6 -index e814a89..240773b 100644
    12.7 ---- a/net/core/dev.c
    12.8 -+++ b/net/core/dev.c
    12.9 -@@ -1087,11 +1087,6 @@ int skb_checksum_help(struct sk_buff *sk
   12.10 - 		goto out_set_summed;
   12.11 - 
   12.12 - 	if (unlikely(skb_shinfo(skb)->gso_size)) {
   12.13 --		static int warned;
   12.14 --
   12.15 --		WARN_ON(!warned);
   12.16 --		warned = 1;
   12.17 --
   12.18 - 		/* Let GSO fix up the checksum. */
   12.19 - 		goto out_set_summed;
   12.20 - 	}
   12.21 -@@ -1141,11 +1136,6 @@ struct sk_buff *skb_gso_segment(struct s
   12.22 - 	__skb_pull(skb, skb->mac_len);
   12.23 - 
   12.24 - 	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
   12.25 --		static int warned;
   12.26 --
   12.27 --		WARN_ON(!warned);
   12.28 --		warned = 1;
   12.29 --
   12.30 - 		if (skb_header_cloned(skb) &&
   12.31 - 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
   12.32 - 			return ERR_PTR(err);
    13.1 --- a/patches/linux-2.6.16.13/pmd-shared.patch	Tue Sep 19 14:26:22 2006 +0100
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,111 +0,0 @@
    13.4 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/mm/pageattr.c ./arch/i386/mm/pageattr.c
    13.5 ---- ../pristine-linux-2.6.16.13/arch/i386/mm/pageattr.c	2006-05-02 22:38:44.000000000 +0100
    13.6 -+++ ./arch/i386/mm/pageattr.c	2006-05-04 17:41:40.000000000 +0100
    13.7 -@@ -78,7 +78,7 @@ static void set_pmd_pte(pte_t *kpte, uns
    13.8 - 	unsigned long flags;
    13.9 - 
   13.10 - 	set_pte_atomic(kpte, pte); 	/* change init_mm */
   13.11 --	if (PTRS_PER_PMD > 1)
   13.12 -+	if (HAVE_SHARED_KERNEL_PMD)
   13.13 - 		return;
   13.14 - 
   13.15 - 	spin_lock_irqsave(&pgd_lock, flags);
   13.16 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/mm/pgtable.c ./arch/i386/mm/pgtable.c
   13.17 ---- ../pristine-linux-2.6.16.13/arch/i386/mm/pgtable.c	2006-05-02 22:38:44.000000000 +0100
   13.18 -+++ ./arch/i386/mm/pgtable.c	2006-05-04 17:41:40.000000000 +0100
   13.19 -@@ -215,9 +215,10 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
   13.20 - 		spin_lock_irqsave(&pgd_lock, flags);
   13.21 - 	}
   13.22 - 
   13.23 --	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   13.24 --			swapper_pg_dir + USER_PTRS_PER_PGD,
   13.25 --			KERNEL_PGD_PTRS);
   13.26 -+	if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
   13.27 -+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   13.28 -+				swapper_pg_dir + USER_PTRS_PER_PGD,
   13.29 -+				KERNEL_PGD_PTRS);
   13.30 - 	if (PTRS_PER_PMD > 1)
   13.31 - 		return;
   13.32 - 
   13.33 -@@ -249,6 +250,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
   13.34 - 			goto out_oom;
   13.35 - 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
   13.36 - 	}
   13.37 -+
   13.38 -+	if (!HAVE_SHARED_KERNEL_PMD) {
   13.39 -+		unsigned long flags;
   13.40 -+
   13.41 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   13.42 -+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
   13.43 -+			if (!pmd)
   13.44 -+				goto out_oom;
   13.45 -+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
   13.46 -+		}
   13.47 -+
   13.48 -+		spin_lock_irqsave(&pgd_lock, flags);
   13.49 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   13.50 -+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
   13.51 -+			pgd_t *kpgd = pgd_offset_k(v);
   13.52 -+			pud_t *kpud = pud_offset(kpgd, v);
   13.53 -+			pmd_t *kpmd = pmd_offset(kpud, v);
   13.54 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   13.55 -+			memcpy(pmd, kpmd, PAGE_SIZE);
   13.56 -+		}
   13.57 -+		pgd_list_add(pgd);
   13.58 -+		spin_unlock_irqrestore(&pgd_lock, flags);
   13.59 -+	}
   13.60 -+
   13.61 - 	return pgd;
   13.62 - 
   13.63 - out_oom:
   13.64 -@@ -263,9 +288,23 @@ void pgd_free(pgd_t *pgd)
   13.65 - 	int i;
   13.66 - 
   13.67 - 	/* in the PAE case user pgd entries are overwritten before usage */
   13.68 --	if (PTRS_PER_PMD > 1)
   13.69 --		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
   13.70 --			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
   13.71 -+	if (PTRS_PER_PMD > 1) {
   13.72 -+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
   13.73 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   13.74 -+			kmem_cache_free(pmd_cache, pmd);
   13.75 -+		}
   13.76 -+		if (!HAVE_SHARED_KERNEL_PMD) {
   13.77 -+			unsigned long flags;
   13.78 -+			spin_lock_irqsave(&pgd_lock, flags);
   13.79 -+			pgd_list_del(pgd);
   13.80 -+			spin_unlock_irqrestore(&pgd_lock, flags);
   13.81 -+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   13.82 -+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   13.83 -+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   13.84 -+				kmem_cache_free(pmd_cache, pmd);
   13.85 -+			}
   13.86 -+		}
   13.87 -+	}
   13.88 - 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
   13.89 - 	kmem_cache_free(pgd_cache, pgd);
   13.90 - }
   13.91 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/pgtable-2level-defs.h ./include/asm-i386/pgtable-2level-defs.h
   13.92 ---- ../pristine-linux-2.6.16.13/include/asm-i386/pgtable-2level-defs.h	2006-05-02 22:38:44.000000000 +0100
   13.93 -+++ ./include/asm-i386/pgtable-2level-defs.h	2006-05-04 17:41:40.000000000 +0100
   13.94 -@@ -1,6 +1,8 @@
   13.95 - #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
   13.96 - #define _I386_PGTABLE_2LEVEL_DEFS_H
   13.97 - 
   13.98 -+#define HAVE_SHARED_KERNEL_PMD 0
   13.99 -+
  13.100 - /*
  13.101 -  * traditional i386 two-level paging structure:
  13.102 -  */
  13.103 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/pgtable-3level-defs.h ./include/asm-i386/pgtable-3level-defs.h
  13.104 ---- ../pristine-linux-2.6.16.13/include/asm-i386/pgtable-3level-defs.h	2006-05-02 22:38:44.000000000 +0100
  13.105 -+++ ./include/asm-i386/pgtable-3level-defs.h	2006-05-04 17:41:40.000000000 +0100
  13.106 -@@ -1,6 +1,8 @@
  13.107 - #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
  13.108 - #define _I386_PGTABLE_3LEVEL_DEFS_H
  13.109 - 
  13.110 -+#define HAVE_SHARED_KERNEL_PMD 1
  13.111 -+
  13.112 - /*
  13.113 -  * PGDIR_SHIFT determines what a top-level page table entry can map
  13.114 -  */
    14.1 --- a/patches/linux-2.6.16.13/rcu_needs_cpu.patch	Tue Sep 19 14:26:22 2006 +0100
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,33 +0,0 @@
    14.4 ---- ../pristine-linux-2.6.16.13/kernel/rcupdate.c	2006-05-02 22:38:44.000000000 +0100
    14.5 -+++ ./kernel/rcupdate.c	2006-06-09 20:27:45.000000000 +0100
    14.6 -@@ -485,6 +485,20 @@ int rcu_pending(int cpu)
    14.7 - 		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
    14.8 - }
    14.9 - 
   14.10 -+/*
   14.11 -+ * Check to see if any future RCU-related work will need to be done
   14.12 -+ * by the current CPU, even if none need be done immediately, returning
   14.13 -+ * 1 if so.  This function is part of the RCU implementation; it is -not-
   14.14 -+ * an exported member of the RCU API.
   14.15 -+ */
   14.16 -+int rcu_needs_cpu(int cpu)
   14.17 -+{
   14.18 -+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
   14.19 -+	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
   14.20 -+
   14.21 -+	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
   14.22 -+}
   14.23 -+
   14.24 - void rcu_check_callbacks(int cpu, int user)
   14.25 - {
   14.26 - 	if (user || 
   14.27 ---- ../pristine-linux-2.6.16.13/include/linux/rcupdate.h	2006-05-02 22:38:44.000000000 +0100
   14.28 -+++ ./include/linux/rcupdate.h	2006-06-09 20:28:57.000000000 +0100
   14.29 -@@ -134,6 +134,7 @@ static inline void rcu_bh_qsctr_inc(int 
   14.30 - }
   14.31 - 
   14.32 - extern int rcu_pending(int cpu);
   14.33 -+extern int rcu_needs_cpu(int cpu);
   14.34 - 
   14.35 - /**
   14.36 -  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
    15.1 --- a/patches/linux-2.6.16.13/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch	Tue Sep 19 14:26:22 2006 +0100
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,30 +0,0 @@
    15.4 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    15.5 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/entry.S	2006-05-02 22:38:44.000000000 +0100
    15.6 -+++ ./arch/i386/kernel/entry.S	2006-05-04 17:41:44.000000000 +0100
    15.7 -@@ -177,7 +177,7 @@ need_resched:
    15.8 - 
    15.9 - 	# sysenter call handler stub
   15.10 - ENTRY(sysenter_entry)
   15.11 --	movl TSS_sysenter_esp0(%esp),%esp
   15.12 -+	movl SYSENTER_stack_esp0(%esp),%esp
   15.13 - sysenter_past_esp:
   15.14 - 	sti
   15.15 - 	pushl $(__USER_DS)
   15.16 -@@ -492,7 +492,7 @@ device_not_available_emulate:
   15.17 -  * that sets up the real kernel stack. Check here, since we can't
   15.18 -  * allow the wrong stack to be used.
   15.19 -  *
   15.20 -- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
   15.21 -+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
   15.22 -  * already pushed 3 words if it hits on the sysenter instruction:
   15.23 -  * eflags, cs and eip.
   15.24 -  *
   15.25 -@@ -504,7 +504,7 @@ device_not_available_emulate:
   15.26 - 	cmpw $__KERNEL_CS,4(%esp);		\
   15.27 - 	jne ok;					\
   15.28 - label:						\
   15.29 --	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
   15.30 -+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
   15.31 - 	pushfl;					\
   15.32 - 	pushl $__KERNEL_CS;			\
   15.33 - 	pushl $sysenter_past_esp
    16.1 --- a/patches/linux-2.6.16.13/series	Tue Sep 19 14:26:22 2006 +0100
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,23 +0,0 @@
    16.4 -blktap-aio-16_03_06.patch
    16.5 -device_bind.patch
    16.6 -fix-hz-suspend.patch
    16.7 -fix-ide-cd-pio-mode.patch
    16.8 -i386-mach-io-check-nmi.patch
    16.9 -ipv6-no-autoconf.patch
   16.10 -net-csum.patch
   16.11 -net-gso-0-base.patch
   16.12 -net-gso-1-check-dodgy.patch
   16.13 -net-gso-2-checksum-fix.patch
   16.14 -net-gso-3-fix-errorcheck.patch
   16.15 -net-gso-4-kill-warnon.patch
   16.16 -pmd-shared.patch
   16.17 -rcu_needs_cpu.patch
   16.18 -rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
   16.19 -smp-alts.patch
   16.20 -tpm_plugin_2.6.17.patch
   16.21 -x86-increase-interrupt-vector-range.patch
   16.22 -xen-hotplug.patch
   16.23 -xenoprof-generic.patch
   16.24 -x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   16.25 -x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   16.26 -x86-elfnote-as-preprocessor-macro.patch
    17.1 --- a/patches/linux-2.6.16.13/smp-alts.patch	Tue Sep 19 14:26:22 2006 +0100
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,591 +0,0 @@
    17.4 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/Kconfig ./arch/i386/Kconfig
    17.5 ---- ../pristine-linux-2.6.16.13/arch/i386/Kconfig	2006-05-02 22:38:44.000000000 +0100
    17.6 -+++ ./arch/i386/Kconfig	2006-05-04 17:41:45.000000000 +0100
    17.7 -@@ -202,6 +202,19 @@ config SMP
    17.8 - 
    17.9 - 	  If you don't know what to do here, say N.
   17.10 - 
   17.11 -+config SMP_ALTERNATIVES
   17.12 -+	bool "SMP alternatives support (EXPERIMENTAL)"
   17.13 -+	depends on SMP && EXPERIMENTAL
   17.14 -+	help
   17.15 -+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
   17.16 -+	  host slightly by replacing certain key instruction sequences
   17.17 -+	  according to whether we currently have more than one CPU available.
   17.18 -+	  This should provide a noticeable boost to performance when
   17.19 -+	  running SMP kernels on UP machines, and have negligible impact
   17.20 -+	  when running on an true SMP host.
   17.21 -+
   17.22 -+          If unsure, say N.
   17.23 -+	  
   17.24 - config NR_CPUS
   17.25 - 	int "Maximum number of CPUs (2-255)"
   17.26 - 	range 2 255
   17.27 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/Makefile ./arch/i386/kernel/Makefile
   17.28 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/Makefile	2006-05-02 22:38:44.000000000 +0100
   17.29 -+++ ./arch/i386/kernel/Makefile	2006-05-04 17:41:45.000000000 +0100
   17.30 -@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
   17.31 - obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault.o
   17.32 - obj-$(CONFIG_VM86)		+= vm86.o
   17.33 - obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
   17.34 -+obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
   17.35 - 
   17.36 - EXTRA_AFLAGS   := -traditional
   17.37 - 
   17.38 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/smpalts.c ./arch/i386/kernel/smpalts.c
   17.39 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
   17.40 -+++ ./arch/i386/kernel/smpalts.c	2006-05-04 17:41:45.000000000 +0100
   17.41 -@@ -0,0 +1,85 @@
   17.42 -+#include <linux/kernel.h>
   17.43 -+#include <asm/system.h>
   17.44 -+#include <asm/smp_alt.h>
   17.45 -+#include <asm/processor.h>
   17.46 -+#include <asm/string.h>
   17.47 -+
   17.48 -+struct smp_replacement_record {
   17.49 -+	unsigned char targ_size;
   17.50 -+	unsigned char smp1_size;
   17.51 -+	unsigned char smp2_size;
   17.52 -+	unsigned char up_size;
   17.53 -+	unsigned char feature;
   17.54 -+	unsigned char data[0];
   17.55 -+};
   17.56 -+
   17.57 -+struct smp_alternative_record {
   17.58 -+	void *targ_start;
   17.59 -+	struct smp_replacement_record *repl;
   17.60 -+};
   17.61 -+
   17.62 -+extern struct smp_alternative_record __start_smp_alternatives_table,
   17.63 -+  __stop_smp_alternatives_table;
   17.64 -+extern unsigned long __init_begin, __init_end;
   17.65 -+
   17.66 -+void prepare_for_smp(void)
   17.67 -+{
   17.68 -+	struct smp_alternative_record *r;
   17.69 -+	printk(KERN_INFO "Enabling SMP...\n");
   17.70 -+	for (r = &__start_smp_alternatives_table;
   17.71 -+	     r != &__stop_smp_alternatives_table;
   17.72 -+	     r++) {
   17.73 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
   17.74 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
   17.75 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
   17.76 -+               if (system_state == SYSTEM_RUNNING &&
   17.77 -+                   r->targ_start >= (void *)&__init_begin &&
   17.78 -+                   r->targ_start < (void *)&__init_end)
   17.79 -+                       continue;
   17.80 -+		if (r->repl->feature != (unsigned char)-1 &&
   17.81 -+		    boot_cpu_has(r->repl->feature)) {
   17.82 -+			memcpy(r->targ_start,
   17.83 -+			       r->repl->data + r->repl->smp1_size,
   17.84 -+			       r->repl->smp2_size);
   17.85 -+			memset(r->targ_start + r->repl->smp2_size,
   17.86 -+			       0x90,
   17.87 -+			       r->repl->targ_size - r->repl->smp2_size);
   17.88 -+		} else {
   17.89 -+			memcpy(r->targ_start,
   17.90 -+			       r->repl->data,
   17.91 -+			       r->repl->smp1_size);
   17.92 -+			memset(r->targ_start + r->repl->smp1_size,
   17.93 -+			       0x90,
   17.94 -+			       r->repl->targ_size - r->repl->smp1_size);
   17.95 -+		}
   17.96 -+	}
   17.97 -+	/* Paranoia */
   17.98 -+	asm volatile ("jmp 1f\n1:");
   17.99 -+	mb();
  17.100 -+}
  17.101 -+
  17.102 -+void unprepare_for_smp(void)
  17.103 -+{
  17.104 -+	struct smp_alternative_record *r;
  17.105 -+	printk(KERN_INFO "Disabling SMP...\n");
  17.106 -+	for (r = &__start_smp_alternatives_table;
  17.107 -+	     r != &__stop_smp_alternatives_table;
  17.108 -+	     r++) {
  17.109 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
  17.110 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
  17.111 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
  17.112 -+               if (system_state == SYSTEM_RUNNING &&
  17.113 -+                   r->targ_start >= (void *)&__init_begin &&
  17.114 -+                   r->targ_start < (void *)&__init_end)
  17.115 -+                       continue;
  17.116 -+		memcpy(r->targ_start,
  17.117 -+		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
  17.118 -+		       r->repl->up_size);
  17.119 -+		memset(r->targ_start + r->repl->up_size,
  17.120 -+		       0x90,
  17.121 -+		       r->repl->targ_size - r->repl->up_size);
  17.122 -+	}
  17.123 -+	/* Paranoia */
  17.124 -+	asm volatile ("jmp 1f\n1:");
  17.125 -+	mb();
  17.126 -+}
  17.127 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/smpboot.c ./arch/i386/kernel/smpboot.c
  17.128 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/smpboot.c	2006-05-02 22:38:44.000000000 +0100
  17.129 -+++ ./arch/i386/kernel/smpboot.c	2006-05-04 17:41:45.000000000 +0100
  17.130 -@@ -1218,6 +1218,11 @@ static void __init smp_boot_cpus(unsigne
  17.131 - 		if (max_cpus <= cpucount+1)
  17.132 - 			continue;
  17.133 - 
  17.134 -+#ifdef CONFIG_SMP_ALTERNATIVES
  17.135 -+		if (kicked == 1)
  17.136 -+			prepare_for_smp();
  17.137 -+#endif
  17.138 -+
  17.139 - 		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
  17.140 - 			printk("CPU #%d not responding - cannot use it.\n",
  17.141 - 								apicid);
  17.142 -@@ -1396,6 +1401,11 @@ int __devinit __cpu_up(unsigned int cpu)
  17.143 - 		return -EIO;
  17.144 - 	}
  17.145 - 
  17.146 -+#ifdef CONFIG_SMP_ALTERNATIVES
  17.147 -+	if (num_online_cpus() == 1)
  17.148 -+		prepare_for_smp();
  17.149 -+#endif
  17.150 -+
  17.151 - 	local_irq_enable();
  17.152 - 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
  17.153 - 	/* Unleash the CPU! */
  17.154 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
  17.155 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/vmlinux.lds.S	2006-05-02 22:38:44.000000000 +0100
  17.156 -+++ ./arch/i386/kernel/vmlinux.lds.S	2006-05-04 17:41:45.000000000 +0100
  17.157 -@@ -34,6 +34,13 @@ SECTIONS
  17.158 -   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  17.159 -   __stop___ex_table = .;
  17.160 - 
  17.161 -+  . = ALIGN(16);
  17.162 -+  __start_smp_alternatives_table = .;
  17.163 -+  __smp_alternatives : { *(__smp_alternatives) }
  17.164 -+  __stop_smp_alternatives_table = .;
  17.165 -+
  17.166 -+  __smp_replacements : { *(__smp_replacements) }
  17.167 -+
  17.168 -   RODATA
  17.169 - 
  17.170 -   /* writeable */
  17.171 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/atomic.h ./include/asm-i386/atomic.h
  17.172 ---- ../pristine-linux-2.6.16.13/include/asm-i386/atomic.h	2006-05-02 22:38:44.000000000 +0100
  17.173 -+++ ./include/asm-i386/atomic.h	2006-05-04 17:41:45.000000000 +0100
  17.174 -@@ -4,18 +4,13 @@
  17.175 - #include <linux/config.h>
  17.176 - #include <linux/compiler.h>
  17.177 - #include <asm/processor.h>
  17.178 -+#include <asm/smp_alt.h>
  17.179 - 
  17.180 - /*
  17.181 -  * Atomic operations that C can't guarantee us.  Useful for
  17.182 -  * resource counting etc..
  17.183 -  */
  17.184 - 
  17.185 --#ifdef CONFIG_SMP
  17.186 --#define LOCK "lock ; "
  17.187 --#else
  17.188 --#define LOCK ""
  17.189 --#endif
  17.190 --
  17.191 - /*
  17.192 -  * Make sure gcc doesn't try to be clever and move things around
  17.193 -  * on us. We need to use _exactly_ the address the user gave us,
  17.194 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/bitops.h ./include/asm-i386/bitops.h
  17.195 ---- ../pristine-linux-2.6.16.13/include/asm-i386/bitops.h	2006-05-02 22:38:44.000000000 +0100
  17.196 -+++ ./include/asm-i386/bitops.h	2006-05-04 17:41:45.000000000 +0100
  17.197 -@@ -7,6 +7,7 @@
  17.198 - 
  17.199 - #include <linux/config.h>
  17.200 - #include <linux/compiler.h>
  17.201 -+#include <asm/smp_alt.h>
  17.202 - 
  17.203 - /*
  17.204 -  * These have to be done with inline assembly: that way the bit-setting
  17.205 -@@ -16,12 +17,6 @@
  17.206 -  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  17.207 -  */
  17.208 - 
  17.209 --#ifdef CONFIG_SMP
  17.210 --#define LOCK_PREFIX "lock ; "
  17.211 --#else
  17.212 --#define LOCK_PREFIX ""
  17.213 --#endif
  17.214 --
  17.215 - #define ADDR (*(volatile long *) addr)
  17.216 - 
  17.217 - /**
  17.218 -@@ -41,7 +36,7 @@
  17.219 -  */
  17.220 - static inline void set_bit(int nr, volatile unsigned long * addr)
  17.221 - {
  17.222 --	__asm__ __volatile__( LOCK_PREFIX
  17.223 -+	__asm__ __volatile__( LOCK
  17.224 - 		"btsl %1,%0"
  17.225 - 		:"+m" (ADDR)
  17.226 - 		:"Ir" (nr));
  17.227 -@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
  17.228 -  */
  17.229 - static inline void clear_bit(int nr, volatile unsigned long * addr)
  17.230 - {
  17.231 --	__asm__ __volatile__( LOCK_PREFIX
  17.232 -+	__asm__ __volatile__( LOCK
  17.233 - 		"btrl %1,%0"
  17.234 - 		:"+m" (ADDR)
  17.235 - 		:"Ir" (nr));
  17.236 -@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
  17.237 -  */
  17.238 - static inline void change_bit(int nr, volatile unsigned long * addr)
  17.239 - {
  17.240 --	__asm__ __volatile__( LOCK_PREFIX
  17.241 -+	__asm__ __volatile__( LOCK
  17.242 - 		"btcl %1,%0"
  17.243 - 		:"+m" (ADDR)
  17.244 - 		:"Ir" (nr));
  17.245 -@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
  17.246 - {
  17.247 - 	int oldbit;
  17.248 - 
  17.249 --	__asm__ __volatile__( LOCK_PREFIX
  17.250 -+	__asm__ __volatile__( LOCK
  17.251 - 		"btsl %2,%1\n\tsbbl %0,%0"
  17.252 - 		:"=r" (oldbit),"+m" (ADDR)
  17.253 - 		:"Ir" (nr) : "memory");
  17.254 -@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
  17.255 - {
  17.256 - 	int oldbit;
  17.257 - 
  17.258 --	__asm__ __volatile__( LOCK_PREFIX
  17.259 -+	__asm__ __volatile__( LOCK
  17.260 - 		"btrl %2,%1\n\tsbbl %0,%0"
  17.261 - 		:"=r" (oldbit),"+m" (ADDR)
  17.262 - 		:"Ir" (nr) : "memory");
  17.263 -@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
  17.264 - {
  17.265 - 	int oldbit;
  17.266 - 
  17.267 --	__asm__ __volatile__( LOCK_PREFIX
  17.268 -+	__asm__ __volatile__( LOCK
  17.269 - 		"btcl %2,%1\n\tsbbl %0,%0"
  17.270 - 		:"=r" (oldbit),"+m" (ADDR)
  17.271 - 		:"Ir" (nr) : "memory");
  17.272 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/futex.h ./include/asm-i386/futex.h
  17.273 ---- ../pristine-linux-2.6.16.13/include/asm-i386/futex.h	2006-05-02 22:38:44.000000000 +0100
  17.274 -+++ ./include/asm-i386/futex.h	2006-05-04 17:41:45.000000000 +0100
  17.275 -@@ -28,7 +28,7 @@
  17.276 - "1:	movl	%2, %0\n\
  17.277 - 	movl	%0, %3\n"					\
  17.278 - 	insn "\n"						\
  17.279 --"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
  17.280 -+"2:	" LOCK "cmpxchgl %3, %2\n\
  17.281 - 	jnz	1b\n\
  17.282 - 3:	.section .fixup,\"ax\"\n\
  17.283 - 4:	mov	%5, %1\n\
  17.284 -@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, 
  17.285 - #endif
  17.286 - 		switch (op) {
  17.287 - 		case FUTEX_OP_ADD:
  17.288 --			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
  17.289 -+			__futex_atomic_op1(LOCK "xaddl %0, %2", ret,
  17.290 - 					   oldval, uaddr, oparg);
  17.291 - 			break;
  17.292 - 		case FUTEX_OP_OR:
  17.293 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/rwsem.h ./include/asm-i386/rwsem.h
  17.294 ---- ../pristine-linux-2.6.16.13/include/asm-i386/rwsem.h	2006-05-02 22:38:44.000000000 +0100
  17.295 -+++ ./include/asm-i386/rwsem.h	2006-05-04 17:41:45.000000000 +0100
  17.296 -@@ -40,6 +40,7 @@
  17.297 - 
  17.298 - #include <linux/list.h>
  17.299 - #include <linux/spinlock.h>
  17.300 -+#include <asm/smp_alt.h>
  17.301 - 
  17.302 - struct rwsem_waiter;
  17.303 - 
  17.304 -@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
  17.305 - {
  17.306 - 	__asm__ __volatile__(
  17.307 - 		"# beginning down_read\n\t"
  17.308 --LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  17.309 -+LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  17.310 - 		"  js        2f\n\t" /* jump if we weren't granted the lock */
  17.311 - 		"1:\n\t"
  17.312 - 		LOCK_SECTION_START("")
  17.313 -@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
  17.314 - 		"  movl	     %1,%2\n\t"
  17.315 - 		"  addl      %3,%2\n\t"
  17.316 - 		"  jle	     2f\n\t"
  17.317 --LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
  17.318 -+LOCK	        "  cmpxchgl  %2,%0\n\t"
  17.319 - 		"  jnz	     1b\n\t"
  17.320 - 		"2:\n\t"
  17.321 - 		"# ending __down_read_trylock\n\t"
  17.322 -@@ -150,7 +151,7 @@ static inline void __down_write(struct r
  17.323 - 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
  17.324 - 	__asm__ __volatile__(
  17.325 - 		"# beginning down_write\n\t"
  17.326 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  17.327 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  17.328 - 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
  17.329 - 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
  17.330 - 		"1:\n\t"
  17.331 -@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
  17.332 - 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
  17.333 - 	__asm__ __volatile__(
  17.334 - 		"# beginning __up_read\n\t"
  17.335 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  17.336 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  17.337 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  17.338 - 		"1:\n\t"
  17.339 - 		LOCK_SECTION_START("")
  17.340 -@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
  17.341 - 	__asm__ __volatile__(
  17.342 - 		"# beginning __up_write\n\t"
  17.343 - 		"  movl      %2,%%edx\n\t"
  17.344 --LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  17.345 -+LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  17.346 - 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
  17.347 - 		"1:\n\t"
  17.348 - 		LOCK_SECTION_START("")
  17.349 -@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
  17.350 - {
  17.351 - 	__asm__ __volatile__(
  17.352 - 		"# beginning __downgrade_write\n\t"
  17.353 --LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  17.354 -+LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  17.355 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  17.356 - 		"1:\n\t"
  17.357 - 		LOCK_SECTION_START("")
  17.358 -@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
  17.359 - static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  17.360 - {
  17.361 - 	__asm__ __volatile__(
  17.362 --LOCK_PREFIX	"addl %1,%0"
  17.363 -+LOCK	          "addl %1,%0"
  17.364 - 		: "=m"(sem->count)
  17.365 - 		: "ir"(delta), "m"(sem->count));
  17.366 - }
  17.367 -@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
  17.368 - 	int tmp = delta;
  17.369 - 
  17.370 - 	__asm__ __volatile__(
  17.371 --LOCK_PREFIX	"xadd %0,(%2)"
  17.372 -+LOCK  	          "xadd %0,(%2)"
  17.373 - 		: "+r"(tmp), "=m"(sem->count)
  17.374 - 		: "r"(sem), "m"(sem->count)
  17.375 - 		: "memory");
  17.376 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/smp_alt.h ./include/asm-i386/smp_alt.h
  17.377 ---- ../pristine-linux-2.6.16.13/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
  17.378 -+++ ./include/asm-i386/smp_alt.h	2006-05-04 17:41:45.000000000 +0100
  17.379 -@@ -0,0 +1,32 @@
  17.380 -+#ifndef __ASM_SMP_ALT_H__
  17.381 -+#define __ASM_SMP_ALT_H__
  17.382 -+
  17.383 -+#include <linux/config.h>
  17.384 -+
  17.385 -+#ifdef CONFIG_SMP
  17.386 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  17.387 -+#define LOCK \
  17.388 -+        "6677: nop\n" \
  17.389 -+	".section __smp_alternatives,\"a\"\n" \
  17.390 -+	".long 6677b\n" \
  17.391 -+	".long 6678f\n" \
  17.392 -+	".previous\n" \
  17.393 -+	".section __smp_replacements,\"a\"\n" \
  17.394 -+	"6678: .byte 1\n" \
  17.395 -+	".byte 1\n" \
  17.396 -+	".byte 0\n" \
  17.397 -+        ".byte 1\n" \
  17.398 -+	".byte -1\n" \
  17.399 -+	"lock\n" \
  17.400 -+	"nop\n" \
  17.401 -+	".previous\n"
  17.402 -+void prepare_for_smp(void);
  17.403 -+void unprepare_for_smp(void);
  17.404 -+#else
  17.405 -+#define LOCK "lock ; "
  17.406 -+#endif
  17.407 -+#else
  17.408 -+#define LOCK ""
  17.409 -+#endif
  17.410 -+
  17.411 -+#endif /* __ASM_SMP_ALT_H__ */
  17.412 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/spinlock.h ./include/asm-i386/spinlock.h
  17.413 ---- ../pristine-linux-2.6.16.13/include/asm-i386/spinlock.h	2006-05-02 22:38:44.000000000 +0100
  17.414 -+++ ./include/asm-i386/spinlock.h	2006-05-04 17:41:45.000000000 +0100
  17.415 -@@ -6,6 +6,7 @@
  17.416 - #include <asm/page.h>
  17.417 - #include <linux/config.h>
  17.418 - #include <linux/compiler.h>
  17.419 -+#include <asm/smp_alt.h>
  17.420 - 
  17.421 - /*
  17.422 -  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  17.423 -@@ -23,7 +24,8 @@
  17.424 - 
  17.425 - #define __raw_spin_lock_string \
  17.426 - 	"\n1:\t" \
  17.427 --	"lock ; decb %0\n\t" \
  17.428 -+	LOCK \
  17.429 -+	"decb %0\n\t" \
  17.430 - 	"jns 3f\n" \
  17.431 - 	"2:\t" \
  17.432 - 	"rep;nop\n\t" \
  17.433 -@@ -34,7 +36,8 @@
  17.434 - 
  17.435 - #define __raw_spin_lock_string_flags \
  17.436 - 	"\n1:\t" \
  17.437 --	"lock ; decb %0\n\t" \
  17.438 -+	LOCK \
  17.439 -+	"decb %0\n\t" \
  17.440 - 	"jns 4f\n\t" \
  17.441 - 	"2:\t" \
  17.442 - 	"testl $0x200, %1\n\t" \
  17.443 -@@ -65,10 +68,34 @@ static inline void __raw_spin_lock_flags
  17.444 - static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  17.445 - {
  17.446 - 	char oldval;
  17.447 -+#ifdef CONFIG_SMP_ALTERNATIVES
  17.448 - 	__asm__ __volatile__(
  17.449 --		"xchgb %b0,%1"
  17.450 -+		"1:movb %1,%b0\n"
  17.451 -+		"movb $0,%1\n"
  17.452 -+		"2:"
  17.453 -+		".section __smp_alternatives,\"a\"\n"
  17.454 -+		".long 1b\n"
  17.455 -+		".long 3f\n"
  17.456 -+		".previous\n"
  17.457 -+		".section __smp_replacements,\"a\"\n"
  17.458 -+		"3: .byte 2b - 1b\n"
  17.459 -+		".byte 5f-4f\n"
  17.460 -+		".byte 0\n"
  17.461 -+		".byte 6f-5f\n"
  17.462 -+		".byte -1\n"
  17.463 -+		"4: xchgb %b0,%1\n"
  17.464 -+		"5: movb %1,%b0\n"
  17.465 -+		"movb $0,%1\n"
  17.466 -+		"6:\n"
  17.467 -+		".previous\n"
  17.468 - 		:"=q" (oldval), "=m" (lock->slock)
  17.469 - 		:"0" (0) : "memory");
  17.470 -+#else
  17.471 -+	__asm__ __volatile__(
  17.472 -+		"xchgb %b0,%1\n"
  17.473 -+		:"=q" (oldval), "=m" (lock->slock)
  17.474 -+		:"0" (0) : "memory");
  17.475 -+#endif
  17.476 - 	return oldval > 0;
  17.477 - }
  17.478 - 
  17.479 -@@ -178,12 +205,12 @@ static inline int __raw_write_trylock(ra
  17.480 - 
  17.481 - static inline void __raw_read_unlock(raw_rwlock_t *rw)
  17.482 - {
  17.483 --	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
  17.484 -+	asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory");
  17.485 - }
  17.486 - 
  17.487 - static inline void __raw_write_unlock(raw_rwlock_t *rw)
  17.488 - {
  17.489 --	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
  17.490 -+	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0"
  17.491 - 				 : "=m" (rw->lock) : : "memory");
  17.492 - }
  17.493 - 
  17.494 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-i386/system.h ./include/asm-i386/system.h
  17.495 ---- ../pristine-linux-2.6.16.13/include/asm-i386/system.h	2006-05-02 22:38:44.000000000 +0100
  17.496 -+++ ./include/asm-i386/system.h	2006-05-04 17:41:45.000000000 +0100
  17.497 -@@ -5,7 +5,7 @@
  17.498 - #include <linux/kernel.h>
  17.499 - #include <asm/segment.h>
  17.500 - #include <asm/cpufeature.h>
  17.501 --#include <linux/bitops.h> /* for LOCK_PREFIX */
  17.502 -+#include <asm/smp_alt.h>
  17.503 - 
  17.504 - #ifdef __KERNEL__
  17.505 - 
  17.506 -@@ -271,19 +271,19 @@ static inline unsigned long __cmpxchg(vo
  17.507 - 	unsigned long prev;
  17.508 - 	switch (size) {
  17.509 - 	case 1:
  17.510 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
  17.511 -+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
  17.512 - 				     : "=a"(prev)
  17.513 - 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
  17.514 - 				     : "memory");
  17.515 - 		return prev;
  17.516 - 	case 2:
  17.517 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
  17.518 -+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
  17.519 - 				     : "=a"(prev)
  17.520 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  17.521 - 				     : "memory");
  17.522 - 		return prev;
  17.523 - 	case 4:
  17.524 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
  17.525 -+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
  17.526 - 				     : "=a"(prev)
  17.527 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  17.528 - 				     : "memory");
  17.529 -@@ -336,7 +336,7 @@ static inline unsigned long long __cmpxc
  17.530 - 				      unsigned long long new)
  17.531 - {
  17.532 - 	unsigned long long prev;
  17.533 --	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
  17.534 -+	__asm__ __volatile__(LOCK "cmpxchg8b %3"
  17.535 - 			     : "=A"(prev)
  17.536 - 			     : "b"((unsigned long)new),
  17.537 - 			       "c"((unsigned long)(new >> 32)),
  17.538 -@@ -503,11 +503,55 @@ struct alt_instr { 
  17.539 - #endif
  17.540 - 
  17.541 - #ifdef CONFIG_SMP
  17.542 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  17.543 -+#define smp_alt_mb(instr)                                           \
  17.544 -+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
  17.545 -+		     ".section __smp_alternatives,\"a\"\n"          \
  17.546 -+		     ".long 6667b\n"                                \
  17.547 -+                     ".long 6673f\n"                                \
  17.548 -+		     ".previous\n"                                  \
  17.549 -+		     ".section __smp_replacements,\"a\"\n"          \
  17.550 -+		     "6673:.byte 6668b-6667b\n"                     \
  17.551 -+		     ".byte 6670f-6669f\n"                          \
  17.552 -+		     ".byte 6671f-6670f\n"                          \
  17.553 -+                     ".byte 0\n"                                    \
  17.554 -+		     ".byte %c0\n"                                  \
  17.555 -+		     "6669:lock;addl $0,0(%%esp)\n"                 \
  17.556 -+		     "6670:" instr "\n"                             \
  17.557 -+		     "6671:\n"                                      \
  17.558 -+		     ".previous\n"                                  \
  17.559 -+		     :                                              \
  17.560 -+		     : "i" (X86_FEATURE_XMM2)                       \
  17.561 -+		     : "memory")
  17.562 -+#define smp_rmb() smp_alt_mb("lfence")
  17.563 -+#define smp_mb()  smp_alt_mb("mfence")
  17.564 -+#define set_mb(var, value) do {                                     \
  17.565 -+unsigned long __set_mb_temp;                                        \
  17.566 -+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
  17.567 -+		     ".section __smp_alternatives,\"a\"\n"          \
  17.568 -+		     ".long 6667b\n"                                \
  17.569 -+		     ".long 6673f\n"                                \
  17.570 -+		     ".previous\n"                                  \
  17.571 -+		     ".section __smp_replacements,\"a\"\n"          \
  17.572 -+		     "6673: .byte 6668b-6667b\n"                    \
  17.573 -+		     ".byte 6670f-6669f\n"                          \
  17.574 -+		     ".byte 0\n"                                    \
  17.575 -+		     ".byte 6671f-6670f\n"                          \
  17.576 -+		     ".byte -1\n"                                   \
  17.577 -+		     "6669: xchg %1, %0\n"                          \
  17.578 -+		     "6670:movl %1, %0\n"                           \
  17.579 -+		     "6671:\n"                                      \
  17.580 -+		     ".previous\n"                                  \
  17.581 -+		     : "=m" (var), "=r" (__set_mb_temp)             \
  17.582 -+		     : "1" (value)                                  \
  17.583 -+		     : "memory"); } while (0)
  17.584 -+#else
  17.585 - #define smp_mb()	mb()
  17.586 - #define smp_rmb()	rmb()
  17.587 -+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  17.588 -+#endif
  17.589 - #define smp_wmb()	wmb()
  17.590 - #define smp_read_barrier_depends()	read_barrier_depends()
  17.591 --#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  17.592 - #else
  17.593 - #define smp_mb()	barrier()
  17.594 - #define smp_rmb()	barrier()
    18.1 --- a/patches/linux-2.6.16.13/tpm_plugin_2.6.17.patch	Tue Sep 19 14:26:22 2006 +0100
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,1546 +0,0 @@
    18.4 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_atmel.c ./drivers/char/tpm/tpm_atmel.c
    18.5 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_atmel.c	2006-06-26 18:05:03.000000000 -0400
    18.6 -+++ ./drivers/char/tpm/tpm_atmel.c	2006-06-26 18:16:33.000000000 -0400
    18.7 -@@ -47,12 +47,12 @@ static int tpm_atml_recv(struct tpm_chip
    18.8 - 		return -EIO;
    18.9 - 
   18.10 - 	for (i = 0; i < 6; i++) {
   18.11 --		status = ioread8(chip->vendor->iobase + 1);
   18.12 -+		status = ioread8(chip->vendor.iobase + 1);
   18.13 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   18.14 - 			dev_err(chip->dev, "error reading header\n");
   18.15 - 			return -EIO;
   18.16 - 		}
   18.17 --		*buf++ = ioread8(chip->vendor->iobase);
   18.18 -+		*buf++ = ioread8(chip->vendor.iobase);
   18.19 - 	}
   18.20 - 
   18.21 - 	/* size of the data received */
   18.22 -@@ -63,7 +63,7 @@ static int tpm_atml_recv(struct tpm_chip
   18.23 - 		dev_err(chip->dev,
   18.24 - 			"Recv size(%d) less than available space\n", size);
   18.25 - 		for (; i < size; i++) {	/* clear the waiting data anyway */
   18.26 --			status = ioread8(chip->vendor->iobase + 1);
   18.27 -+			status = ioread8(chip->vendor.iobase + 1);
   18.28 - 			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   18.29 - 				dev_err(chip->dev, "error reading data\n");
   18.30 - 				return -EIO;
   18.31 -@@ -74,16 +74,16 @@ static int tpm_atml_recv(struct tpm_chip
   18.32 - 
   18.33 - 	/* read all the data available */
   18.34 - 	for (; i < size; i++) {
   18.35 --		status = ioread8(chip->vendor->iobase + 1);
   18.36 -+		status = ioread8(chip->vendor.iobase + 1);
   18.37 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   18.38 - 			dev_err(chip->dev, "error reading data\n");
   18.39 - 			return -EIO;
   18.40 - 		}
   18.41 --		*buf++ = ioread8(chip->vendor->iobase);
   18.42 -+		*buf++ = ioread8(chip->vendor.iobase);
   18.43 - 	}
   18.44 - 
   18.45 - 	/* make sure data available is gone */
   18.46 --	status = ioread8(chip->vendor->iobase + 1);
   18.47 -+	status = ioread8(chip->vendor.iobase + 1);
   18.48 - 
   18.49 - 	if (status & ATML_STATUS_DATA_AVAIL) {
   18.50 - 		dev_err(chip->dev, "data available is stuck\n");
   18.51 -@@ -100,7 +100,7 @@ static int tpm_atml_send(struct tpm_chip
   18.52 - 	dev_dbg(chip->dev, "tpm_atml_send:\n");
   18.53 - 	for (i = 0; i < count; i++) {
   18.54 - 		dev_dbg(chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
   18.55 -- 		iowrite8(buf[i], chip->vendor->iobase);
   18.56 -+ 		iowrite8(buf[i], chip->vendor.iobase);
   18.57 - 	}
   18.58 - 
   18.59 - 	return count;
   18.60 -@@ -108,12 +108,12 @@ static int tpm_atml_send(struct tpm_chip
   18.61 - 
   18.62 - static void tpm_atml_cancel(struct tpm_chip *chip)
   18.63 - {
   18.64 --	iowrite8(ATML_STATUS_ABORT, chip->vendor->iobase + 1);
   18.65 -+	iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1);
   18.66 - }
   18.67 - 
   18.68 - static u8 tpm_atml_status(struct tpm_chip *chip)
   18.69 - {
   18.70 --	return ioread8(chip->vendor->iobase + 1);
   18.71 -+	return ioread8(chip->vendor.iobase + 1);
   18.72 - }
   18.73 - 
   18.74 - static struct file_operations atmel_ops = {
   18.75 -@@ -140,7 +140,7 @@ static struct attribute* atmel_attrs[] =
   18.76 - 
   18.77 - static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
   18.78 - 
   18.79 --static struct tpm_vendor_specific tpm_atmel = {
   18.80 -+static const struct tpm_vendor_specific tpm_atmel = {
   18.81 - 	.recv = tpm_atml_recv,
   18.82 - 	.send = tpm_atml_send,
   18.83 - 	.cancel = tpm_atml_cancel,
   18.84 -@@ -159,10 +159,10 @@ static void atml_plat_remove(void)
   18.85 - 	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
   18.86 - 
   18.87 - 	if (chip) {
   18.88 --		if (chip->vendor->have_region)
   18.89 --			atmel_release_region(chip->vendor->base,
   18.90 --					     chip->vendor->region_size);
   18.91 --		atmel_put_base_addr(chip->vendor);
   18.92 -+		if (chip->vendor.have_region)
   18.93 -+			atmel_release_region(chip->vendor.base,
   18.94 -+					     chip->vendor.region_size);
   18.95 -+		atmel_put_base_addr(chip->vendor.iobase);
   18.96 - 		tpm_remove_hardware(chip->dev);
   18.97 - 		platform_device_unregister(pdev);
   18.98 - 	}
   18.99 -@@ -179,18 +179,22 @@ static struct device_driver atml_drv = {
  18.100 - static int __init init_atmel(void)
  18.101 - {
  18.102 - 	int rc = 0;
  18.103 -+	void __iomem *iobase = NULL;
  18.104 -+	int have_region, region_size;
  18.105 -+	unsigned long base;
  18.106 -+	struct  tpm_chip *chip;
  18.107 - 
  18.108 - 	driver_register(&atml_drv);
  18.109 - 
  18.110 --	if ((tpm_atmel.iobase = atmel_get_base_addr(&tpm_atmel)) == NULL) {
  18.111 -+	if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
  18.112 - 		rc = -ENODEV;
  18.113 - 		goto err_unreg_drv;
  18.114 - 	}
  18.115 - 
  18.116 --	tpm_atmel.have_region =
  18.117 -+	have_region =
  18.118 - 	    (atmel_request_region
  18.119 --	     (tpm_atmel.base, tpm_atmel.region_size,
  18.120 --	      "tpm_atmel0") == NULL) ? 0 : 1;
  18.121 -+	     (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
  18.122 -+
  18.123 - 
  18.124 - 	if (IS_ERR
  18.125 - 	    (pdev =
  18.126 -@@ -199,17 +203,25 @@ static int __init init_atmel(void)
  18.127 - 		goto err_rel_reg;
  18.128 - 	}
  18.129 - 
  18.130 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_atmel)) < 0)
  18.131 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) {
  18.132 -+		rc = -ENODEV;
  18.133 - 		goto err_unreg_dev;
  18.134 -+	}
  18.135 -+
  18.136 -+	chip->vendor.iobase = iobase;
  18.137 -+	chip->vendor.base = base;
  18.138 -+	chip->vendor.have_region = have_region;
  18.139 -+	chip->vendor.region_size = region_size;
  18.140 -+
  18.141 - 	return 0;
  18.142 - 
  18.143 - err_unreg_dev:
  18.144 - 	platform_device_unregister(pdev);
  18.145 - err_rel_reg:
  18.146 --	atmel_put_base_addr(&tpm_atmel);
  18.147 --	if (tpm_atmel.have_region)
  18.148 --		atmel_release_region(tpm_atmel.base,
  18.149 --				     tpm_atmel.region_size);
  18.150 -+	atmel_put_base_addr(iobase);
  18.151 -+	if (have_region)
  18.152 -+		atmel_release_region(base,
  18.153 -+				     region_size);
  18.154 - err_unreg_drv:
  18.155 - 	driver_unregister(&atml_drv);
  18.156 - 	return rc;
  18.157 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_atmel.h ./drivers/char/tpm/tpm_atmel.h
  18.158 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_atmel.h	2006-06-26 18:05:03.000000000 -0400
  18.159 -+++ ./drivers/char/tpm/tpm_atmel.h	2006-06-26 18:16:33.000000000 -0400
  18.160 -@@ -28,13 +28,12 @@
  18.161 - #define atmel_request_region request_mem_region
  18.162 - #define atmel_release_region release_mem_region
  18.163 - 
  18.164 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  18.165 --					 *vendor)
  18.166 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  18.167 - {
  18.168 --	iounmap(vendor->iobase);
  18.169 -+	iounmap(iobase);
  18.170 - }
  18.171 - 
  18.172 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific *vendor)
  18.173 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  18.174 - {
  18.175 - 	struct device_node *dn;
  18.176 - 	unsigned long address, size;
  18.177 -@@ -71,9 +70,9 @@ static void __iomem * atmel_get_base_add
  18.178 - 	else
  18.179 - 		size = reg[naddrc];
  18.180 - 
  18.181 --	vendor->base = address;
  18.182 --	vendor->region_size = size;
  18.183 --	return ioremap(vendor->base, vendor->region_size);
  18.184 -+	*base = address;
  18.185 -+	*region_size = size;
  18.186 -+	return ioremap(*base, *region_size);
  18.187 - }
  18.188 - #else
  18.189 - #define atmel_getb(chip, offset) inb(chip->vendor->base + offset)
  18.190 -@@ -106,14 +105,12 @@ static int atmel_verify_tpm11(void)
  18.191 - 	return 0;
  18.192 - }
  18.193 - 
  18.194 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  18.195 --					 *vendor)
  18.196 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  18.197 - {
  18.198 - }
  18.199 - 
  18.200 - /* Determine where to talk to device */
  18.201 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific
  18.202 --					 *vendor)
  18.203 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  18.204 - {
  18.205 - 	int lo, hi;
  18.206 - 
  18.207 -@@ -123,9 +120,9 @@ static void __iomem * atmel_get_base_add
  18.208 - 	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
  18.209 - 	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
  18.210 - 
  18.211 --	vendor->base = (hi << 8) | lo;
  18.212 --	vendor->region_size = 2;
  18.213 -+	*base = (hi << 8) | lo;
  18.214 -+	*region_size = 2;
  18.215 - 
  18.216 --	return ioport_map(vendor->base, vendor->region_size);
  18.217 -+	return ioport_map(*base, *region_size);
  18.218 - }
  18.219 - #endif
  18.220 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_bios.c ./drivers/char/tpm/tpm_bios.c
  18.221 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_bios.c	2006-06-26 18:05:03.000000000 -0400
  18.222 -+++ ./drivers/char/tpm/tpm_bios.c	2006-06-26 18:16:33.000000000 -0400
  18.223 -@@ -29,6 +29,11 @@
  18.224 - #define MAX_TEXT_EVENT		1000	/* Max event string length */
  18.225 - #define ACPI_TCPA_SIG		"TCPA"	/* 0x41504354 /'TCPA' */
  18.226 - 
  18.227 -+enum bios_platform_class {
  18.228 -+	BIOS_CLIENT = 0x00,
  18.229 -+	BIOS_SERVER = 0x01,
  18.230 -+};
  18.231 -+
  18.232 - struct tpm_bios_log {
  18.233 - 	void *bios_event_log;
  18.234 - 	void *bios_event_log_end;
  18.235 -@@ -36,9 +41,18 @@ struct tpm_bios_log {
  18.236 - 
  18.237 - struct acpi_tcpa {
  18.238 - 	struct acpi_table_header hdr;
  18.239 --	u16 reserved;
  18.240 --	u32 log_max_len __attribute__ ((packed));
  18.241 --	u32 log_start_addr __attribute__ ((packed));
  18.242 -+	u16 platform_class;
  18.243 -+	union {
  18.244 -+		struct client_hdr {
  18.245 -+			u32 log_max_len __attribute__ ((packed));
  18.246 -+			u64 log_start_addr __attribute__ ((packed));
  18.247 -+		} client;
  18.248 -+		struct server_hdr {
  18.249 -+			u16 reserved;
  18.250 -+			u64 log_max_len __attribute__ ((packed));
  18.251 -+			u64 log_start_addr __attribute__ ((packed));
  18.252 -+		} server;
  18.253 -+	};
  18.254 - };
  18.255 - 
  18.256 - struct tcpa_event {
  18.257 -@@ -91,6 +105,12 @@ static const char* tcpa_event_type_strin
  18.258 - 	"Non-Host Info"
  18.259 - };
  18.260 - 
  18.261 -+struct tcpa_pc_event {
  18.262 -+	u32 event_id;
  18.263 -+	u32 event_size;
  18.264 -+	u8 event_data[0];
  18.265 -+};
  18.266 -+
  18.267 - enum tcpa_pc_event_ids {
  18.268 - 	SMBIOS = 1,
  18.269 - 	BIS_CERT,
  18.270 -@@ -100,14 +120,15 @@ enum tcpa_pc_event_ids {
  18.271 - 	NVRAM,
  18.272 - 	OPTION_ROM_EXEC,
  18.273 - 	OPTION_ROM_CONFIG,
  18.274 --	OPTION_ROM_MICROCODE,
  18.275 -+	OPTION_ROM_MICROCODE = 10,
  18.276 - 	S_CRTM_VERSION,
  18.277 - 	S_CRTM_CONTENTS,
  18.278 - 	POST_CONTENTS,
  18.279 -+	HOST_TABLE_OF_DEVICES,
  18.280 - };
  18.281 - 
  18.282 - static const char* tcpa_pc_event_id_strings[] = {
  18.283 --	""
  18.284 -+	"",
  18.285 - 	"SMBIOS",
  18.286 - 	"BIS Certificate",
  18.287 - 	"POST BIOS ",
  18.288 -@@ -116,10 +137,12 @@ static const char* tcpa_pc_event_id_stri
  18.289 - 	"NVRAM",
  18.290 - 	"Option ROM",
  18.291 - 	"Option ROM config",
  18.292 --	"Option ROM microcode",
  18.293 -+	"",
  18.294 -+	"Option ROM microcode ",
  18.295 - 	"S-CRTM Version",
  18.296 --	"S-CRTM Contents",
  18.297 --	"S-CRTM POST Contents",
  18.298 -+	"S-CRTM Contents ",
  18.299 -+	"POST Contents ",
  18.300 -+	"Table of Devices",
  18.301 - };
  18.302 - 
  18.303 - /* returns pointer to start of pos. entry of tcg log */
  18.304 -@@ -191,7 +214,7 @@ static int get_event_name(char *dest, st
  18.305 - 	const char *name = "";
  18.306 - 	char data[40] = "";
  18.307 - 	int i, n_len = 0, d_len = 0;
  18.308 --	u32 event_id;
  18.309 -+	struct tcpa_pc_event *pc_event;
  18.310 - 
  18.311 - 	switch(event->event_type) {
  18.312 - 	case PREBOOT:
  18.313 -@@ -220,31 +243,32 @@ static int get_event_name(char *dest, st
  18.314 - 		}
  18.315 - 		break;
  18.316 - 	case EVENT_TAG:
  18.317 --		event_id = be32_to_cpu(*((u32 *)event_entry));
  18.318 -+		pc_event = (struct tcpa_pc_event *)event_entry;
  18.319 - 
  18.320 - 		/* ToDo Row data -> Base64 */
  18.321 - 
  18.322 --		switch (event_id) {
  18.323 -+		switch (pc_event->event_id) {
  18.324 - 		case SMBIOS:
  18.325 - 		case BIS_CERT:
  18.326 - 		case CMOS:
  18.327 - 		case NVRAM:
  18.328 - 		case OPTION_ROM_EXEC:
  18.329 - 		case OPTION_ROM_CONFIG:
  18.330 --		case OPTION_ROM_MICROCODE:
  18.331 - 		case S_CRTM_VERSION:
  18.332 --		case S_CRTM_CONTENTS:
  18.333 --		case POST_CONTENTS:
  18.334 --			name = tcpa_pc_event_id_strings[event_id];
  18.335 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  18.336 - 			n_len = strlen(name);
  18.337 - 			break;
  18.338 -+		/* hash data */
  18.339 - 		case POST_BIOS_ROM:
  18.340 - 		case ESCD:
  18.341 --			name = tcpa_pc_event_id_strings[event_id];
  18.342 -+		case OPTION_ROM_MICROCODE:
  18.343 -+		case S_CRTM_CONTENTS:
  18.344 -+		case POST_CONTENTS:
  18.345 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  18.346 - 			n_len = strlen(name);
  18.347 - 			for (i = 0; i < 20; i++)
  18.348 --				d_len += sprintf(data, "%02x",
  18.349 --						event_entry[8 + i]);
  18.350 -+				d_len += sprintf(&data[2*i], "%02x",
  18.351 -+						pc_event->event_data[i]);
  18.352 - 			break;
  18.353 - 		default:
  18.354 - 			break;
  18.355 -@@ -260,52 +284,13 @@ static int get_event_name(char *dest, st
  18.356 - 
  18.357 - static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
  18.358 - {
  18.359 -+	struct tcpa_event *event = v;
  18.360 -+	char *data = v;
  18.361 -+	int i;
  18.362 - 
  18.363 --	char *eventname;
  18.364 --	char data[4];
  18.365 --	u32 help;
  18.366 --	int i, len;
  18.367 --	struct tcpa_event *event = (struct tcpa_event *) v;
  18.368 --	unsigned char *event_entry =
  18.369 --	    (unsigned char *) (v + sizeof(struct tcpa_event));
  18.370 --
  18.371 --	eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
  18.372 --	if (!eventname) {
  18.373 --		printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
  18.374 --		       __func__);
  18.375 --		return -ENOMEM;
  18.376 --	}
  18.377 --
  18.378 --	/* 1st: PCR used is in little-endian format (4 bytes) */
  18.379 --	help = le32_to_cpu(event->pcr_index);
  18.380 --	memcpy(data, &help, 4);
  18.381 --	for (i = 0; i < 4; i++)
  18.382 --		seq_putc(m, data[i]);
  18.383 --
  18.384 --	/* 2nd: SHA1 (20 bytes) */
  18.385 --	for (i = 0; i < 20; i++)
  18.386 --		seq_putc(m, event->pcr_value[i]);
  18.387 --
  18.388 --	/* 3rd: event type identifier (4 bytes) */
  18.389 --	help = le32_to_cpu(event->event_type);
  18.390 --	memcpy(data, &help, 4);
  18.391 --	for (i = 0; i < 4; i++)
  18.392 -+	for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
  18.393 - 		seq_putc(m, data[i]);
  18.394 - 
  18.395 --	len = 0;
  18.396 --
  18.397 --	len += get_event_name(eventname, event, event_entry);
  18.398 --
  18.399 --	/* 4th:  filename <= 255 + \'0' delimiter */
  18.400 --	if (len > TCG_EVENT_NAME_LEN_MAX)
  18.401 --		len = TCG_EVENT_NAME_LEN_MAX;
  18.402 --
  18.403 --	for (i = 0; i < len; i++)
  18.404 --		seq_putc(m, eventname[i]);
  18.405 --
  18.406 --	/* 5th: delimiter */
  18.407 --	seq_putc(m, '\0');
  18.408 --
  18.409 - 	return 0;
  18.410 - }
  18.411 - 
  18.412 -@@ -353,6 +338,7 @@ static int tpm_ascii_bios_measurements_s
  18.413 - 	/* 4th: eventname <= max + \'0' delimiter */
  18.414 - 	seq_printf(m, " %s\n", eventname);
  18.415 - 
  18.416 -+	kfree(eventname);
  18.417 - 	return 0;
  18.418 - }
  18.419 - 
  18.420 -@@ -376,6 +362,7 @@ static int read_log(struct tpm_bios_log 
  18.421 - 	struct acpi_tcpa *buff;
  18.422 - 	acpi_status status;
  18.423 - 	struct acpi_table_header *virt;
  18.424 -+	u64 len, start;
  18.425 - 
  18.426 - 	if (log->bios_event_log != NULL) {
  18.427 - 		printk(KERN_ERR
  18.428 -@@ -396,27 +383,37 @@ static int read_log(struct tpm_bios_log 
  18.429 - 		return -EIO;
  18.430 - 	}
  18.431 - 
  18.432 --	if (buff->log_max_len == 0) {
  18.433 -+	switch(buff->platform_class) {
  18.434 -+	case BIOS_SERVER:
  18.435 -+		len = buff->server.log_max_len;
  18.436 -+		start = buff->server.log_start_addr;
  18.437 -+		break;
  18.438 -+	case BIOS_CLIENT:
  18.439 -+	default:
  18.440 -+		len = buff->client.log_max_len;
  18.441 -+		start = buff->client.log_start_addr;
  18.442 -+		break;
  18.443 -+	}
  18.444 -+	if (!len) {
  18.445 - 		printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
  18.446 - 		return -EIO;
  18.447 - 	}
  18.448 - 
  18.449 - 	/* malloc EventLog space */
  18.450 --	log->bios_event_log = kmalloc(buff->log_max_len, GFP_KERNEL);
  18.451 -+	log->bios_event_log = kmalloc(len, GFP_KERNEL);
  18.452 - 	if (!log->bios_event_log) {
  18.453 --		printk
  18.454 --		    ("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  18.455 --		     __func__);
  18.456 -+		printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  18.457 -+			__func__);
  18.458 - 		return -ENOMEM;
  18.459 - 	}
  18.460 - 
  18.461 --	log->bios_event_log_end = log->bios_event_log + buff->log_max_len;
  18.462 -+	log->bios_event_log_end = log->bios_event_log + len;
  18.463 - 
  18.464 --	acpi_os_map_memory(buff->log_start_addr, buff->log_max_len, (void *) &virt);
  18.465 -+	acpi_os_map_memory(start, len, (void *) &virt);
  18.466 - 
  18.467 --	memcpy(log->bios_event_log, virt, buff->log_max_len);
  18.468 -+	memcpy(log->bios_event_log, virt, len);
  18.469 - 
  18.470 --	acpi_os_unmap_memory(virt, buff->log_max_len);
  18.471 -+	acpi_os_unmap_memory(virt, len);
  18.472 - 	return 0;
  18.473 - }
  18.474 - 
  18.475 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_infineon.c ./drivers/char/tpm/tpm_infineon.c
  18.476 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_infineon.c	2006-06-26 18:05:03.000000000 -0400
  18.477 -+++ ./drivers/char/tpm/tpm_infineon.c	2006-06-26 18:16:33.000000000 -0400
  18.478 -@@ -15,6 +15,7 @@
  18.479 -  * License.
  18.480 -  */
  18.481 - 
  18.482 -+#include <linux/init.h>
  18.483 - #include <linux/pnp.h>
  18.484 - #include "tpm.h"
  18.485 - 
  18.486 -@@ -104,7 +105,7 @@ static int empty_fifo(struct tpm_chip *c
  18.487 - 
  18.488 - 	if (clear_wrfifo) {
  18.489 - 		for (i = 0; i < 4096; i++) {
  18.490 --			status = inb(chip->vendor->base + WRFIFO);
  18.491 -+			status = inb(chip->vendor.base + WRFIFO);
  18.492 - 			if (status == 0xff) {
  18.493 - 				if (check == 5)
  18.494 - 					break;
  18.495 -@@ -124,8 +125,8 @@ static int empty_fifo(struct tpm_chip *c
  18.496 - 	 */
  18.497 - 	i = 0;
  18.498 - 	do {
  18.499 --		status = inb(chip->vendor->base + RDFIFO);
  18.500 --		status = inb(chip->vendor->base + STAT);
  18.501 -+		status = inb(chip->vendor.base + RDFIFO);
  18.502 -+		status = inb(chip->vendor.base + STAT);
  18.503 - 		i++;
  18.504 - 		if (i == TPM_MAX_TRIES)
  18.505 - 			return -EIO;
  18.506 -@@ -138,7 +139,7 @@ static int wait(struct tpm_chip *chip, i
  18.507 - 	int status;
  18.508 - 	int i;
  18.509 - 	for (i = 0; i < TPM_MAX_TRIES; i++) {
  18.510 --		status = inb(chip->vendor->base + STAT);
  18.511 -+		status = inb(chip->vendor.base + STAT);
  18.512 - 		/* check the status-register if wait_for_bit is set */
  18.513 - 		if (status & 1 << wait_for_bit)
  18.514 - 			break;
  18.515 -@@ -157,7 +158,7 @@ static int wait(struct tpm_chip *chip, i
  18.516 - static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
  18.517 - {
  18.518 - 	wait(chip, STAT_XFE);
  18.519 --	outb(sendbyte, chip->vendor->base + WRFIFO);
  18.520 -+	outb(sendbyte, chip->vendor.base + WRFIFO);
  18.521 - }
  18.522 - 
  18.523 -     /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
  18.524 -@@ -204,7 +205,7 @@ recv_begin:
  18.525 - 		ret = wait(chip, STAT_RDA);
  18.526 - 		if (ret)
  18.527 - 			return -EIO;
  18.528 --		buf[i] = inb(chip->vendor->base + RDFIFO);
  18.529 -+		buf[i] = inb(chip->vendor.base + RDFIFO);
  18.530 - 	}
  18.531 - 
  18.532 - 	if (buf[0] != TPM_VL_VER) {
  18.533 -@@ -219,7 +220,7 @@ recv_begin:
  18.534 - 
  18.535 - 		for (i = 0; i < size; i++) {
  18.536 - 			wait(chip, STAT_RDA);
  18.537 --			buf[i] = inb(chip->vendor->base + RDFIFO);
  18.538 -+			buf[i] = inb(chip->vendor.base + RDFIFO);
  18.539 - 		}
  18.540 - 
  18.541 - 		if ((size == 0x6D00) && (buf[1] == 0x80)) {
  18.542 -@@ -268,7 +269,7 @@ static int tpm_inf_send(struct tpm_chip 
  18.543 - 	u8 count_high, count_low, count_4, count_3, count_2, count_1;
  18.544 - 
  18.545 - 	/* Disabling Reset, LP and IRQC */
  18.546 --	outb(RESET_LP_IRQC_DISABLE, chip->vendor->base + CMD);
  18.547 -+	outb(RESET_LP_IRQC_DISABLE, chip->vendor.base + CMD);
  18.548 - 
  18.549 - 	ret = empty_fifo(chip, 1);
  18.550 - 	if (ret) {
  18.551 -@@ -319,7 +320,7 @@ static void tpm_inf_cancel(struct tpm_ch
  18.552 - 
  18.553 - static u8 tpm_inf_status(struct tpm_chip *chip)
  18.554 - {
  18.555 --	return inb(chip->vendor->base + STAT);
  18.556 -+	return inb(chip->vendor.base + STAT);
  18.557 - }
  18.558 - 
  18.559 - static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
  18.560 -@@ -346,7 +347,7 @@ static struct file_operations inf_ops = 
  18.561 - 	.release = tpm_release,
  18.562 - };
  18.563 - 
  18.564 --static struct tpm_vendor_specific tpm_inf = {
  18.565 -+static const struct tpm_vendor_specific tpm_inf = {
  18.566 - 	.recv = tpm_inf_recv,
  18.567 - 	.send = tpm_inf_send,
  18.568 - 	.cancel = tpm_inf_cancel,
  18.569 -@@ -375,6 +376,7 @@ static int __devinit tpm_inf_pnp_probe(s
  18.570 - 	int version[2];
  18.571 - 	int productid[2];
  18.572 - 	char chipname[20];
  18.573 -+	struct tpm_chip *chip;
  18.574 - 
  18.575 - 	/* read IO-ports through PnP */
  18.576 - 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
  18.577 -@@ -395,14 +397,13 @@ static int __devinit tpm_inf_pnp_probe(s
  18.578 - 			goto err_last;
  18.579 - 		}
  18.580 - 		/* publish my base address and request region */
  18.581 --		tpm_inf.base = TPM_INF_BASE;
  18.582 - 		if (request_region
  18.583 --		    (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  18.584 -+		    (TPM_INF_BASE, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  18.585 - 			rc = -EINVAL;
  18.586 - 			goto err_last;
  18.587 - 		}
  18.588 --		if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN,
  18.589 --				"tpm_infineon0") == NULL) {
  18.590 -+		if (request_region
  18.591 -+		    (TPM_INF_ADDR, TPM_INF_ADDR_LEN, "tpm_infineon0") == NULL) {
  18.592 - 			rc = -EINVAL;
  18.593 - 			goto err_last;
  18.594 - 		}
  18.595 -@@ -442,9 +443,9 @@ static int __devinit tpm_inf_pnp_probe(s
  18.596 - 
  18.597 - 		/* configure TPM with IO-ports */
  18.598 - 		outb(IOLIMH, TPM_INF_ADDR);
  18.599 --		outb(((tpm_inf.base >> 8) & 0xff), TPM_INF_DATA);
  18.600 -+		outb(((TPM_INF_BASE >> 8) & 0xff), TPM_INF_DATA);
  18.601 - 		outb(IOLIML, TPM_INF_ADDR);
  18.602 --		outb((tpm_inf.base & 0xff), TPM_INF_DATA);
  18.603 -+		outb((TPM_INF_BASE & 0xff), TPM_INF_DATA);
  18.604 - 
  18.605 - 		/* control if IO-ports are set correctly */
  18.606 - 		outb(IOLIMH, TPM_INF_ADDR);
  18.607 -@@ -452,10 +453,10 @@ static int __devinit tpm_inf_pnp_probe(s
  18.608 - 		outb(IOLIML, TPM_INF_ADDR);
  18.609 - 		iol = inb(TPM_INF_DATA);
  18.610 - 
  18.611 --		if ((ioh << 8 | iol) != tpm_inf.base) {
  18.612 -+		if ((ioh << 8 | iol) != TPM_INF_BASE) {
  18.613 - 			dev_err(&dev->dev,
  18.614 --				"Could not set IO-ports to 0x%lx\n",
  18.615 --				tpm_inf.base);
  18.616 -+				"Could not set IO-ports to 0x%x\n",
  18.617 -+				TPM_INF_BASE);
  18.618 - 			rc = -EIO;
  18.619 - 			goto err_release_region;
  18.620 - 		}
  18.621 -@@ -466,15 +467,15 @@ static int __devinit tpm_inf_pnp_probe(s
  18.622 - 		outb(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
  18.623 - 
  18.624 - 		/* disable RESET, LP and IRQC */
  18.625 --		outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD);
  18.626 -+		outb(RESET_LP_IRQC_DISABLE, TPM_INF_BASE + CMD);
  18.627 - 
  18.628 - 		/* Finally, we're done, print some infos */
  18.629 - 		dev_info(&dev->dev, "TPM found: "
  18.630 - 			 "config base 0x%x, "
  18.631 - 			 "io base 0x%x, "
  18.632 --			 "chip version %02x%02x, "
  18.633 --			 "vendor id %x%x (Infineon), "
  18.634 --			 "product id %02x%02x"
  18.635 -+			 "chip version 0x%02x%02x, "
  18.636 -+			 "vendor id 0x%x%x (Infineon), "
  18.637 -+			 "product id 0x%02x%02x"
  18.638 - 			 "%s\n",
  18.639 - 			 TPM_INF_ADDR,
  18.640 - 			 TPM_INF_BASE,
  18.641 -@@ -482,11 +483,10 @@ static int __devinit tpm_inf_pnp_probe(s
  18.642 - 			 vendorid[0], vendorid[1],
  18.643 - 			 productid[0], productid[1], chipname);
  18.644 - 
  18.645 --		rc = tpm_register_hardware(&dev->dev, &tpm_inf);
  18.646 --		if (rc < 0) {
  18.647 --			rc = -ENODEV;
  18.648 -+		if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf))) {
  18.649 - 			goto err_release_region;
  18.650 - 		}
  18.651 -+		chip->vendor.base = TPM_INF_BASE;
  18.652 - 		return 0;
  18.653 - 	} else {
  18.654 - 		rc = -ENODEV;
  18.655 -@@ -494,7 +494,7 @@ static int __devinit tpm_inf_pnp_probe(s
  18.656 - 	}
  18.657 - 
  18.658 - err_release_region:
  18.659 --	release_region(tpm_inf.base, TPM_INF_PORT_LEN);
  18.660 -+	release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  18.661 - 	release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  18.662 - 
  18.663 - err_last:
  18.664 -@@ -506,7 +506,8 @@ static __devexit void tpm_inf_pnp_remove
  18.665 - 	struct tpm_chip *chip = pnp_get_drvdata(dev);
  18.666 - 
  18.667 - 	if (chip) {
  18.668 --		release_region(chip->vendor->base, TPM_INF_PORT_LEN);
  18.669 -+		release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  18.670 -+		release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  18.671 - 		tpm_remove_hardware(chip->dev);
  18.672 - 	}
  18.673 - }
  18.674 -@@ -520,7 +521,7 @@ static struct pnp_driver tpm_inf_pnp = {
  18.675 - 	},
  18.676 - 	.id_table = tpm_pnp_tbl,
  18.677 - 	.probe = tpm_inf_pnp_probe,
  18.678 --	.remove = tpm_inf_pnp_remove,
  18.679 -+	.remove = __devexit_p(tpm_inf_pnp_remove),
  18.680 - };
  18.681 - 
  18.682 - static int __init init_inf(void)
  18.683 -@@ -538,5 +539,5 @@ module_exit(cleanup_inf);
  18.684 - 
  18.685 - MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
  18.686 - MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
  18.687 --MODULE_VERSION("1.7");
  18.688 -+MODULE_VERSION("1.8");
  18.689 - MODULE_LICENSE("GPL");
  18.690 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_nsc.c ./drivers/char/tpm/tpm_nsc.c
  18.691 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_nsc.c	2006-06-26 18:05:03.000000000 -0400
  18.692 -+++ ./drivers/char/tpm/tpm_nsc.c	2006-06-26 18:16:33.000000000 -0400
  18.693 -@@ -71,7 +71,7 @@ static int wait_for_stat(struct tpm_chip
  18.694 - 	unsigned long stop;
  18.695 - 
  18.696 - 	/* status immediately available check */
  18.697 --	*data = inb(chip->vendor->base + NSC_STATUS);
  18.698 -+	*data = inb(chip->vendor.base + NSC_STATUS);
  18.699 - 	if ((*data & mask) == val)
  18.700 - 		return 0;
  18.701 - 
  18.702 -@@ -79,7 +79,7 @@ static int wait_for_stat(struct tpm_chip
  18.703 - 	stop = jiffies + 10 * HZ;
  18.704 - 	do {
  18.705 - 		msleep(TPM_TIMEOUT);
  18.706 --		*data = inb(chip->vendor->base + 1);
  18.707 -+		*data = inb(chip->vendor.base + 1);
  18.708 - 		if ((*data & mask) == val)
  18.709 - 			return 0;
  18.710 - 	}
  18.711 -@@ -94,9 +94,9 @@ static int nsc_wait_for_ready(struct tpm
  18.712 - 	unsigned long stop;
  18.713 - 
  18.714 - 	/* status immediately available check */
  18.715 --	status = inb(chip->vendor->base + NSC_STATUS);
  18.716 -+	status = inb(chip->vendor.base + NSC_STATUS);
  18.717 - 	if (status & NSC_STATUS_OBF)
  18.718 --		status = inb(chip->vendor->base + NSC_DATA);
  18.719 -+		status = inb(chip->vendor.base + NSC_DATA);
  18.720 - 	if (status & NSC_STATUS_RDY)
  18.721 - 		return 0;
  18.722 - 
  18.723 -@@ -104,9 +104,9 @@ static int nsc_wait_for_ready(struct tpm
  18.724 - 	stop = jiffies + 100;
  18.725 - 	do {
  18.726 - 		msleep(TPM_TIMEOUT);
  18.727 --		status = inb(chip->vendor->base + NSC_STATUS);
  18.728 -+		status = inb(chip->vendor.base + NSC_STATUS);
  18.729 - 		if (status & NSC_STATUS_OBF)
  18.730 --			status = inb(chip->vendor->base + NSC_DATA);
  18.731 -+			status = inb(chip->vendor.base + NSC_DATA);
  18.732 - 		if (status & NSC_STATUS_RDY)
  18.733 - 			return 0;
  18.734 - 	}
  18.735 -@@ -132,7 +132,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  18.736 - 		return -EIO;
  18.737 - 	}
  18.738 - 	if ((data =
  18.739 --	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  18.740 -+	     inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  18.741 - 		dev_err(chip->dev, "not in normal mode (0x%x)\n",
  18.742 - 			data);
  18.743 - 		return -EIO;
  18.744 -@@ -148,7 +148,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  18.745 - 		}
  18.746 - 		if (data & NSC_STATUS_F0)
  18.747 - 			break;
  18.748 --		*p = inb(chip->vendor->base + NSC_DATA);
  18.749 -+		*p = inb(chip->vendor.base + NSC_DATA);
  18.750 - 	}
  18.751 - 
  18.752 - 	if ((data & NSC_STATUS_F0) == 0 &&
  18.753 -@@ -156,7 +156,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  18.754 - 		dev_err(chip->dev, "F0 not set\n");
  18.755 - 		return -EIO;
  18.756 - 	}
  18.757 --	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
  18.758 -+	if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
  18.759 - 		dev_err(chip->dev,
  18.760 - 			"expected end of command(0x%x)\n", data);
  18.761 - 		return -EIO;
  18.762 -@@ -182,7 +182,7 @@ static int tpm_nsc_send(struct tpm_chip 
  18.763 - 	 * fix it. Not sure why this is needed, we followed the flow
  18.764 - 	 * chart in the manual to the letter.
  18.765 - 	 */
  18.766 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  18.767 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  18.768 - 
  18.769 - 	if (nsc_wait_for_ready(chip) != 0)
  18.770 - 		return -EIO;
  18.771 -@@ -192,7 +192,7 @@ static int tpm_nsc_send(struct tpm_chip 
  18.772 - 		return -EIO;
  18.773 - 	}
  18.774 - 
  18.775 --	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
  18.776 -+	outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
  18.777 - 	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
  18.778 - 		dev_err(chip->dev, "IBR timeout\n");
  18.779 - 		return -EIO;
  18.780 -@@ -204,26 +204,26 @@ static int tpm_nsc_send(struct tpm_chip 
  18.781 - 				"IBF timeout (while writing data)\n");
  18.782 - 			return -EIO;
  18.783 - 		}
  18.784 --		outb(buf[i], chip->vendor->base + NSC_DATA);
  18.785 -+		outb(buf[i], chip->vendor.base + NSC_DATA);
  18.786 - 	}
  18.787 - 
  18.788 - 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  18.789 - 		dev_err(chip->dev, "IBF timeout\n");
  18.790 - 		return -EIO;
  18.791 - 	}
  18.792 --	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
  18.793 -+	outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
  18.794 - 
  18.795 - 	return count;
  18.796 - }
  18.797 - 
  18.798 - static void tpm_nsc_cancel(struct tpm_chip *chip)
  18.799 - {
  18.800 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  18.801 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  18.802 - }
  18.803 - 
  18.804 - static u8 tpm_nsc_status(struct tpm_chip *chip)
  18.805 - {
  18.806 --	return inb(chip->vendor->base + NSC_STATUS);
  18.807 -+	return inb(chip->vendor.base + NSC_STATUS);
  18.808 - }
  18.809 - 
  18.810 - static struct file_operations nsc_ops = {
  18.811 -@@ -250,7 +250,7 @@ static struct attribute * nsc_attrs[] = 
  18.812 - 
  18.813 - static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
  18.814 - 
  18.815 --static struct tpm_vendor_specific tpm_nsc = {
  18.816 -+static const struct tpm_vendor_specific tpm_nsc = {
  18.817 - 	.recv = tpm_nsc_recv,
  18.818 - 	.send = tpm_nsc_send,
  18.819 - 	.cancel = tpm_nsc_cancel,
  18.820 -@@ -268,7 +268,7 @@ static void __devexit tpm_nsc_remove(str
  18.821 - {
  18.822 - 	struct tpm_chip *chip = dev_get_drvdata(dev);
  18.823 - 	if ( chip ) {
  18.824 --		release_region(chip->vendor->base, 2);
  18.825 -+		release_region(chip->vendor.base, 2);
  18.826 - 		tpm_remove_hardware(chip->dev);
  18.827 - 	}
  18.828 - }
  18.829 -@@ -286,7 +286,8 @@ static int __init init_nsc(void)
  18.830 - 	int rc = 0;
  18.831 - 	int lo, hi;
  18.832 - 	int nscAddrBase = TPM_ADDR;
  18.833 --
  18.834 -+	struct tpm_chip *chip;
  18.835 -+	unsigned long base;
  18.836 - 
  18.837 - 	/* verify that it is a National part (SID) */
  18.838 - 	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
  18.839 -@@ -300,7 +301,7 @@ static int __init init_nsc(void)
  18.840 - 
  18.841 - 	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
  18.842 - 	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
  18.843 --	tpm_nsc.base = (hi<<8) | lo;
  18.844 -+	base = (hi<<8) | lo;
  18.845 - 
  18.846 - 	/* enable the DPM module */
  18.847 - 	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
  18.848 -@@ -320,13 +321,15 @@ static int __init init_nsc(void)
  18.849 - 	if ((rc = platform_device_register(pdev)) < 0)
  18.850 - 		goto err_free_dev;
  18.851 - 
  18.852 --	if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) {
  18.853 -+	if (request_region(base, 2, "tpm_nsc0") == NULL ) {
  18.854 - 		rc = -EBUSY;
  18.855 - 		goto err_unreg_dev;
  18.856 - 	}
  18.857 - 
  18.858 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0)
  18.859 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
  18.860 -+		rc = -ENODEV;
  18.861 - 		goto err_rel_reg;
  18.862 -+	}
  18.863 - 
  18.864 - 	dev_dbg(&pdev->dev, "NSC TPM detected\n");
  18.865 - 	dev_dbg(&pdev->dev,
  18.866 -@@ -361,10 +364,12 @@ static int __init init_nsc(void)
  18.867 - 		 "NSC TPM revision %d\n",
  18.868 - 		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
  18.869 - 
  18.870 -+	chip->vendor.base = base;
  18.871 -+
  18.872 - 	return 0;
  18.873 - 
  18.874 - err_rel_reg:
  18.875 --	release_region(tpm_nsc.base, 2);
  18.876 -+	release_region(base, 2);
  18.877 - err_unreg_dev:
  18.878 - 	platform_device_unregister(pdev);
  18.879 - err_free_dev:
  18.880 -diff -pruN ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_tis.c ./drivers/char/tpm/tpm_tis.c
  18.881 ---- ../pristine-linux-2.6.16.13/drivers/char/tpm/tpm_tis.c	1969-12-31 19:00:00.000000000 -0500
  18.882 -+++ ./drivers/char/tpm/tpm_tis.c	2006-06-26 18:16:33.000000000 -0400
  18.883 -@@ -0,0 +1,665 @@
  18.884 -+/*
  18.885 -+ * Copyright (C) 2005, 2006 IBM Corporation
  18.886 -+ *
  18.887 -+ * Authors:
  18.888 -+ * Leendert van Doorn <leendert@watson.ibm.com>
  18.889 -+ * Kylene Hall <kjhall@us.ibm.com>
  18.890 -+ *
  18.891 -+ * Device driver for TCG/TCPA TPM (trusted platform module).
  18.892 -+ * Specifications at www.trustedcomputinggroup.org
  18.893 -+ *
  18.894 -+ * This device driver implements the TPM interface as defined in
  18.895 -+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
  18.896 -+ *
  18.897 -+ * This program is free software; you can redistribute it and/or
  18.898 -+ * modify it under the terms of the GNU General Public License as
  18.899 -+ * published by the Free Software Foundation, version 2 of the
  18.900 -+ * License.
  18.901 -+ */
  18.902 -+#include <linux/init.h>
  18.903 -+#include <linux/module.h>
  18.904 -+#include <linux/moduleparam.h>
  18.905 -+#include <linux/pnp.h>
  18.906 -+#include <linux/interrupt.h>
  18.907 -+#include <linux/wait.h>
  18.908 -+#include "tpm.h"
  18.909 -+
  18.910 -+#define TPM_HEADER_SIZE 10
  18.911 -+
  18.912 -+enum tis_access {
  18.913 -+	TPM_ACCESS_VALID = 0x80,
  18.914 -+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
  18.915 -+	TPM_ACCESS_REQUEST_PENDING = 0x04,
  18.916 -+	TPM_ACCESS_REQUEST_USE = 0x02,
  18.917 -+};
  18.918 -+
  18.919 -+enum tis_status {
  18.920 -+	TPM_STS_VALID = 0x80,
  18.921 -+	TPM_STS_COMMAND_READY = 0x40,
  18.922 -+	TPM_STS_GO = 0x20,
  18.923 -+	TPM_STS_DATA_AVAIL = 0x10,
  18.924 -+	TPM_STS_DATA_EXPECT = 0x08,
  18.925 -+};
  18.926 -+
  18.927 -+enum tis_int_flags {
  18.928 -+	TPM_GLOBAL_INT_ENABLE = 0x80000000,
  18.929 -+	TPM_INTF_BURST_COUNT_STATIC = 0x100,
  18.930 -+	TPM_INTF_CMD_READY_INT = 0x080,
  18.931 -+	TPM_INTF_INT_EDGE_FALLING = 0x040,
  18.932 -+	TPM_INTF_INT_EDGE_RISING = 0x020,
  18.933 -+	TPM_INTF_INT_LEVEL_LOW = 0x010,
  18.934 -+	TPM_INTF_INT_LEVEL_HIGH = 0x008,
  18.935 -+	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
  18.936 -+	TPM_INTF_STS_VALID_INT = 0x002,
  18.937 -+	TPM_INTF_DATA_AVAIL_INT = 0x001,
  18.938 -+};
  18.939 -+
  18.940 -+enum tis_defaults {
  18.941 -+	TIS_MEM_BASE = 0xFED40000,
  18.942 -+	TIS_MEM_LEN = 0x5000,
  18.943 -+	TIS_SHORT_TIMEOUT = 750,	/* ms */
  18.944 -+	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
  18.945 -+};
  18.946 -+
  18.947 -+#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
  18.948 -+#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
  18.949 -+#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
  18.950 -+#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
  18.951 -+#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
  18.952 -+#define	TPM_STS(l)			(0x0018 | ((l) << 12))
  18.953 -+#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))
  18.954 -+
  18.955 -+#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
  18.956 -+#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
  18.957 -+
  18.958 -+static LIST_HEAD(tis_chips);
  18.959 -+static DEFINE_SPINLOCK(tis_lock);
  18.960 -+
  18.961 -+static int check_locality(struct tpm_chip *chip, int l)
  18.962 -+{
  18.963 -+	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  18.964 -+	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
  18.965 -+	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
  18.966 -+		return chip->vendor.locality = l;
  18.967 -+
  18.968 -+	return -1;
  18.969 -+}
  18.970 -+
  18.971 -+static void release_locality(struct tpm_chip *chip, int l, int force)
  18.972 -+{
  18.973 -+	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  18.974 -+		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
  18.975 -+	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
  18.976 -+		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
  18.977 -+			 chip->vendor.iobase + TPM_ACCESS(l));
  18.978 -+}
  18.979 -+
  18.980 -+static int request_locality(struct tpm_chip *chip, int l)
  18.981 -+{
  18.982 -+	unsigned long stop;
  18.983 -+	long rc;
  18.984 -+
  18.985 -+	if (check_locality(chip, l) >= 0)
  18.986 -+		return l;
  18.987 -+
  18.988 -+	iowrite8(TPM_ACCESS_REQUEST_USE,
  18.989 -+		 chip->vendor.iobase + TPM_ACCESS(l));
  18.990 -+
  18.991 -+	if (chip->vendor.irq) {
  18.992 -+		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
  18.993 -+						      (check_locality
  18.994 -+						       (chip, l) >= 0),
  18.995 -+						      chip->vendor.timeout_a);
  18.996 -+		if (rc > 0)
  18.997 -+			return l;
  18.998 -+
  18.999 -+	} else {
 18.1000 -+		/* wait for burstcount */
 18.1001 -+		stop = jiffies + chip->vendor.timeout_a;
 18.1002 -+		do {
 18.1003 -+			if (check_locality(chip, l) >= 0)
 18.1004 -+				return l;
 18.1005 -+			msleep(TPM_TIMEOUT);
 18.1006 -+		}
 18.1007 -+		while (time_before(jiffies, stop));
 18.1008 -+	}
 18.1009 -+	return -1;
 18.1010 -+}
 18.1011 -+
 18.1012 -+static u8 tpm_tis_status(struct tpm_chip *chip)
 18.1013 -+{
 18.1014 -+	return ioread8(chip->vendor.iobase +
 18.1015 -+		       TPM_STS(chip->vendor.locality));
 18.1016 -+}
 18.1017 -+
 18.1018 -+static void tpm_tis_ready(struct tpm_chip *chip)
 18.1019 -+{
 18.1020 -+	/* this causes the current command to be aborted */
 18.1021 -+	iowrite8(TPM_STS_COMMAND_READY,
 18.1022 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 18.1023 -+}
 18.1024 -+
 18.1025 -+static int get_burstcount(struct tpm_chip *chip)
 18.1026 -+{
 18.1027 -+	unsigned long stop;
 18.1028 -+	int burstcnt;
 18.1029 -+
 18.1030 -+	/* wait for burstcount */
 18.1031 -+	/* which timeout value, spec has 2 answers (c & d) */
 18.1032 -+	stop = jiffies + chip->vendor.timeout_d;
 18.1033 -+	do {
 18.1034 -+		burstcnt = ioread8(chip->vendor.iobase +
 18.1035 -+				   TPM_STS(chip->vendor.locality) + 1);
 18.1036 -+		burstcnt += ioread8(chip->vendor.iobase +
 18.1037 -+				    TPM_STS(chip->vendor.locality) +
 18.1038 -+				    2) << 8;
 18.1039 -+		if (burstcnt)
 18.1040 -+			return burstcnt;
 18.1041 -+		msleep(TPM_TIMEOUT);
 18.1042 -+	} while (time_before(jiffies, stop));
 18.1043 -+	return -EBUSY;
 18.1044 -+}
 18.1045 -+
 18.1046 -+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 18.1047 -+			 wait_queue_head_t *queue)
 18.1048 -+{
 18.1049 -+	unsigned long stop;
 18.1050 -+	long rc;
 18.1051 -+	u8 status;
 18.1052 -+
 18.1053 -+	/* check current status */
 18.1054 -+	status = tpm_tis_status(chip);
 18.1055 -+	if ((status & mask) == mask)
 18.1056 -+		return 0;
 18.1057 -+
 18.1058 -+	if (chip->vendor.irq) {
 18.1059 -+		rc = wait_event_interruptible_timeout(*queue,
 18.1060 -+						      ((tpm_tis_status
 18.1061 -+							(chip) & mask) ==
 18.1062 -+						       mask), timeout);
 18.1063 -+		if (rc > 0)
 18.1064 -+			return 0;
 18.1065 -+	} else {
 18.1066 -+		stop = jiffies + timeout;
 18.1067 -+		do {
 18.1068 -+			msleep(TPM_TIMEOUT);
 18.1069 -+			status = tpm_tis_status(chip);
 18.1070 -+			if ((status & mask) == mask)
 18.1071 -+				return 0;
 18.1072 -+		} while (time_before(jiffies, stop));
 18.1073 -+	}
 18.1074 -+	return -ETIME;
 18.1075 -+}
 18.1076 -+
 18.1077 -+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 18.1078 -+{
 18.1079 -+	int size = 0, burstcnt;
 18.1080 -+	while (size < count &&
 18.1081 -+	       wait_for_stat(chip,
 18.1082 -+			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 18.1083 -+			     chip->vendor.timeout_c,
 18.1084 -+			     &chip->vendor.read_queue)
 18.1085 -+	       == 0) {
 18.1086 -+		burstcnt = get_burstcount(chip);
 18.1087 -+		for (; burstcnt > 0 && size < count; burstcnt--)
 18.1088 -+			buf[size++] = ioread8(chip->vendor.iobase +
 18.1089 -+					      TPM_DATA_FIFO(chip->vendor.
 18.1090 -+							    locality));
 18.1091 -+	}
 18.1092 -+	return size;
 18.1093 -+}
 18.1094 -+
 18.1095 -+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 18.1096 -+{
 18.1097 -+	int size = 0;
 18.1098 -+	int expected, status;
 18.1099 -+
 18.1100 -+	if (count < TPM_HEADER_SIZE) {
 18.1101 -+		size = -EIO;
 18.1102 -+		goto out;
 18.1103 -+	}
 18.1104 -+
 18.1105 -+	/* read first 10 bytes, including tag, paramsize, and result */
 18.1106 -+	if ((size =
 18.1107 -+	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
 18.1108 -+		dev_err(chip->dev, "Unable to read header\n");
 18.1109 -+		goto out;
 18.1110 -+	}
 18.1111 -+
 18.1112 -+	expected = be32_to_cpu(*(__be32 *) (buf + 2));
 18.1113 -+	if (expected > count) {
 18.1114 -+		size = -EIO;
 18.1115 -+		goto out;
 18.1116 -+	}
 18.1117 -+
 18.1118 -+	if ((size +=
 18.1119 -+	     recv_data(chip, &buf[TPM_HEADER_SIZE],
 18.1120 -+		       expected - TPM_HEADER_SIZE)) < expected) {
 18.1121 -+		dev_err(chip->dev, "Unable to read remainder of result\n");
 18.1122 -+		size = -ETIME;
 18.1123 -+		goto out;
 18.1124 -+	}
 18.1125 -+
 18.1126 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 18.1127 -+		      &chip->vendor.int_queue);
 18.1128 -+	status = tpm_tis_status(chip);
 18.1129 -+	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
 18.1130 -+		dev_err(chip->dev, "Error left over data\n");
 18.1131 -+		size = -EIO;
 18.1132 -+		goto out;
 18.1133 -+	}
 18.1134 -+
 18.1135 -+out:
 18.1136 -+	tpm_tis_ready(chip);
 18.1137 -+	release_locality(chip, chip->vendor.locality, 0);
 18.1138 -+	return size;
 18.1139 -+}
 18.1140 -+
 18.1141 -+/*
 18.1142 -+ * If interrupts are used (signaled by an irq set in the vendor structure)
 18.1143 -+ * tpm.c can skip polling for the data to be available as the interrupt is
 18.1144 -+ * waited for here
 18.1145 -+ */
 18.1146 -+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
 18.1147 -+{
 18.1148 -+	int rc, status, burstcnt;
 18.1149 -+	size_t count = 0;
 18.1150 -+	u32 ordinal;
 18.1151 -+
 18.1152 -+	if (request_locality(chip, 0) < 0)
 18.1153 -+		return -EBUSY;
 18.1154 -+
 18.1155 -+	status = tpm_tis_status(chip);
 18.1156 -+	if ((status & TPM_STS_COMMAND_READY) == 0) {
 18.1157 -+		tpm_tis_ready(chip);
 18.1158 -+		if (wait_for_stat
 18.1159 -+		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
 18.1160 -+		     &chip->vendor.int_queue) < 0) {
 18.1161 -+			rc = -ETIME;
 18.1162 -+			goto out_err;
 18.1163 -+		}
 18.1164 -+	}
 18.1165 -+
 18.1166 -+	while (count < len - 1) {
 18.1167 -+		burstcnt = get_burstcount(chip);
 18.1168 -+		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
 18.1169 -+			iowrite8(buf[count], chip->vendor.iobase +
 18.1170 -+				 TPM_DATA_FIFO(chip->vendor.locality));
 18.1171 -+			count++;
 18.1172 -+		}
 18.1173 -+
 18.1174 -+		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 18.1175 -+			      &chip->vendor.int_queue);
 18.1176 -+		status = tpm_tis_status(chip);
 18.1177 -+		if ((status & TPM_STS_DATA_EXPECT) == 0) {
 18.1178 -+			rc = -EIO;
 18.1179 -+			goto out_err;
 18.1180 -+		}
 18.1181 -+	}
 18.1182 -+
 18.1183 -+	/* write last byte */
 18.1184 -+	iowrite8(buf[count],
 18.1185 -+		 chip->vendor.iobase +
 18.1186 -+		 TPM_DATA_FIFO(chip->vendor.locality));
 18.1187 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 18.1188 -+		      &chip->vendor.int_queue);
 18.1189 -+	status = tpm_tis_status(chip);
 18.1190 -+	if ((status & TPM_STS_DATA_EXPECT) != 0) {
 18.1191 -+		rc = -EIO;
 18.1192 -+		goto out_err;
 18.1193 -+	}
 18.1194 -+
 18.1195 -+	/* go and do it */
 18.1196 -+	iowrite8(TPM_STS_GO,
 18.1197 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 18.1198 -+
 18.1199 -+	if (chip->vendor.irq) {
 18.1200 -+		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
 18.1201 -+		if (wait_for_stat
 18.1202 -+		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 18.1203 -+		     tpm_calc_ordinal_duration(chip, ordinal),
 18.1204 -+		     &chip->vendor.read_queue) < 0) {
 18.1205 -+			rc = -ETIME;
 18.1206 -+			goto out_err;
 18.1207 -+		}
 18.1208 -+	}
 18.1209 -+	return len;
 18.1210 -+out_err:
 18.1211 -+	tpm_tis_ready(chip);
 18.1212 -+	release_locality(chip, chip->vendor.locality, 0);
 18.1213 -+	return rc;
 18.1214 -+}
 18.1215 -+
 18.1216 -+static struct file_operations tis_ops = {
 18.1217 -+	.owner = THIS_MODULE,
 18.1218 -+	.llseek = no_llseek,
 18.1219 -+	.open = tpm_open,
 18.1220 -+	.read = tpm_read,
 18.1221 -+	.write = tpm_write,
 18.1222 -+	.release = tpm_release,
 18.1223 -+};
 18.1224 -+
 18.1225 -+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
 18.1226 -+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
 18.1227 -+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
 18.1228 -+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
 18.1229 -+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
 18.1230 -+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
 18.1231 -+		   NULL);
 18.1232 -+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 18.1233 -+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 18.1234 -+
 18.1235 -+static struct attribute *tis_attrs[] = {
 18.1236 -+	&dev_attr_pubek.attr,
 18.1237 -+	&dev_attr_pcrs.attr,
 18.1238 -+	&dev_attr_enabled.attr,
 18.1239 -+	&dev_attr_active.attr,
 18.1240 -+	&dev_attr_owned.attr,
 18.1241 -+	&dev_attr_temp_deactivated.attr,
 18.1242 -+	&dev_attr_caps.attr,
 18.1243 -+	&dev_attr_cancel.attr, NULL,
 18.1244 -+};
 18.1245 -+
 18.1246 -+static struct attribute_group tis_attr_grp = {
 18.1247 -+	.attrs = tis_attrs
 18.1248 -+};
 18.1249 -+
 18.1250 -+static struct tpm_vendor_specific tpm_tis = {
 18.1251 -+	.status = tpm_tis_status,
 18.1252 -+	.recv = tpm_tis_recv,
 18.1253 -+	.send = tpm_tis_send,
 18.1254 -+	.cancel = tpm_tis_ready,
 18.1255 -+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 18.1256 -+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 18.1257 -+	.req_canceled = TPM_STS_COMMAND_READY,
 18.1258 -+	.attr_group = &tis_attr_grp,
 18.1259 -+	.miscdev = {
 18.1260 -+		    .fops = &tis_ops,},
 18.1261 -+};
 18.1262 -+
 18.1263 -+static irqreturn_t tis_int_probe(int irq, void *dev_id, struct pt_regs *regs)
 18.1264 -+{
 18.1265 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 18.1266 -+	u32 interrupt;
 18.1267 -+
 18.1268 -+	interrupt = ioread32(chip->vendor.iobase +
 18.1269 -+			     TPM_INT_STATUS(chip->vendor.locality));
 18.1270 -+
 18.1271 -+	if (interrupt == 0)
 18.1272 -+		return IRQ_NONE;
 18.1273 -+
 18.1274 -+	chip->vendor.irq = irq;
 18.1275 -+
 18.1276 -+	/* Clear interrupts handled with TPM_EOI */
 18.1277 -+	iowrite32(interrupt,
 18.1278 -+		  chip->vendor.iobase +
 18.1279 -+		  TPM_INT_STATUS(chip->vendor.locality));
 18.1280 -+	return IRQ_HANDLED;
 18.1281 -+}
 18.1282 -+
 18.1283 -+static irqreturn_t tis_int_handler(int irq, void *dev_id, struct pt_regs *regs)
 18.1284 -+{
 18.1285 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 18.1286 -+	u32 interrupt;
 18.1287 -+	int i;
 18.1288 -+
 18.1289 -+	interrupt = ioread32(chip->vendor.iobase +
 18.1290 -+			     TPM_INT_STATUS(chip->vendor.locality));
 18.1291 -+
 18.1292 -+	if (interrupt == 0)
 18.1293 -+		return IRQ_NONE;
 18.1294 -+
 18.1295 -+	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
 18.1296 -+		wake_up_interruptible(&chip->vendor.read_queue);
 18.1297 -+	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
 18.1298 -+		for (i = 0; i < 5; i++)
 18.1299 -+			if (check_locality(chip, i) >= 0)
 18.1300 -+				break;
 18.1301 -+	if (interrupt &
 18.1302 -+	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
 18.1303 -+	     TPM_INTF_CMD_READY_INT))
 18.1304 -+		wake_up_interruptible(&chip->vendor.int_queue);
 18.1305 -+
 18.1306 -+	/* Clear interrupts handled with TPM_EOI */
 18.1307 -+	iowrite32(interrupt,
 18.1308 -+		  chip->vendor.iobase +
 18.1309 -+		  TPM_INT_STATUS(chip->vendor.locality));
 18.1310 -+	return IRQ_HANDLED;
 18.1311 -+}
 18.1312 -+
 18.1313 -+static int interrupts = 1;
 18.1314 -+module_param(interrupts, bool, 0444);
 18.1315 -+MODULE_PARM_DESC(interrupts, "Enable interrupts");
 18.1316 -+
 18.1317 -+static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 18.1318 -+				      const struct pnp_device_id *pnp_id)
 18.1319 -+{
 18.1320 -+	u32 vendor, intfcaps, intmask;
 18.1321 -+	int rc, i;
 18.1322 -+	unsigned long start, len;
 18.1323 -+	struct tpm_chip *chip;
 18.1324 -+
 18.1325 -+	start = pnp_mem_start(pnp_dev, 0);
 18.1326 -+	len = pnp_mem_len(pnp_dev, 0);
 18.1327 -+
 18.1328 -+	if (!start)
 18.1329 -+		start = TIS_MEM_BASE;
 18.1330 -+	if (!len)
 18.1331 -+		len = TIS_MEM_LEN;
 18.1332 -+
 18.1333 -+	if (!(chip = tpm_register_hardware(&pnp_dev->dev, &tpm_tis)))
 18.1334 -+		return -ENODEV;
 18.1335 -+
 18.1336 -+	chip->vendor.iobase = ioremap(start, len);
 18.1337 -+	if (!chip->vendor.iobase) {
 18.1338 -+		rc = -EIO;
 18.1339 -+		goto out_err;
 18.1340 -+	}
 18.1341 -+
 18.1342 -+	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
 18.1343 -+
 18.1344 -+	/* Default timeouts */
 18.1345 -+	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 18.1346 -+	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
 18.1347 -+	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 18.1348 -+	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 18.1349 -+
 18.1350 -+	dev_info(&pnp_dev->dev,
 18.1351 -+		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 18.1352 -+		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 18.1353 -+
 18.1354 -+	/* Figure out the capabilities */
 18.1355 -+	intfcaps =
 18.1356 -+	    ioread32(chip->vendor.iobase +
 18.1357 -+		     TPM_INTF_CAPS(chip->vendor.locality));
 18.1358 -+	dev_dbg(&pnp_dev->dev, "TPM interface capabilities (0x%x):\n",
 18.1359 -+		intfcaps);
 18.1360 -+	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
 18.1361 -+		dev_dbg(&pnp_dev->dev, "\tBurst Count Static\n");
 18.1362 -+	if (intfcaps & TPM_INTF_CMD_READY_INT)
 18.1363 -+		dev_dbg(&pnp_dev->dev, "\tCommand Ready Int Support\n");
 18.1364 -+	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
 18.1365 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Falling\n");
 18.1366 -+	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
 18.1367 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Rising\n");
 18.1368 -+	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
 18.1369 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level Low\n");
 18.1370 -+	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
 18.1371 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level High\n");
 18.1372 -+	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
 18.1373 -+		dev_dbg(&pnp_dev->dev, "\tLocality Change Int Support\n");
 18.1374 -+	if (intfcaps & TPM_INTF_STS_VALID_INT)
 18.1375 -+		dev_dbg(&pnp_dev->dev, "\tSts Valid Int Support\n");
 18.1376 -+	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 18.1377 -+		dev_dbg(&pnp_dev->dev, "\tData Avail Int Support\n");
 18.1378 -+
 18.1379 -+	if (request_locality(chip, 0) != 0) {
 18.1380 -+		rc = -ENODEV;
 18.1381 -+		goto out_err;
 18.1382 -+	}
 18.1383 -+
 18.1384 -+	/* INTERRUPT Setup */
 18.1385 -+	init_waitqueue_head(&chip->vendor.read_queue);
 18.1386 -+	init_waitqueue_head(&chip->vendor.int_queue);
 18.1387 -+
 18.1388 -+	intmask =
 18.1389 -+	    ioread32(chip->vendor.iobase +
 18.1390 -+		     TPM_INT_ENABLE(chip->vendor.locality));
 18.1391 -+
 18.1392 -+	intmask |= TPM_INTF_CMD_READY_INT
 18.1393 -+	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
 18.1394 -+	    | TPM_INTF_STS_VALID_INT;
 18.1395 -+
 18.1396 -+	iowrite32(intmask,
 18.1397 -+		  chip->vendor.iobase +
 18.1398 -+		  TPM_INT_ENABLE(chip->vendor.locality));
 18.1399 -+	if (interrupts) {
 18.1400 -+		chip->vendor.irq =
 18.1401 -+		    ioread8(chip->vendor.iobase +
 18.1402 -+			    TPM_INT_VECTOR(chip->vendor.locality));
 18.1403 -+
 18.1404 -+		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
 18.1405 -+			iowrite8(i, chip->vendor.iobase +
 18.1406 -+				    TPM_INT_VECTOR(chip->vendor.locality));
 18.1407 -+			if (request_irq
 18.1408 -+			    (i, tis_int_probe, SA_SHIRQ,
 18.1409 -+			     chip->vendor.miscdev.name, chip) != 0) {
 18.1410 -+				dev_info(chip->dev,
 18.1411 -+					 "Unable to request irq: %d for probe\n",
 18.1412 -+					 i);
 18.1413 -+				continue;
 18.1414 -+			}
 18.1415 -+
 18.1416 -+			/* Clear all existing */
 18.1417 -+			iowrite32(ioread32
 18.1418 -+				  (chip->vendor.iobase +
 18.1419 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 18.1420 -+				  chip->vendor.iobase +
 18.1421 -+				  TPM_INT_STATUS(chip->vendor.locality));
 18.1422 -+
 18.1423 -+			/* Turn on */
 18.1424 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 18.1425 -+				  chip->vendor.iobase +
 18.1426 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 18.1427 -+
 18.1428 -+			/* Generate Interrupts */
 18.1429 -+			tpm_gen_interrupt(chip);
 18.1430 -+
 18.1431 -+			/* Turn off */
 18.1432 -+			iowrite32(intmask,
 18.1433 -+				  chip->vendor.iobase +
 18.1434 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 18.1435 -+			free_irq(i, chip);
 18.1436 -+		}
 18.1437 -+	}
 18.1438 -+	if (chip->vendor.irq) {
 18.1439 -+		iowrite8(chip->vendor.irq,
 18.1440 -+			 chip->vendor.iobase +
 18.1441 -+			 TPM_INT_VECTOR(chip->vendor.locality));
 18.1442 -+		if (request_irq
 18.1443 -+		    (chip->vendor.irq, tis_int_handler, SA_SHIRQ,
 18.1444 -+		     chip->vendor.miscdev.name, chip) != 0) {
 18.1445 -+			dev_info(chip->dev,
 18.1446 -+				 "Unable to request irq: %d for use\n",
 18.1447 -+				 chip->vendor.irq);
 18.1448 -+			chip->vendor.irq = 0;
 18.1449 -+		} else {
 18.1450 -+			/* Clear all existing */
 18.1451 -+			iowrite32(ioread32
 18.1452 -+				  (chip->vendor.iobase +
 18.1453 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 18.1454 -+				  chip->vendor.iobase +
 18.1455 -+				  TPM_INT_STATUS(chip->vendor.locality));
 18.1456 -+
 18.1457 -+			/* Turn on */
 18.1458 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 18.1459 -+				  chip->vendor.iobase +
 18.1460 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 18.1461 -+		}
 18.1462 -+	}
 18.1463 -+
 18.1464 -+	INIT_LIST_HEAD(&chip->vendor.list);
 18.1465 -+	spin_lock(&tis_lock);
 18.1466 -+	list_add(&chip->vendor.list, &tis_chips);
 18.1467 -+	spin_unlock(&tis_lock);
 18.1468 -+
 18.1469 -+	tpm_get_timeouts(chip);
 18.1470 -+	tpm_continue_selftest(chip);
 18.1471 -+
 18.1472 -+	return 0;
 18.1473 -+out_err:
 18.1474 -+	if (chip->vendor.iobase)
 18.1475 -+		iounmap(chip->vendor.iobase);
 18.1476 -+	tpm_remove_hardware(chip->dev);
 18.1477 -+	return rc;
 18.1478 -+}
 18.1479 -+
 18.1480 -+static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 18.1481 -+{
 18.1482 -+	return tpm_pm_suspend(&dev->dev, msg);
 18.1483 -+}
 18.1484 -+
 18.1485 -+static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 18.1486 -+{
 18.1487 -+	return tpm_pm_resume(&dev->dev);
 18.1488 -+}
 18.1489 -+
 18.1490 -+static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
 18.1491 -+	{"PNP0C31", 0},		/* TPM */
 18.1492 -+	{"ATM1200", 0},		/* Atmel */
 18.1493 -+	{"IFX0102", 0},		/* Infineon */
 18.1494 -+	{"BCM0101", 0},		/* Broadcom */
 18.1495 -+	{"NSC1200", 0},		/* National */
 18.1496 -+	/* Add new here */
 18.1497 -+	{"", 0},		/* User Specified */
 18.1498 -+	{"", 0}			/* Terminator */
 18.1499 -+};
 18.1500 -+
 18.1501 -+static struct pnp_driver tis_pnp_driver = {
 18.1502 -+	.name = "tpm_tis",
 18.1503 -+	.id_table = tpm_pnp_tbl,
 18.1504 -+	.probe = tpm_tis_pnp_init,
 18.1505 -+	.suspend = tpm_tis_pnp_suspend,
 18.1506 -+	.resume = tpm_tis_pnp_resume,
 18.1507 -+};
 18.1508 -+
 18.1509 -+#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
 18.1510 -+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 18.1511 -+		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
 18.1512 -+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 18.1513 -+
 18.1514 -+static int __init init_tis(void)
 18.1515 -+{
 18.1516 -+	return pnp_register_driver(&tis_pnp_driver);
 18.1517 -+}
 18.1518 -+
 18.1519 -+static void __exit cleanup_tis(void)
 18.1520 -+{
 18.1521 -+	struct tpm_vendor_specific *i, *j;
 18.1522 -+	struct tpm_chip *chip;
 18.1523 -+	spin_lock(&tis_lock);
 18.1524 -+	list_for_each_entry_safe(i, j, &tis_chips, list) {
 18.1525 -+		chip = to_tpm_chip(i);
 18.1526 -+		iowrite32(~TPM_GLOBAL_INT_ENABLE &
 18.1527 -+			  ioread32(chip->vendor.iobase +
 18.1528 -+				   TPM_INT_ENABLE(chip->vendor.
 18.1529 -+						  locality)),
 18.1530 -+			  chip->vendor.iobase +
 18.1531 -+			  TPM_INT_ENABLE(chip->vendor.locality));
 18.1532 -+		release_locality(chip, chip->vendor.locality, 1);
 18.1533 -+		if (chip->vendor.irq)
 18.1534 -+			free_irq(chip->vendor.irq, chip);
 18.1535 -+		iounmap(i->iobase);
 18.1536 -+		list_del(&i->list);
 18.1537 -+		tpm_remove_hardware(chip->dev);
 18.1538 -+	}
 18.1539 -+	spin_unlock(&tis_lock);
 18.1540 -+	pnp_unregister_driver(&tis_pnp_driver);
 18.1541 -+}
 18.1542 -+
 18.1543 -+module_init(init_tis);
 18.1544 -+module_exit(cleanup_tis);
 18.1545 -+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
 18.1546 -+MODULE_DESCRIPTION("TPM Driver");
 18.1547 -+MODULE_VERSION("2.0");
 18.1548 -+MODULE_LICENSE("GPL");
 18.1549 -
    19.1 --- a/patches/linux-2.6.16.13/x86-elfnote-as-preprocessor-macro.patch	Tue Sep 19 14:26:22 2006 +0100
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,44 +0,0 @@
    19.4 -
    19.5 -diff -r 4b7cd997c08f include/linux/elfnote.h
    19.6 ---- a/include/linux/elfnote.h	Wed Aug 23 11:48:46 2006 +0100
    19.7 -+++ b/include/linux/elfnote.h	Wed Aug 23 12:44:27 2006 +0100
    19.8 -@@ -31,22 +31,24 @@
    19.9 - /*
   19.10 -  * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
   19.11 -  * turn out to be the same size and shape), followed by the name and
   19.12 -- * desc data with appropriate padding.  The 'desc' argument includes
   19.13 -- * the assembler pseudo op defining the type of the data: .asciz
   19.14 -- * "hello, world"
   19.15 -+ * desc data with appropriate padding.  The 'desctype' argument is the
   19.16 -+ * assembler pseudo op defining the type of the data e.g. .asciz while
   19.17 -+ * 'descdata' is the data itself e.g.  "hello, world".
   19.18 -+ *
   19.19 -+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
   19.20 -+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
   19.21 -  */
   19.22 --.macro ELFNOTE name type desc:vararg
   19.23 --.pushsection ".note.\name"
   19.24 --  .align 4
   19.25 --  .long 2f - 1f			/* namesz */
   19.26 --  .long 4f - 3f			/* descsz */
   19.27 --  .long \type
   19.28 --1:.asciz "\name"
   19.29 --2:.align 4
   19.30 --3:\desc
   19.31 --4:.align 4
   19.32 --.popsection
   19.33 --.endm
   19.34 -+#define ELFNOTE(name, type, desctype, descdata)	\
   19.35 -+.pushsection .note.name			;	\
   19.36 -+  .align 4				;	\
   19.37 -+  .long 2f - 1f		/* namesz */	;	\
   19.38 -+  .long 4f - 3f		/* descsz */	;	\
   19.39 -+  .long type				;	\
   19.40 -+1:.asciz "name"				;	\
   19.41 -+2:.align 4				;	\
   19.42 -+3:desctype descdata			;	\
   19.43 -+4:.align 4				;	\
   19.44 -+.popsection				;
   19.45 - #else	/* !__ASSEMBLER__ */
   19.46 - #include <linux/elf.h>
   19.47 - /*
    20.1 --- a/patches/linux-2.6.16.13/x86-increase-interrupt-vector-range.patch	Tue Sep 19 14:26:22 2006 +0100
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,89 +0,0 @@
    20.4 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    20.5 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/entry.S	2006-05-02 22:38:44.000000000 +0100
    20.6 -+++ ./arch/i386/kernel/entry.S	2006-05-04 17:41:49.000000000 +0100
    20.7 -@@ -406,7 +406,7 @@ vector=0
    20.8 - ENTRY(irq_entries_start)
    20.9 - .rept NR_IRQS
   20.10 - 	ALIGN
   20.11 --1:	pushl $vector-256
   20.12 -+1:	pushl $~(vector)
   20.13 - 	jmp common_interrupt
   20.14 - .data
   20.15 - 	.long 1b
   20.16 -@@ -423,7 +423,7 @@ common_interrupt:
   20.17 - 
   20.18 - #define BUILD_INTERRUPT(name, nr)	\
   20.19 - ENTRY(name)				\
   20.20 --	pushl $nr-256;			\
   20.21 -+	pushl $~(nr);			\
   20.22 - 	SAVE_ALL			\
   20.23 - 	movl %esp,%eax;			\
   20.24 - 	call smp_/**/name;		\
   20.25 -diff -pruN ../pristine-linux-2.6.16.13/arch/i386/kernel/irq.c ./arch/i386/kernel/irq.c
   20.26 ---- ../pristine-linux-2.6.16.13/arch/i386/kernel/irq.c	2006-05-02 22:38:44.000000000 +0100
   20.27 -+++ ./arch/i386/kernel/irq.c	2006-05-04 17:41:49.000000000 +0100
   20.28 -@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPU
   20.29 -  */
   20.30 - fastcall unsigned int do_IRQ(struct pt_regs *regs)
   20.31 - {	
   20.32 --	/* high bits used in ret_from_ code */
   20.33 --	int irq = regs->orig_eax & 0xff;
   20.34 -+	/* high bit used in ret_from_ code */
   20.35 -+	int irq = ~regs->orig_eax;
   20.36 - #ifdef CONFIG_4KSTACKS
   20.37 - 	union irq_ctx *curctx, *irqctx;
   20.38 - 	u32 *isp;
   20.39 -diff -pruN ../pristine-linux-2.6.16.13/arch/x86_64/kernel/entry.S ./arch/x86_64/kernel/entry.S
   20.40 ---- ../pristine-linux-2.6.16.13/arch/x86_64/kernel/entry.S	2006-05-02 22:38:44.000000000 +0100
   20.41 -+++ ./arch/x86_64/kernel/entry.S	2006-05-04 17:41:49.000000000 +0100
   20.42 -@@ -601,7 +601,7 @@ retint_kernel:	
   20.43 -  */		
   20.44 - 	.macro apicinterrupt num,func
   20.45 - 	INTR_FRAME
   20.46 --	pushq $\num-256
   20.47 -+	pushq $~(\num)
   20.48 - 	CFI_ADJUST_CFA_OFFSET 8
   20.49 - 	interrupt \func
   20.50 - 	jmp ret_from_intr
   20.51 -diff -pruN ../pristine-linux-2.6.16.13/arch/x86_64/kernel/irq.c ./arch/x86_64/kernel/irq.c
   20.52 ---- ../pristine-linux-2.6.16.13/arch/x86_64/kernel/irq.c	2006-05-02 22:38:44.000000000 +0100
   20.53 -+++ ./arch/x86_64/kernel/irq.c	2006-05-04 17:41:49.000000000 +0100
   20.54 -@@ -96,8 +96,8 @@ skip:
   20.55 -  */
   20.56 - asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
   20.57 - {	
   20.58 --	/* high bits used in ret_from_ code  */
   20.59 --	unsigned irq = regs->orig_rax & 0xff;
   20.60 -+	/* high bit used in ret_from_ code  */
   20.61 -+	unsigned irq = ~regs->orig_rax;
   20.62 - 
   20.63 - 	exit_idle();
   20.64 - 	irq_enter();
   20.65 -diff -pruN ../pristine-linux-2.6.16.13/arch/x86_64/kernel/smp.c ./arch/x86_64/kernel/smp.c
   20.66 ---- ../pristine-linux-2.6.16.13/arch/x86_64/kernel/smp.c	2006-05-02 22:38:44.000000000 +0100
   20.67 -+++ ./arch/x86_64/kernel/smp.c	2006-05-04 17:41:49.000000000 +0100
   20.68 -@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt
   20.69 - 
   20.70 - 	cpu = smp_processor_id();
   20.71 - 	/*
   20.72 --	 * orig_rax contains the interrupt vector - 256.
   20.73 -+	 * orig_rax contains the negated interrupt vector.
   20.74 - 	 * Use that to determine where the sender put the data.
   20.75 - 	 */
   20.76 --	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
   20.77 -+	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
   20.78 - 	f = &per_cpu(flush_state, sender);
   20.79 - 
   20.80 - 	if (!cpu_isset(cpu, f->flush_cpumask))
   20.81 -diff -pruN ../pristine-linux-2.6.16.13/include/asm-x86_64/hw_irq.h ./include/asm-x86_64/hw_irq.h
   20.82 ---- ../pristine-linux-2.6.16.13/include/asm-x86_64/hw_irq.h	2006-05-02 22:38:44.000000000 +0100
   20.83 -+++ ./include/asm-x86_64/hw_irq.h	2006-05-04 17:41:49.000000000 +0100
   20.84 -@@ -127,7 +127,7 @@ asmlinkage void IRQ_NAME(nr); \
   20.85 - __asm__( \
   20.86 - "\n.p2align\n" \
   20.87 - "IRQ" #nr "_interrupt:\n\t" \
   20.88 --	"push $" #nr "-256 ; " \
   20.89 -+	"push $~(" #nr ") ; " \
   20.90 - 	"jmp common_interrupt");
   20.91 - 
   20.92 - #if defined(CONFIG_X86_IO_APIC)
    21.1 --- a/patches/linux-2.6.16.13/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Tue Sep 19 14:26:22 2006 +0100
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,174 +0,0 @@
    21.4 -Taken from 2.6.18-rc4-mm1.
    21.5 -
    21.6 -From: Jeremy Fitzhardinge <jeremy@xensource.com>
    21.7 -
    21.8 -This patch will pack any .note.* section into a PT_NOTE segment in the output
    21.9 -file.
   21.10 -
   21.11 -To do this, we tell ld that we need a PT_NOTE segment.  This requires us to
   21.12 -start explicitly mapping sections to segments, so we also need to explicitly
   21.13 -create PT_LOAD segments for text and data, and map the sections to them
   21.14 -appropriately.  Fortunately, each section will default to its previous
   21.15 -section's segment, so it doesn't take many changes to vmlinux.lds.S.
   21.16 -
   21.17 -This only changes i386 for now, but I presume the corresponding changes for
   21.18 -other architectures will be as simple.
   21.19 -
   21.20 -This change also adds <linux/elfnote.h>, which defines C and Assembler macros
   21.21 -for actually creating ELF notes.
   21.22 -
   21.23 -Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
   21.24 -Cc: Eric W. Biederman <ebiederm@xmission.com>
   21.25 -Cc: Hollis Blanchard <hollisb@us.ibm.com>
   21.26 -Signed-off-by: Andrew Morton <akpm@osdl.org>
   21.27 ----
   21.28 -
   21.29 - arch/i386/kernel/vmlinux.lds.S    |   12 +++
   21.30 - include/asm-generic/vmlinux.lds.h |    3 
   21.31 - include/linux/elfnote.h           |   88 ++++++++++++++++++++++++++++
   21.32 - 3 files changed, 101 insertions(+), 2 deletions(-)
   21.33 -
   21.34 -diff -puN arch/i386/kernel/vmlinux.lds.S~x86-put-note-sections-into-a-pt_note-segment-in-vmlinux arch/i386/kernel/vmlinux.lds.S
   21.35 ---- a/arch/i386/kernel/vmlinux.lds.S~x86-put-note-sections-into-a-pt_note-segment-in-vmlinux
   21.36 -+++ a/arch/i386/kernel/vmlinux.lds.S
   21.37 -@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
   21.38 - OUTPUT_ARCH(i386)
   21.39 - ENTRY(phys_startup_32)
   21.40 - jiffies = jiffies_64;
   21.41 -+
   21.42 -+PHDRS {
   21.43 -+	text PT_LOAD FLAGS(5);	/* R_E */
   21.44 -+	data PT_LOAD FLAGS(7);	/* RWE */
   21.45 -+	note PT_NOTE FLAGS(4);	/* R__ */
   21.46 -+}
   21.47 - SECTIONS
   21.48 - {
   21.49 -   . = __KERNEL_START;
   21.50 -@@ -26,7 +32,7 @@ SECTIONS
   21.51 - 	KPROBES_TEXT
   21.52 - 	*(.fixup)
   21.53 - 	*(.gnu.warning)
   21.54 --	} = 0x9090
   21.55 -+	} :text = 0x9090
   21.56 - 
   21.57 -   _etext = .;			/* End of text section */
   21.58 - 
   21.59 -@@ -50,7 +56,7 @@ SECTIONS
   21.60 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
   21.61 - 	*(.data)
   21.62 - 	CONSTRUCTORS
   21.63 --	}
   21.64 -+	} :data
   21.65 - 
   21.66 -   . = ALIGN(4096);
   21.67 -   __nosave_begin = .;
   21.68 -@@ -186,4 +192,6 @@ SECTIONS
   21.69 -   STABS_DEBUG
   21.70 - 
   21.71 -   DWARF_DEBUG
   21.72 -+
   21.73 -+  NOTES
   21.74 - }
   21.75 -diff -puN include/asm-generic/vmlinux.lds.h~x86-put-note-sections-into-a-pt_note-segment-in-vmlinux include/asm-generic/vmlinux.lds.h
   21.76 ---- a/include/asm-generic/vmlinux.lds.h~x86-put-note-sections-into-a-pt_note-segment-in-vmlinux
   21.77 -+++ a/include/asm-generic/vmlinux.lds.h
   21.78 -@@ -194,3 +194,6 @@
   21.79 - 		.stab.index 0 : { *(.stab.index) }			\
   21.80 - 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
   21.81 - 		.comment 0 : { *(.comment) }
   21.82 -+
   21.83 -+#define NOTES								\
   21.84 -+		.notes : { *(.note.*) } :note
   21.85 -diff -puN /dev/null include/linux/elfnote.h
   21.86 ---- /dev/null
   21.87 -+++ a/include/linux/elfnote.h
   21.88 -@@ -0,0 +1,88 @@
   21.89 -+#ifndef _LINUX_ELFNOTE_H
   21.90 -+#define _LINUX_ELFNOTE_H
   21.91 -+/*
   21.92 -+ * Helper macros to generate ELF Note structures, which are put into a
   21.93 -+ * PT_NOTE segment of the final vmlinux image.  These are useful for
   21.94 -+ * including name-value pairs of metadata into the kernel binary (or
   21.95 -+ * modules?) for use by external programs.
   21.96 -+ *
   21.97 -+ * Each note has three parts: a name, a type and a desc.  The name is
   21.98 -+ * intended to distinguish the note's originator, so it would be a
   21.99 -+ * company, project, subsystem, etc; it must be in a suitable form for
  21.100 -+ * use in a section name.  The type is an integer which is used to tag
  21.101 -+ * the data, and is considered to be within the "name" namespace (so
  21.102 -+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
  21.103 -+ * "desc" field is the actual data.  There are no constraints on the
  21.104 -+ * desc field's contents, though typically they're fairly small.
  21.105 -+ *
  21.106 -+ * All notes from a given NAME are put into a section named
  21.107 -+ * .note.NAME.  When the kernel image is finally linked, all the notes
  21.108 -+ * are packed into a single .notes section, which is mapped into the
  21.109 -+ * PT_NOTE segment.  Because notes for a given name are grouped into
  21.110 -+ * the same section, they'll all be adjacent the output file.
  21.111 -+ *
  21.112 -+ * This file defines macros for both C and assembler use.  Their
  21.113 -+ * syntax is slightly different, but they're semantically similar.
  21.114 -+ *
  21.115 -+ * See the ELF specification for more detail about ELF notes.
  21.116 -+ */
  21.117 -+
  21.118 -+#ifdef __ASSEMBLER__
  21.119 -+/*
  21.120 -+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
  21.121 -+ * turn out to be the same size and shape), followed by the name and
  21.122 -+ * desc data with appropriate padding.  The 'desc' argument includes
  21.123 -+ * the assembler pseudo op defining the type of the data: .asciz
  21.124 -+ * "hello, world"
  21.125 -+ */
  21.126 -+.macro ELFNOTE name type desc:vararg
  21.127 -+.pushsection ".note.\name"
  21.128 -+  .align 4
  21.129 -+  .long 2f - 1f			/* namesz */
  21.130 -+  .long 4f - 3f			/* descsz */
  21.131 -+  .long \type
  21.132 -+1:.asciz "\name"
  21.133 -+2:.align 4
  21.134 -+3:\desc
  21.135 -+4:.align 4
  21.136 -+.popsection
  21.137 -+.endm
  21.138 -+#else	/* !__ASSEMBLER__ */
  21.139 -+#include <linux/elf.h>
  21.140 -+/*
  21.141 -+ * Use an anonymous structure which matches the shape of
  21.142 -+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
  21.143 -+ * type of name and desc depend on the macro arguments.  "name" must
  21.144 -+ * be a literal string, and "desc" must be passed by value.  You may
  21.145 -+ * only define one note per line, since __LINE__ is used to generate
  21.146 -+ * unique symbols.
  21.147 -+ */
  21.148 -+#define _ELFNOTE_PASTE(a,b)	a##b
  21.149 -+#define _ELFNOTE(size, name, unique, type, desc)			\
  21.150 -+	static const struct {						\
  21.151 -+		struct elf##size##_note _nhdr;				\
  21.152 -+		unsigned char _name[sizeof(name)]			\
  21.153 -+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
  21.154 -+		typeof(desc) _desc					\
  21.155 -+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
  21.156 -+	} _ELFNOTE_PASTE(_note_, unique)				\
  21.157 -+		__attribute_used__					\
  21.158 -+		__attribute__((section(".note." name),			\
  21.159 -+			       aligned(sizeof(Elf##size##_Word)),	\
  21.160 -+			       unused)) = {				\
  21.161 -+		{							\
  21.162 -+			sizeof(name),					\
  21.163 -+			sizeof(desc),					\
  21.164 -+			type,						\
  21.165 -+		},							\
  21.166 -+		name,							\
  21.167 -+		desc							\
  21.168 -+	}
  21.169 -+#define ELFNOTE(size, name, type, desc)		\
  21.170 -+	_ELFNOTE(size, name, __LINE__, type, desc)
  21.171 -+
  21.172 -+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
  21.173 -+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
  21.174 -+#endif	/* __ASSEMBLER__ */
  21.175 -+
  21.176 -+#endif /* _LINUX_ELFNOTE_H */
  21.177 -_
    22.1 --- a/patches/linux-2.6.16.13/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Tue Sep 19 14:26:22 2006 +0100
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,83 +0,0 @@
    22.4 ---- a/arch/x86_64/kernel/vmlinux.lds.S
    22.5 -+++ b/arch/x86_64/kernel/vmlinux.lds.S
    22.6 -@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86
    22.7 - OUTPUT_ARCH(i386:x86-64)
    22.8 - ENTRY(phys_startup_64)
    22.9 - jiffies_64 = jiffies;
   22.10 -+PHDRS {
   22.11 -+	text PT_LOAD FLAGS(5);	/* R_E */
   22.12 -+	data PT_LOAD FLAGS(7);	/* RWE */
   22.13 -+	user PT_LOAD FLAGS(7);	/* RWE */
   22.14 -+	note PT_NOTE FLAGS(4);	/* R__ */
   22.15 -+}
   22.16 - SECTIONS
   22.17 - {
   22.18 -   . = __START_KERNEL;
   22.19 -@@ -31,7 +37,7 @@ SECTIONS
   22.20 - 	KPROBES_TEXT
   22.21 - 	*(.fixup)
   22.22 - 	*(.gnu.warning)
   22.23 --	} = 0x9090
   22.24 -+	} :text = 0x9090
   22.25 -   				/* out-of-line lock text */
   22.26 -   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
   22.27 - 
   22.28 -@@ -57,17 +63,10 @@ #endif
   22.29 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {
   22.30 - 	*(.data)
   22.31 - 	CONSTRUCTORS
   22.32 --	}
   22.33 -+	} :data
   22.34 - 
   22.35 -   _edata = .;			/* End of data section */
   22.36 - 
   22.37 --  __bss_start = .;		/* BSS */
   22.38 --  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   22.39 --	*(.bss.page_aligned)	
   22.40 --	*(.bss)
   22.41 --	}
   22.42 --  __bss_stop = .;
   22.43 --
   22.44 -   . = ALIGN(PAGE_SIZE);
   22.45 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   22.46 -   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
   22.47 -@@ -89,7 +88,7 @@ #define VVIRT_OFFSET (VSYSCALL_ADDR - VS
   22.48 - #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
   22.49 - 
   22.50 -   . = VSYSCALL_ADDR;
   22.51 --  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
   22.52 -+  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
   22.53 -   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
   22.54 - 
   22.55 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   22.56 -@@ -132,7 +131,7 @@ #undef VVIRT
   22.57 -   . = ALIGN(8192);		/* init_task */
   22.58 -   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
   22.59 - 	*(.data.init_task)
   22.60 --  }
   22.61 -+  } :data
   22.62 - 
   22.63 -   . = ALIGN(4096);
   22.64 -   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
   22.65 -@@ -222,6 +221,14 @@ SECTIONS
   22.66 -   . = ALIGN(4096);
   22.67 -   __nosave_end = .;
   22.68 - 
   22.69 -+  __bss_start = .;		/* BSS */
   22.70 -+  . = ALIGN(4096);
   22.71 -+  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   22.72 -+	*(.bss.page_aligned)
   22.73 -+	*(.bss)
   22.74 -+	}
   22.75 -+  __bss_stop = .;
   22.76 -+
   22.77 -   _end = . ;
   22.78 - 
   22.79 -   /* Sections to be discarded */
   22.80 -@@ -235,4 +242,6 @@ #endif
   22.81 -   STABS_DEBUG
   22.82 - 
   22.83 -   DWARF_DEBUG
   22.84 -+
   22.85 -+  NOTES
   22.86 - }
    23.1 --- a/patches/linux-2.6.16.13/xen-hotplug.patch	Tue Sep 19 14:26:22 2006 +0100
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,11 +0,0 @@
    23.4 ---- ../pristine-linux-2.6.16.13/fs/proc/proc_misc.c	2006-05-02 22:38:44.000000000 +0100
    23.5 -+++ ./fs/proc/proc_misc.c	2006-05-22 15:29:34.000000000 +0100
    23.6 -@@ -433,7 +433,7 @@ static int show_stat(struct seq_file *p,
    23.7 - 		(unsigned long long)cputime64_to_clock_t(irq),
    23.8 - 		(unsigned long long)cputime64_to_clock_t(softirq),
    23.9 - 		(unsigned long long)cputime64_to_clock_t(steal));
   23.10 --	for_each_online_cpu(i) {
   23.11 -+	for_each_cpu(i) {
   23.12 - 
   23.13 - 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
   23.14 - 		user = kstat_cpu(i).cpustat.user;
    24.1 --- a/patches/linux-2.6.16.13/xenoprof-generic.patch	Tue Sep 19 14:26:22 2006 +0100
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,615 +0,0 @@
    24.4 -diff -Naur orig/drivers/oprofile/buffer_sync.c new/drivers/oprofile/buffer_sync.c
    24.5 ---- orig/drivers/oprofile/buffer_sync.c	2006-05-02 14:38:44.000000000 -0700
    24.6 -+++ new/drivers/oprofile/buffer_sync.c	2006-07-06 18:19:05.000000000 -0700
    24.7 -@@ -6,6 +6,10 @@
    24.8 -  *
    24.9 -  * @author John Levon <levon@movementarian.org>
   24.10 -  *
   24.11 -+ * Modified by Aravind Menon for Xen
   24.12 -+ * These modifications are:
   24.13 -+ * Copyright (C) 2005 Hewlett-Packard Co.
   24.14 -+ *
   24.15 -  * This is the core of the buffer management. Each
   24.16 -  * CPU buffer is processed and entered into the
   24.17 -  * global event buffer. Such processing is necessary
   24.18 -@@ -275,15 +279,31 @@
   24.19 - 	last_cookie = INVALID_COOKIE;
   24.20 - }
   24.21 - 
   24.22 --static void add_kernel_ctx_switch(unsigned int in_kernel)
   24.23 -+static void add_cpu_mode_switch(unsigned int cpu_mode)
   24.24 - {
   24.25 - 	add_event_entry(ESCAPE_CODE);
   24.26 --	if (in_kernel)
   24.27 --		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
   24.28 --	else
   24.29 --		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
   24.30 -+	switch (cpu_mode) {
   24.31 -+	case CPU_MODE_USER:
   24.32 -+		add_event_entry(USER_ENTER_SWITCH_CODE);
   24.33 -+		break;
   24.34 -+	case CPU_MODE_KERNEL:
   24.35 -+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
   24.36 -+		break;
   24.37 -+	case CPU_MODE_XEN:
   24.38 -+		add_event_entry(XEN_ENTER_SWITCH_CODE);
   24.39 -+	  	break;
   24.40 -+	default:
   24.41 -+		break;
   24.42 -+	}
   24.43 - }
   24.44 -- 
   24.45 -+
   24.46 -+static void add_domain_switch(unsigned long domain_id)
   24.47 -+{
   24.48 -+	add_event_entry(ESCAPE_CODE);
   24.49 -+	add_event_entry(DOMAIN_SWITCH_CODE);
   24.50 -+	add_event_entry(domain_id);
   24.51 -+}
   24.52 -+
   24.53 - static void
   24.54 - add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
   24.55 - {
   24.56 -@@ -348,9 +368,9 @@
   24.57 -  * for later lookup from userspace.
   24.58 -  */
   24.59 - static int
   24.60 --add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
   24.61 -+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
   24.62 - {
   24.63 --	if (in_kernel) {
   24.64 -+	if (cpu_mode >= CPU_MODE_KERNEL) {
   24.65 - 		add_sample_entry(s->eip, s->event);
   24.66 - 		return 1;
   24.67 - 	} else if (mm) {
   24.68 -@@ -496,10 +516,11 @@
   24.69 - 	struct mm_struct *mm = NULL;
   24.70 - 	struct task_struct * new;
   24.71 - 	unsigned long cookie = 0;
   24.72 --	int in_kernel = 1;
   24.73 -+	int cpu_mode = 1;
   24.74 - 	unsigned int i;
   24.75 - 	sync_buffer_state state = sb_buffer_start;
   24.76 - 	unsigned long available;
   24.77 -+	int domain_switch = 0;
   24.78 - 
   24.79 - 	down(&buffer_sem);
   24.80 -  
   24.81 -@@ -512,16 +533,18 @@
   24.82 - 	for (i = 0; i < available; ++i) {
   24.83 - 		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
   24.84 -  
   24.85 --		if (is_code(s->eip)) {
   24.86 --			if (s->event <= CPU_IS_KERNEL) {
   24.87 --				/* kernel/userspace switch */
   24.88 --				in_kernel = s->event;
   24.89 -+		if (is_code(s->eip) && !domain_switch) {
   24.90 -+			if (s->event <= CPU_MODE_XEN) {
   24.91 -+				/* xen/kernel/userspace switch */
   24.92 -+				cpu_mode = s->event;
   24.93 - 				if (state == sb_buffer_start)
   24.94 - 					state = sb_sample_start;
   24.95 --				add_kernel_ctx_switch(s->event);
   24.96 -+				add_cpu_mode_switch(s->event);
   24.97 - 			} else if (s->event == CPU_TRACE_BEGIN) {
   24.98 - 				state = sb_bt_start;
   24.99 - 				add_trace_begin();
  24.100 -+			} else if (s->event == CPU_DOMAIN_SWITCH) {
  24.101 -+					domain_switch = 1;				
  24.102 - 			} else {
  24.103 - 				struct mm_struct * oldmm = mm;
  24.104 - 
  24.105 -@@ -535,11 +558,16 @@
  24.106 - 				add_user_ctx_switch(new, cookie);
  24.107 - 			}
  24.108 - 		} else {
  24.109 --			if (state >= sb_bt_start &&
  24.110 --			    !add_sample(mm, s, in_kernel)) {
  24.111 --				if (state == sb_bt_start) {
  24.112 --					state = sb_bt_ignore;
  24.113 --					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  24.114 -+			if (domain_switch) {
  24.115 -+				add_domain_switch(s->eip);
  24.116 -+				domain_switch = 0;
  24.117 -+			} else {
  24.118 -+				if (state >= sb_bt_start &&
  24.119 -+				    !add_sample(mm, s, cpu_mode)) {
  24.120 -+					if (state == sb_bt_start) {
  24.121 -+						state = sb_bt_ignore;
  24.122 -+						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  24.123 -+					}
  24.124 - 				}
  24.125 - 			}
  24.126 - 		}
  24.127 -diff -Naur orig/drivers/oprofile/cpu_buffer.c new/drivers/oprofile/cpu_buffer.c
  24.128 ---- orig/drivers/oprofile/cpu_buffer.c	2006-05-02 14:38:44.000000000 -0700
  24.129 -+++ new/drivers/oprofile/cpu_buffer.c	2006-07-06 18:19:05.000000000 -0700
  24.130 -@@ -6,6 +6,10 @@
  24.131 -  *
  24.132 -  * @author John Levon <levon@movementarian.org>
  24.133 -  *
  24.134 -+ * Modified by Aravind Menon for Xen
  24.135 -+ * These modifications are:
  24.136 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  24.137 -+ *
  24.138 -  * Each CPU has a local buffer that stores PC value/event
  24.139 -  * pairs. We also log context switches when we notice them.
  24.140 -  * Eventually each CPU's buffer is processed into the global
  24.141 -@@ -34,6 +38,8 @@
  24.142 - #define DEFAULT_TIMER_EXPIRE (HZ / 10)
  24.143 - static int work_enabled;
  24.144 - 
  24.145 -+static int32_t current_domain = COORDINATOR_DOMAIN;
  24.146 -+
  24.147 - void free_cpu_buffers(void)
  24.148 - {
  24.149 - 	int i;
  24.150 -@@ -58,7 +64,7 @@
  24.151 - 			goto fail;
  24.152 -  
  24.153 - 		b->last_task = NULL;
  24.154 --		b->last_is_kernel = -1;
  24.155 -+		b->last_cpu_mode = -1;
  24.156 - 		b->tracing = 0;
  24.157 - 		b->buffer_size = buffer_size;
  24.158 - 		b->tail_pos = 0;
  24.159 -@@ -114,7 +120,7 @@
  24.160 - 	 * collected will populate the buffer with proper
  24.161 - 	 * values to initialize the buffer
  24.162 - 	 */
  24.163 --	cpu_buf->last_is_kernel = -1;
  24.164 -+	cpu_buf->last_cpu_mode = -1;
  24.165 - 	cpu_buf->last_task = NULL;
  24.166 - }
  24.167 - 
  24.168 -@@ -164,13 +170,13 @@
  24.169 -  * because of the head/tail separation of the writer and reader
  24.170 -  * of the CPU buffer.
  24.171 -  *
  24.172 -- * is_kernel is needed because on some architectures you cannot
  24.173 -+ * cpu_mode is needed because on some architectures you cannot
  24.174 -  * tell if you are in kernel or user space simply by looking at
  24.175 -- * pc. We tag this in the buffer by generating kernel enter/exit
  24.176 -- * events whenever is_kernel changes
  24.177 -+ * pc. We tag this in the buffer by generating kernel/user (and xen)
  24.178 -+ *  enter events whenever cpu_mode changes
  24.179 -  */
  24.180 - static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
  24.181 --		      int is_kernel, unsigned long event)
  24.182 -+		      int cpu_mode, unsigned long event)
  24.183 - {
  24.184 - 	struct task_struct * task;
  24.185 - 
  24.186 -@@ -181,18 +187,18 @@
  24.187 - 		return 0;
  24.188 - 	}
  24.189 - 
  24.190 --	is_kernel = !!is_kernel;
  24.191 --
  24.192 - 	task = current;
  24.193 - 
  24.194 - 	/* notice a switch from user->kernel or vice versa */
  24.195 --	if (cpu_buf->last_is_kernel != is_kernel) {
  24.196 --		cpu_buf->last_is_kernel = is_kernel;
  24.197 --		add_code(cpu_buf, is_kernel);
  24.198 -+	if (cpu_buf->last_cpu_mode != cpu_mode) {
  24.199 -+		cpu_buf->last_cpu_mode = cpu_mode;
  24.200 -+		add_code(cpu_buf, cpu_mode);
  24.201 - 	}
  24.202 --
  24.203 -+	
  24.204 - 	/* notice a task switch */
  24.205 --	if (cpu_buf->last_task != task) {
  24.206 -+	/* if not processing other domain samples */
  24.207 -+	if ((cpu_buf->last_task != task) &&
  24.208 -+	    (current_domain == COORDINATOR_DOMAIN)) {
  24.209 - 		cpu_buf->last_task = task;
  24.210 - 		add_code(cpu_buf, (unsigned long)task);
  24.211 - 	}
  24.212 -@@ -269,6 +275,25 @@
  24.213 - 	add_sample(cpu_buf, pc, 0);
  24.214 - }
  24.215 - 
  24.216 -+int oprofile_add_domain_switch(int32_t domain_id)
  24.217 -+{
  24.218 -+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
  24.219 -+
  24.220 -+	/* should have space for switching into and out of domain 
  24.221 -+	   (2 slots each) plus one sample and one cpu mode switch */
  24.222 -+	if (((nr_available_slots(cpu_buf) < 6) && 
  24.223 -+	     (domain_id != COORDINATOR_DOMAIN)) ||
  24.224 -+	    (nr_available_slots(cpu_buf) < 2))
  24.225 -+		return 0;
  24.226 -+
  24.227 -+	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
  24.228 -+	add_sample(cpu_buf, domain_id, 0);
  24.229 -+
  24.230 -+	current_domain = domain_id;
  24.231 -+
  24.232 -+	return 1;
  24.233 -+}
  24.234 -+
  24.235 - /*
  24.236 -  * This serves to avoid cpu buffer overflow, and makes sure
  24.237 -  * the task mortuary progresses
  24.238 -diff -Naur orig/drivers/oprofile/cpu_buffer.h new/drivers/oprofile/cpu_buffer.h
  24.239 ---- orig/drivers/oprofile/cpu_buffer.h	2006-05-02 14:38:44.000000000 -0700
  24.240 -+++ new/drivers/oprofile/cpu_buffer.h	2006-07-06 18:19:05.000000000 -0700
  24.241 -@@ -36,7 +36,7 @@
  24.242 - 	volatile unsigned long tail_pos;
  24.243 - 	unsigned long buffer_size;
  24.244 - 	struct task_struct * last_task;
  24.245 --	int last_is_kernel;
  24.246 -+	int last_cpu_mode;
  24.247 - 	int tracing;
  24.248 - 	struct op_sample * buffer;
  24.249 - 	unsigned long sample_received;
  24.250 -@@ -51,7 +51,10 @@
  24.251 - void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
  24.252 - 
  24.253 - /* transient events for the CPU buffer -> event buffer */
  24.254 --#define CPU_IS_KERNEL 1
  24.255 --#define CPU_TRACE_BEGIN 2
  24.256 -+#define CPU_MODE_USER           0
  24.257 -+#define CPU_MODE_KERNEL         1
  24.258 -+#define CPU_MODE_XEN            2
  24.259 -+#define CPU_TRACE_BEGIN         3
  24.260 -+#define CPU_DOMAIN_SWITCH       4
  24.261 - 
  24.262 - #endif /* OPROFILE_CPU_BUFFER_H */
  24.263 -diff -Naur orig/drivers/oprofile/event_buffer.h new/drivers/oprofile/event_buffer.h
  24.264 ---- orig/drivers/oprofile/event_buffer.h	2006-05-02 14:38:44.000000000 -0700
  24.265 -+++ new/drivers/oprofile/event_buffer.h	2006-07-06 18:19:05.000000000 -0700
  24.266 -@@ -29,15 +29,20 @@
  24.267 - #define CPU_SWITCH_CODE 		2
  24.268 - #define COOKIE_SWITCH_CODE 		3
  24.269 - #define KERNEL_ENTER_SWITCH_CODE	4
  24.270 --#define KERNEL_EXIT_SWITCH_CODE		5
  24.271 -+#define USER_ENTER_SWITCH_CODE		5
  24.272 - #define MODULE_LOADED_CODE		6
  24.273 - #define CTX_TGID_CODE			7
  24.274 - #define TRACE_BEGIN_CODE		8
  24.275 - #define TRACE_END_CODE			9
  24.276 -+#define XEN_ENTER_SWITCH_CODE		10
  24.277 -+#define DOMAIN_SWITCH_CODE		11
  24.278 -  
  24.279 - #define INVALID_COOKIE ~0UL
  24.280 - #define NO_COOKIE 0UL
  24.281 - 
  24.282 -+/* Constant used to refer to coordinator domain (Xen) */
  24.283 -+#define COORDINATOR_DOMAIN -1
  24.284 -+
  24.285 - /* add data to the event buffer */
  24.286 - void add_event_entry(unsigned long data);
  24.287 -  
  24.288 -diff -Naur orig/drivers/oprofile/oprof.c new/drivers/oprofile/oprof.c
  24.289 ---- orig/drivers/oprofile/oprof.c	2006-05-02 14:38:44.000000000 -0700
  24.290 -+++ new/drivers/oprofile/oprof.c	2006-07-06 18:19:05.000000000 -0700
  24.291 -@@ -5,6 +5,10 @@
  24.292 -  * @remark Read the file COPYING
  24.293 -  *
  24.294 -  * @author John Levon <levon@movementarian.org>
  24.295 -+ *
  24.296 -+ * Modified by Aravind Menon for Xen
  24.297 -+ * These modifications are:
  24.298 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  24.299 -  */
  24.300 - 
  24.301 - #include <linux/kernel.h>
  24.302 -@@ -19,7 +23,7 @@
  24.303 - #include "cpu_buffer.h"
  24.304 - #include "buffer_sync.h"
  24.305 - #include "oprofile_stats.h"
  24.306 -- 
  24.307 -+
  24.308 - struct oprofile_operations oprofile_ops;
  24.309 - 
  24.310 - unsigned long oprofile_started;
  24.311 -@@ -33,6 +37,32 @@
  24.312 -  */
  24.313 - static int timer = 0;
  24.314 - 
  24.315 -+int oprofile_set_active(int active_domains[], unsigned int adomains)
  24.316 -+{
  24.317 -+	int err;
  24.318 -+
  24.319 -+	if (!oprofile_ops.set_active)
  24.320 -+		return -EINVAL;
  24.321 -+
  24.322 -+	down(&start_sem);
  24.323 -+	err = oprofile_ops.set_active(active_domains, adomains);
  24.324 -+	up(&start_sem);
  24.325 -+	return err;
  24.326 -+}
  24.327 -+
  24.328 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
  24.329 -+{
  24.330 -+	int err;
  24.331 -+
  24.332 -+	if (!oprofile_ops.set_passive)
  24.333 -+		return -EINVAL;
  24.334 -+
  24.335 -+	down(&start_sem);
  24.336 -+	err = oprofile_ops.set_passive(passive_domains, pdomains);
  24.337 -+	up(&start_sem);
  24.338 -+	return err;
  24.339 -+}
  24.340 -+
  24.341 - int oprofile_setup(void)
  24.342 - {
  24.343 - 	int err;
  24.344 -diff -Naur orig/drivers/oprofile/oprof.h new/drivers/oprofile/oprof.h
  24.345 ---- orig/drivers/oprofile/oprof.h	2006-05-02 14:38:44.000000000 -0700
  24.346 -+++ new/drivers/oprofile/oprof.h	2006-07-06 18:19:05.000000000 -0700
  24.347 -@@ -35,5 +35,8 @@
  24.348 - void oprofile_timer_init(struct oprofile_operations * ops);
  24.349 - 
  24.350 - int oprofile_set_backtrace(unsigned long depth);
  24.351 -+
  24.352 -+int oprofile_set_active(int active_domains[], unsigned int adomains);
  24.353 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
  24.354 -  
  24.355 - #endif /* OPROF_H */
  24.356 -diff -Naur orig/drivers/oprofile/oprofile_files.c new/drivers/oprofile/oprofile_files.c
  24.357 ---- orig/drivers/oprofile/oprofile_files.c	2006-05-02 14:38:44.000000000 -0700
  24.358 -+++ new/drivers/oprofile/oprofile_files.c	2006-07-06 18:19:05.000000000 -0700
  24.359 -@@ -5,15 +5,21 @@
  24.360 -  * @remark Read the file COPYING
  24.361 -  *
  24.362 -  * @author John Levon <levon@movementarian.org>
  24.363 -+ *
  24.364 -+ * Modified by Aravind Menon for Xen
  24.365 -+ * These modifications are:
  24.366 -+ * Copyright (C) 2005 Hewlett-Packard Co.	
  24.367 -  */
  24.368 - 
  24.369 - #include <linux/fs.h>
  24.370 - #include <linux/oprofile.h>
  24.371 -+#include <asm/uaccess.h>
  24.372 -+#include <linux/ctype.h>
  24.373 - 
  24.374 - #include "event_buffer.h"
  24.375 - #include "oprofile_stats.h"
  24.376 - #include "oprof.h"
  24.377 -- 
  24.378 -+
  24.379 - unsigned long fs_buffer_size = 131072;
  24.380 - unsigned long fs_cpu_buffer_size = 8192;
  24.381 - unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
  24.382 -@@ -117,11 +123,202 @@
  24.383 - static struct file_operations dump_fops = {
  24.384 - 	.write		= dump_write,
  24.385 - };
  24.386 -- 
  24.387 -+
  24.388 -+#define TMPBUFSIZE 512
  24.389 -+
  24.390 -+static unsigned int adomains = 0;
  24.391 -+static int active_domains[MAX_OPROF_DOMAINS + 1];
  24.392 -+static DEFINE_MUTEX(adom_mutex);
  24.393 -+
  24.394 -+static ssize_t adomain_write(struct file * file, char const __user * buf, 
  24.395 -+			     size_t count, loff_t * offset)
  24.396 -+{
  24.397 -+	char *tmpbuf;
  24.398 -+	char *startp, *endp;
  24.399 -+	int i;
  24.400 -+	unsigned long val;
  24.401 -+	ssize_t retval = count;
  24.402 -+	
  24.403 -+	if (*offset)
  24.404 -+		return -EINVAL;	
  24.405 -+	if (count > TMPBUFSIZE - 1)
  24.406 -+		return -EINVAL;
  24.407 -+
  24.408 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  24.409 -+		return -ENOMEM;
  24.410 -+
  24.411 -+	if (copy_from_user(tmpbuf, buf, count)) {
  24.412 -+		kfree(tmpbuf);
  24.413 -+		return -EFAULT;
  24.414 -+	}
  24.415 -+	tmpbuf[count] = 0;
  24.416 -+
  24.417 -+	mutex_lock(&adom_mutex);
  24.418 -+
  24.419 -+	startp = tmpbuf;
  24.420 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  24.421 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  24.422 -+		val = simple_strtoul(startp, &endp, 0);
  24.423 -+		if (endp == startp)
  24.424 -+			break;
  24.425 -+		while (ispunct(*endp) || isspace(*endp))
  24.426 -+			endp++;
  24.427 -+		active_domains[i] = val;
  24.428 -+		if (active_domains[i] != val)
  24.429 -+			/* Overflow, force error below */
  24.430 -+			i = MAX_OPROF_DOMAINS + 1;
  24.431 -+		startp = endp;
  24.432 -+	}
  24.433 -+	/* Force error on trailing junk */
  24.434 -+	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  24.435 -+
  24.436 -+	kfree(tmpbuf);
  24.437 -+
  24.438 -+	if (adomains > MAX_OPROF_DOMAINS
  24.439 -+	    || oprofile_set_active(active_domains, adomains)) {
  24.440 -+		adomains = 0;
  24.441 -+		retval = -EINVAL;
  24.442 -+	}
  24.443 -+
  24.444 -+	mutex_unlock(&adom_mutex);
  24.445 -+	return retval;
  24.446 -+}
  24.447 -+
  24.448 -+static ssize_t adomain_read(struct file * file, char __user * buf, 
  24.449 -+			    size_t count, loff_t * offset)
  24.450 -+{
  24.451 -+	char * tmpbuf;
  24.452 -+	size_t len;
  24.453 -+	int i;
  24.454 -+	ssize_t retval;
  24.455 -+
  24.456 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  24.457 -+		return -ENOMEM;
  24.458 -+
  24.459 -+	mutex_lock(&adom_mutex);
  24.460 -+
  24.461 -+	len = 0;
  24.462 -+	for (i = 0; i < adomains; i++)
  24.463 -+		len += snprintf(tmpbuf + len,
  24.464 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  24.465 -+				"%u ", active_domains[i]);
  24.466 -+	WARN_ON(len > TMPBUFSIZE);
  24.467 -+	if (len != 0 && len <= TMPBUFSIZE)
  24.468 -+		tmpbuf[len-1] = '\n';
  24.469 -+
  24.470 -+	mutex_unlock(&adom_mutex);
  24.471 -+
  24.472 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  24.473 -+
  24.474 -+	kfree(tmpbuf);
  24.475 -+	return retval;
  24.476 -+}
  24.477 -+
  24.478 -+
  24.479 -+static struct file_operations active_domain_ops = {
  24.480 -+	.read		= adomain_read,
  24.481 -+	.write		= adomain_write,
  24.482 -+};
  24.483 -+
  24.484 -+static unsigned int pdomains = 0;
  24.485 -+static int passive_domains[MAX_OPROF_DOMAINS];
  24.486 -+static DEFINE_MUTEX(pdom_mutex);
  24.487 -+
  24.488 -+static ssize_t pdomain_write(struct file * file, char const __user * buf, 
  24.489 -+			     size_t count, loff_t * offset)
  24.490 -+{
  24.491 -+	char *tmpbuf;
  24.492 -+	char *startp, *endp;
  24.493 -+	int i;
  24.494 -+	unsigned long val;
  24.495 -+	ssize_t retval = count;
  24.496 -+	
  24.497 -+	if (*offset)
  24.498 -+		return -EINVAL;	
  24.499 -+	if (count > TMPBUFSIZE - 1)
  24.500 -+		return -EINVAL;
  24.501 -+
  24.502 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  24.503 -+		return -ENOMEM;
  24.504 -+
  24.505 -+	if (copy_from_user(tmpbuf, buf, count)) {
  24.506 -+		kfree(tmpbuf);
  24.507 -+		return -EFAULT;
  24.508 -+	}
  24.509 -+	tmpbuf[count] = 0;
  24.510 -+
  24.511 -+	mutex_lock(&pdom_mutex);
  24.512 -+
  24.513 -+	startp = tmpbuf;
  24.514 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  24.515 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  24.516 -+		val = simple_strtoul(startp, &endp, 0);
  24.517 -+		if (endp == startp)
  24.518 -+			break;
  24.519 -+		while (ispunct(*endp) || isspace(*endp))
  24.520 -+			endp++;
  24.521 -+		passive_domains[i] = val;
  24.522 -+		if (passive_domains[i] != val)
  24.523 -+			/* Overflow, force error below */
  24.524 -+			i = MAX_OPROF_DOMAINS + 1;
  24.525 -+		startp = endp;
  24.526 -+	}
  24.527 -+	/* Force error on trailing junk */
  24.528 -+	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  24.529 -+
  24.530 -+	kfree(tmpbuf);
  24.531 -+
  24.532 -+	if (pdomains > MAX_OPROF_DOMAINS
  24.533 -+	    || oprofile_set_passive(passive_domains, pdomains)) {
  24.534 -+		pdomains = 0;
  24.535 -+		retval = -EINVAL;
  24.536 -+	}
  24.537 -+
  24.538 -+	mutex_unlock(&pdom_mutex);
  24.539 -+	return retval;
  24.540 -+}
  24.541 -+
  24.542 -+static ssize_t pdomain_read(struct file * file, char __user * buf, 
  24.543 -+			    size_t count, loff_t * offset)
  24.544 -+{
  24.545 -+	char * tmpbuf;
  24.546 -+	size_t len;
  24.547 -+	int i;
  24.548 -+	ssize_t retval;
  24.549 -+
  24.550 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  24.551 -+		return -ENOMEM;
  24.552 -+
  24.553 -+	mutex_lock(&pdom_mutex);
  24.554 -+
  24.555 -+	len = 0;
  24.556 -+	for (i = 0; i < pdomains; i++)
  24.557 -+		len += snprintf(tmpbuf + len,
  24.558 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  24.559 -+				"%u ", passive_domains[i]);
  24.560 -+	WARN_ON(len > TMPBUFSIZE);
  24.561 -+	if (len != 0 && len <= TMPBUFSIZE)
  24.562 -+		tmpbuf[len-1] = '\n';
  24.563 -+
  24.564 -+	mutex_unlock(&pdom_mutex);
  24.565 -+
  24.566 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  24.567 -+
  24.568 -+	kfree(tmpbuf);
  24.569 -+	return retval;
  24.570 -+}
  24.571 -+
  24.572 -+static struct file_operations passive_domain_ops = {
  24.573 -+	.read		= pdomain_read,
  24.574 -+	.write		= pdomain_write,
  24.575 -+};
  24.576 -+
  24.577 - void oprofile_create_files(struct super_block * sb, struct dentry * root)
  24.578 - {
  24.579 - 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
  24.580 - 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
  24.581 -+	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
  24.582 -+	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
  24.583 - 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
  24.584 - 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
  24.585 - 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
  24.586 -diff -Naur orig/include/linux/oprofile.h new/include/linux/oprofile.h
  24.587 ---- orig/include/linux/oprofile.h	2006-05-02 14:38:44.000000000 -0700
  24.588 -+++ new/include/linux/oprofile.h	2006-07-06 18:19:31.000000000 -0700
  24.589 -@@ -16,6 +16,8 @@
  24.590 - #include <linux/types.h>
  24.591 - #include <linux/spinlock.h>
  24.592 - #include <asm/atomic.h>
  24.593 -+
  24.594 -+#include <xen/interface/xenoprof.h>
  24.595 -  
  24.596 - struct super_block;
  24.597 - struct dentry;
  24.598 -@@ -27,6 +29,11 @@
  24.599 - 	/* create any necessary configuration files in the oprofile fs.
  24.600 - 	 * Optional. */
  24.601 - 	int (*create_files)(struct super_block * sb, struct dentry * root);
  24.602 -+	/* setup active domains with Xen */
  24.603 -+	int (*set_active)(int *active_domains, unsigned int adomains);
  24.604 -+        /* setup passive domains with Xen */
  24.605 -+        int (*set_passive)(int *passive_domains, unsigned int pdomains);
  24.606 -+	
  24.607 - 	/* Do any necessary interrupt setup. Optional. */
  24.608 - 	int (*setup)(void);
  24.609 - 	/* Do any necessary interrupt shutdown. Optional. */
  24.610 -@@ -68,6 +75,8 @@
  24.611 - /* add a backtrace entry, to be called from the ->backtrace callback */
  24.612 - void oprofile_add_trace(unsigned long eip);
  24.613 - 
  24.614 -+/* add a domain switch entry */
  24.615 -+int oprofile_add_domain_switch(int32_t domain_id);
  24.616 - 
  24.617 - /**
  24.618 -  * Create a file of the given name as a child of the given root, with
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/patches/linux-2.6.16.29/blktap-aio-16_03_06.patch	Tue Sep 19 14:26:47 2006 +0100
    25.3 @@ -0,0 +1,294 @@
    25.4 +diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
    25.5 +--- ../orig-linux-2.6.16.29/fs/aio.c	2006-09-12 19:02:10.000000000 +0100
    25.6 ++++ ./fs/aio.c	2006-09-19 13:58:49.000000000 +0100
    25.7 +@@ -34,6 +34,11 @@
    25.8 + #include <asm/uaccess.h>
    25.9 + #include <asm/mmu_context.h>
   25.10 + 
   25.11 ++#ifdef CONFIG_EPOLL
   25.12 ++#include <linux/poll.h>
   25.13 ++#include <linux/eventpoll.h>
   25.14 ++#endif
   25.15 ++
   25.16 + #if DEBUG > 1
   25.17 + #define dprintk		printk
   25.18 + #else
   25.19 +@@ -1016,6 +1021,10 @@ put_rq:
   25.20 + 	if (waitqueue_active(&ctx->wait))
   25.21 + 		wake_up(&ctx->wait);
   25.22 + 
   25.23 ++#ifdef CONFIG_EPOLL
   25.24 ++	if (ctx->file && waitqueue_active(&ctx->poll_wait))
   25.25 ++		wake_up(&ctx->poll_wait);
   25.26 ++#endif
   25.27 + 	if (ret)
   25.28 + 		put_ioctx(ctx);
   25.29 + 
   25.30 +@@ -1025,6 +1034,8 @@ put_rq:
   25.31 + /* aio_read_evt
   25.32 +  *	Pull an event off of the ioctx's event ring.  Returns the number of 
   25.33 +  *	events fetched (0 or 1 ;-)
   25.34 ++ *	If ent parameter is 0, just returns the number of events that would
   25.35 ++ *	be fetched.
   25.36 +  *	FIXME: make this use cmpxchg.
   25.37 +  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
   25.38 +  */
   25.39 +@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
   25.40 + 
   25.41 + 	head = ring->head % info->nr;
   25.42 + 	if (head != ring->tail) {
   25.43 +-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
   25.44 +-		*ent = *evp;
   25.45 +-		head = (head + 1) % info->nr;
   25.46 +-		smp_mb(); /* finish reading the event before updatng the head */
   25.47 +-		ring->head = head;
   25.48 +-		ret = 1;
   25.49 +-		put_aio_ring_event(evp, KM_USER1);
   25.50 ++		if (ent) { /* event requested */
   25.51 ++			struct io_event *evp =
   25.52 ++				aio_ring_event(info, head, KM_USER1);
   25.53 ++			*ent = *evp;
   25.54 ++			head = (head + 1) % info->nr;
   25.55 ++			/* finish reading the event before updating the head */
   25.56 ++			smp_mb();
   25.57 ++			ring->head = head;
   25.58 ++			ret = 1;
   25.59 ++			put_aio_ring_event(evp, KM_USER1);
   25.60 ++		} else /* only need to know availability */
   25.61 ++			ret = 1;
   25.62 + 	}
   25.63 + 	spin_unlock(&info->ring_lock);
   25.64 + 
   25.65 +@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
   25.66 + 
   25.67 + 	aio_cancel_all(ioctx);
   25.68 + 	wait_for_all_aios(ioctx);
   25.69 ++#ifdef CONFIG_EPOLL
   25.70 ++	/* forget the poll file, but it's up to the user to close it */
   25.71 ++	if (ioctx->file) {
   25.72 ++		ioctx->file->private_data = 0;
   25.73 ++		ioctx->file = 0;
   25.74 ++	}
   25.75 ++#endif
   25.76 + 	put_ioctx(ioctx);	/* once for the lookup */
   25.77 + }
   25.78 + 
   25.79 ++#ifdef CONFIG_EPOLL
   25.80 ++
   25.81 ++static int aio_queue_fd_close(struct inode *inode, struct file *file)
   25.82 ++{
   25.83 ++	struct kioctx *ioctx = file->private_data;
   25.84 ++	if (ioctx) {
   25.85 ++		file->private_data = 0;
   25.86 ++		spin_lock_irq(&ioctx->ctx_lock);
   25.87 ++		ioctx->file = 0;
   25.88 ++		spin_unlock_irq(&ioctx->ctx_lock);
   25.89 ++	}
   25.90 ++	return 0;
   25.91 ++}
   25.92 ++
   25.93 ++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
   25.94 ++{	unsigned int pollflags = 0;
   25.95 ++	struct kioctx *ioctx = file->private_data;
   25.96 ++
   25.97 ++	if (ioctx) {
   25.98 ++
   25.99 ++		spin_lock_irq(&ioctx->ctx_lock);
  25.100 ++		/* Insert inside our poll wait queue */
  25.101 ++		poll_wait(file, &ioctx->poll_wait, wait);
  25.102 ++
  25.103 ++		/* Check our condition */
  25.104 ++		if (aio_read_evt(ioctx, 0))
  25.105 ++			pollflags = POLLIN | POLLRDNORM;
  25.106 ++		spin_unlock_irq(&ioctx->ctx_lock);
  25.107 ++	}
  25.108 ++
  25.109 ++	return pollflags;
  25.110 ++}
  25.111 ++
  25.112 ++static struct file_operations aioq_fops = {
  25.113 ++	.release	= aio_queue_fd_close,
  25.114 ++	.poll		= aio_queue_fd_poll
  25.115 ++};
  25.116 ++
  25.117 ++/* make_aio_fd:
  25.118 ++ *  Create a file descriptor that can be used to poll the event queue.
  25.119 ++ *  Based and piggybacked on the excellent epoll code.
  25.120 ++ */
  25.121 ++
  25.122 ++static int make_aio_fd(struct kioctx *ioctx)
  25.123 ++{
  25.124 ++	int error, fd;
  25.125 ++	struct inode *inode;
  25.126 ++	struct file *file;
  25.127 ++
  25.128 ++	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
  25.129 ++	if (error)
  25.130 ++		return error;
  25.131 ++
  25.132 ++	/* associate the file with the IO context */
  25.133 ++	file->private_data = ioctx;
  25.134 ++	ioctx->file = file;
  25.135 ++	init_waitqueue_head(&ioctx->poll_wait);
  25.136 ++	return fd;
  25.137 ++}
  25.138 ++#endif
  25.139 ++
  25.140 ++
  25.141 + /* sys_io_setup:
  25.142 +  *	Create an aio_context capable of receiving at least nr_events.
  25.143 +  *	ctxp must not point to an aio_context that already exists, and
  25.144 +@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
  25.145 +  *	resources are available.  May fail with -EFAULT if an invalid
  25.146 +  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
  25.147 +  *	implemented.
  25.148 ++ *
  25.149 ++ *	To request a selectable fd, the user context has to be initialized
  25.150 ++ *	to 1, instead of 0, and the return value is the fd.
  25.151 ++ *	This keeps the system call compatible, since a non-zero value
  25.152 ++ *	was not allowed so far.
  25.153 +  */
  25.154 + asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
  25.155 + {
  25.156 + 	struct kioctx *ioctx = NULL;
  25.157 + 	unsigned long ctx;
  25.158 + 	long ret;
  25.159 ++	int make_fd = 0;
  25.160 + 
  25.161 + 	ret = get_user(ctx, ctxp);
  25.162 + 	if (unlikely(ret))
  25.163 + 		goto out;
  25.164 + 
  25.165 + 	ret = -EINVAL;
  25.166 ++#ifdef CONFIG_EPOLL
  25.167 ++	if (ctx == 1) {
  25.168 ++		make_fd = 1;
  25.169 ++		ctx = 0;
  25.170 ++	}
  25.171 ++#endif
  25.172 + 	if (unlikely(ctx || nr_events == 0)) {
  25.173 + 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
  25.174 + 		         ctx, nr_events);
  25.175 +@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
  25.176 + 	ret = PTR_ERR(ioctx);
  25.177 + 	if (!IS_ERR(ioctx)) {
  25.178 + 		ret = put_user(ioctx->user_id, ctxp);
  25.179 +-		if (!ret)
  25.180 +-			return 0;
  25.181 ++#ifdef CONFIG_EPOLL
  25.182 ++		if (make_fd && ret >= 0)
  25.183 ++			ret = make_aio_fd(ioctx);
  25.184 ++#endif
  25.185 ++		if (ret >= 0)
  25.186 ++			return ret;
  25.187 + 
  25.188 + 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
  25.189 + 		io_destroy(ioctx);
  25.190 +diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
  25.191 +--- ../orig-linux-2.6.16.29/fs/eventpoll.c	2006-09-12 19:02:10.000000000 +0100
  25.192 ++++ ./fs/eventpoll.c	2006-09-19 13:58:49.000000000 +0100
  25.193 +@@ -235,8 +235,6 @@ struct ep_pqueue {
  25.194 + 
  25.195 + static void ep_poll_safewake_init(struct poll_safewake *psw);
  25.196 + static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
  25.197 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  25.198 +-		    struct eventpoll *ep);
  25.199 + static int ep_alloc(struct eventpoll **pep);
  25.200 + static void ep_free(struct eventpoll *ep);
  25.201 + static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
  25.202 +@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
  25.203 + static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
  25.204 + 		   int maxevents, long timeout);
  25.205 + static int eventpollfs_delete_dentry(struct dentry *dentry);
  25.206 +-static struct inode *ep_eventpoll_inode(void);
  25.207 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops);
  25.208 + static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
  25.209 + 					      int flags, const char *dev_name,
  25.210 + 					      void *data);
  25.211 +@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
  25.212 + 	 * Creates all the items needed to setup an eventpoll file. That is,
  25.213 + 	 * a file structure, and inode and a free file descriptor.
  25.214 + 	 */
  25.215 +-	error = ep_getfd(&fd, &inode, &file, ep);
  25.216 ++	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
  25.217 + 	if (error)
  25.218 + 		goto eexit_2;
  25.219 + 
  25.220 +@@ -710,8 +708,8 @@ eexit_1:
  25.221 + /*
  25.222 +  * Creates the file descriptor to be used by the epoll interface.
  25.223 +  */
  25.224 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  25.225 +-		    struct eventpoll *ep)
  25.226 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  25.227 ++		    struct eventpoll *ep, struct file_operations *fops)
  25.228 + {
  25.229 + 	struct qstr this;
  25.230 + 	char name[32];
  25.231 +@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
  25.232 + 		goto eexit_1;
  25.233 + 
  25.234 + 	/* Allocates an inode from the eventpoll file system */
  25.235 +-	inode = ep_eventpoll_inode();
  25.236 ++	inode = ep_eventpoll_inode(fops);
  25.237 + 	error = PTR_ERR(inode);
  25.238 + 	if (IS_ERR(inode))
  25.239 + 		goto eexit_2;
  25.240 +@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
  25.241 + 
  25.242 + 	file->f_pos = 0;
  25.243 + 	file->f_flags = O_RDONLY;
  25.244 +-	file->f_op = &eventpoll_fops;
  25.245 ++	file->f_op = fops;
  25.246 + 	file->f_mode = FMODE_READ;
  25.247 + 	file->f_version = 0;
  25.248 + 	file->private_data = ep;
  25.249 +@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
  25.250 + }
  25.251 + 
  25.252 + 
  25.253 +-static struct inode *ep_eventpoll_inode(void)
  25.254 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops)
  25.255 + {
  25.256 + 	int error = -ENOMEM;
  25.257 + 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
  25.258 +@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
  25.259 + 	if (!inode)
  25.260 + 		goto eexit_1;
  25.261 + 
  25.262 +-	inode->i_fop = &eventpoll_fops;
  25.263 ++	inode->i_fop = fops;
  25.264 + 
  25.265 + 	/*
  25.266 + 	 * Mark the inode dirty from the very beginning,
  25.267 +diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
  25.268 +--- ../orig-linux-2.6.16.29/include/linux/aio.h	2006-09-12 19:02:10.000000000 +0100
  25.269 ++++ ./include/linux/aio.h	2006-09-19 13:58:49.000000000 +0100
  25.270 +@@ -191,6 +191,11 @@ struct kioctx {
  25.271 + 	struct aio_ring_info	ring_info;
  25.272 + 
  25.273 + 	struct work_struct	wq;
  25.274 ++#ifdef CONFIG_EPOLL
  25.275 ++	// poll integration
  25.276 ++	wait_queue_head_t       poll_wait;
  25.277 ++	struct file		*file;
  25.278 ++#endif
  25.279 + };
  25.280 + 
  25.281 + /* prototypes */
  25.282 +diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
  25.283 +--- ../orig-linux-2.6.16.29/include/linux/eventpoll.h	2006-09-12 19:02:10.000000000 +0100
  25.284 ++++ ./include/linux/eventpoll.h	2006-09-19 13:58:49.000000000 +0100
  25.285 +@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
  25.286 + }
  25.287 + 
  25.288 + 
  25.289 ++/*
   25.290 ++ * called by aio code to create fd that can poll the aio event queue
  25.291 ++ */
  25.292 ++struct eventpoll;
  25.293 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  25.294 ++             struct eventpoll *ep, struct file_operations *fops);
  25.295 + #else
  25.296 + 
  25.297 + static inline void eventpoll_init_file(struct file *file) {}
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/patches/linux-2.6.16.29/device_bind.patch	Tue Sep 19 14:26:47 2006 +0100
    26.3 @@ -0,0 +1,15 @@
    26.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/base/bus.c ./drivers/base/bus.c
    26.5 +--- ../orig-linux-2.6.16.29/drivers/base/bus.c	2006-09-12 19:02:10.000000000 +0100
    26.6 ++++ ./drivers/base/bus.c	2006-09-19 13:58:54.000000000 +0100
    26.7 +@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
    26.8 + 		up(&dev->sem);
    26.9 + 		if (dev->parent)
   26.10 + 			up(&dev->parent->sem);
   26.11 ++
   26.12 ++		if (err > 0) 		/* success */
   26.13 ++			err = count;
   26.14 ++		else if (err == 0)	/* driver didn't accept device */
   26.15 ++			err = -ENODEV;
   26.16 + 	}
   26.17 + 	put_device(dev);
   26.18 + 	put_bus(bus);
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/patches/linux-2.6.16.29/fix-hz-suspend.patch	Tue Sep 19 14:26:47 2006 +0100
    27.3 @@ -0,0 +1,26 @@
    27.4 +diff -pruN ../orig-linux-2.6.16.29/kernel/timer.c ./kernel/timer.c
    27.5 +--- ../orig-linux-2.6.16.29/kernel/timer.c	2006-09-12 19:02:10.000000000 +0100
    27.6 ++++ ./kernel/timer.c	2006-09-19 13:58:58.000000000 +0100
    27.7 +@@ -555,6 +555,22 @@ found:
    27.8 + 	}
    27.9 + 	spin_unlock(&base->t_base.lock);
   27.10 + 
   27.11 ++	/*
   27.12 ++	 * It can happen that other CPUs service timer IRQs and increment
   27.13 ++	 * jiffies, but we have not yet got a local timer tick to process
   27.14 ++	 * the timer wheels.  In that case, the expiry time can be before
   27.15 ++	 * jiffies, but since the high-resolution timer here is relative to
   27.16 ++	 * jiffies, the default expression when high-resolution timers are
   27.17 ++	 * not active,
   27.18 ++	 *
   27.19 ++	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
   27.20 ++	 *
   27.21 ++	 * would falsely evaluate to true.  If that is the case, just
   27.22 ++	 * return jiffies so that we can immediately fire the local timer
   27.23 ++	 */
   27.24 ++	if (time_before(expires, jiffies))
   27.25 ++		return jiffies;
   27.26 ++
   27.27 + 	if (time_before(hr_expires, expires))
   27.28 + 		return hr_expires;
   27.29 + 
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/patches/linux-2.6.16.29/fix-ide-cd-pio-mode.patch	Tue Sep 19 14:26:47 2006 +0100
    28.3 @@ -0,0 +1,18 @@
    28.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
    28.5 +--- ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c	2006-09-12 19:02:10.000000000 +0100
    28.6 ++++ ./drivers/ide/ide-lib.c	2006-09-19 13:59:03.000000000 +0100
    28.7 +@@ -410,10 +410,10 @@ void ide_toggle_bounce(ide_drive_t *driv
    28.8 + {
    28.9 + 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
   28.10 + 
   28.11 +-	if (!PCI_DMA_BUS_IS_PHYS) {
   28.12 +-		addr = BLK_BOUNCE_ANY;
   28.13 +-	} else if (on && drive->media == ide_disk) {
   28.14 +-		if (HWIF(drive)->pci_dev)
   28.15 ++	if (on && drive->media == ide_disk) {
   28.16 ++		if (!PCI_DMA_BUS_IS_PHYS)
   28.17 ++			addr = BLK_BOUNCE_ANY;
   28.18 ++		else if (HWIF(drive)->pci_dev)
   28.19 + 			addr = HWIF(drive)->pci_dev->dma_mask;
   28.20 + 	}
   28.21 + 
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/patches/linux-2.6.16.29/i386-mach-io-check-nmi.patch	Tue Sep 19 14:26:47 2006 +0100
    29.3 @@ -0,0 +1,45 @@
    29.4 +diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
    29.5 +--- ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c	2006-09-12 19:02:10.000000000 +0100
    29.6 ++++ ./arch/i386/kernel/traps.c	2006-09-19 13:59:06.000000000 +0100
    29.7 +@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
    29.8 + 
    29.9 + static void io_check_error(unsigned char reason, struct pt_regs * regs)
   29.10 + {
   29.11 +-	unsigned long i;
   29.12 +-
   29.13 + 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
   29.14 + 	show_registers(regs);
   29.15 + 
   29.16 + 	/* Re-enable the IOCK line, wait for a few seconds */
   29.17 +-	reason = (reason & 0xf) | 8;
   29.18 +-	outb(reason, 0x61);
   29.19 +-	i = 2000;
   29.20 +-	while (--i) udelay(1000);
   29.21 +-	reason &= ~8;
   29.22 +-	outb(reason, 0x61);
   29.23 ++	clear_io_check_error(reason);
   29.24 + }
   29.25 + 
   29.26 + static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
   29.27 +diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
   29.28 +--- ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h	2006-09-12 19:02:10.000000000 +0100
   29.29 ++++ ./include/asm-i386/mach-default/mach_traps.h	2006-09-19 13:59:06.000000000 +0100
   29.30 +@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
   29.31 + 	outb(reason, 0x61);
   29.32 + }
   29.33 + 
   29.34 ++static inline void clear_io_check_error(unsigned char reason)
   29.35 ++{
   29.36 ++	unsigned long i;
   29.37 ++
   29.38 ++	reason = (reason & 0xf) | 8;
   29.39 ++	outb(reason, 0x61);
   29.40 ++	i = 2000;
   29.41 ++	while (--i) udelay(1000);
   29.42 ++	reason &= ~8;
   29.43 ++	outb(reason, 0x61);
   29.44 ++}
   29.45 ++
   29.46 + static inline unsigned char get_nmi_reason(void)
   29.47 + {
   29.48 + 	return inb(0x61);
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/patches/linux-2.6.16.29/ipv6-no-autoconf.patch	Tue Sep 19 14:26:47 2006 +0100
    30.3 @@ -0,0 +1,19 @@
    30.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv6/addrconf.c ./net/ipv6/addrconf.c
    30.5 +--- ../orig-linux-2.6.16.29/net/ipv6/addrconf.c	2006-09-12 19:02:10.000000000 +0100
    30.6 ++++ ./net/ipv6/addrconf.c	2006-09-19 13:59:11.000000000 +0100
    30.7 +@@ -2471,6 +2471,7 @@ static void addrconf_dad_start(struct in
    30.8 + 	spin_lock_bh(&ifp->lock);
    30.9 + 
   30.10 + 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
   30.11 ++	    !(dev->flags&IFF_MULTICAST) ||
   30.12 + 	    !(ifp->flags&IFA_F_TENTATIVE)) {
   30.13 + 		ifp->flags &= ~IFA_F_TENTATIVE;
   30.14 + 		spin_unlock_bh(&ifp->lock);
   30.15 +@@ -2555,6 +2556,7 @@ static void addrconf_dad_completed(struc
   30.16 + 	if (ifp->idev->cnf.forwarding == 0 &&
   30.17 + 	    ifp->idev->cnf.rtr_solicits > 0 &&
   30.18 + 	    (dev->flags&IFF_LOOPBACK) == 0 &&
   30.19 ++	    (dev->flags & IFF_MULTICAST) &&
   30.20 + 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
   30.21 + 		struct in6_addr all_routers;
   30.22 + 
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/patches/linux-2.6.16.29/net-csum.patch	Tue Sep 19 14:26:47 2006 +0100
    31.3 @@ -0,0 +1,63 @@
    31.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
    31.5 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-12 19:02:10.000000000 +0100
    31.6 ++++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-19 13:59:15.000000000 +0100
    31.7 +@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
    31.8 + 	if (hdrsize < sizeof(*hdr))
    31.9 + 		return 1;
   31.10 + 
   31.11 +-	hdr->check = ip_nat_cheat_check(~oldip, newip,
   31.12 ++#ifdef CONFIG_XEN
   31.13 ++	if ((*pskb)->proto_csum_blank)
   31.14 ++		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   31.15 ++	else
   31.16 ++#endif
   31.17 ++		hdr->check = ip_nat_cheat_check(~oldip, newip,
   31.18 + 					ip_nat_cheat_check(oldport ^ 0xFFFF,
   31.19 + 							   newport,
   31.20 + 							   hdr->check));
   31.21 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
   31.22 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-12 19:02:10.000000000 +0100
   31.23 ++++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-19 13:59:15.000000000 +0100
   31.24 +@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
   31.25 + 		newport = tuple->dst.u.udp.port;
   31.26 + 		portptr = &hdr->dest;
   31.27 + 	}
   31.28 +-	if (hdr->check) /* 0 is a special case meaning no checksum */
   31.29 +-		hdr->check = ip_nat_cheat_check(~oldip, newip,
   31.30 ++	if (hdr->check) { /* 0 is a special case meaning no checksum */
   31.31 ++#ifdef CONFIG_XEN
   31.32 ++		if ((*pskb)->proto_csum_blank)
   31.33 ++			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   31.34 ++		else
   31.35 ++#endif
   31.36 ++			hdr->check = ip_nat_cheat_check(~oldip, newip,
   31.37 + 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
   31.38 + 							   newport,
   31.39 + 							   hdr->check));
   31.40 ++	}
   31.41 + 	*portptr = newport;
   31.42 + 	return 1;
   31.43 + }
   31.44 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
   31.45 +--- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-12 19:02:10.000000000 +0100
   31.46 ++++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
   31.47 +@@ -17,6 +17,8 @@
   31.48 + #include <net/xfrm.h>
   31.49 + #include <net/icmp.h>
   31.50 + 
   31.51 ++extern int skb_checksum_setup(struct sk_buff *skb);
   31.52 ++
   31.53 + /* Add encapsulation header.
   31.54 +  *
   31.55 +  * In transport mode, the IP header will be moved forward to make space
   31.56 +@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
   31.57 + 	struct xfrm_state *x = dst->xfrm;
   31.58 + 	int err;
   31.59 + 	
   31.60 ++	err = skb_checksum_setup(skb);
   31.61 ++	if (err)
   31.62 ++		goto error_nolock;
   31.63 ++
   31.64 + 	if (skb->ip_summed == CHECKSUM_HW) {
   31.65 + 		err = skb_checksum_help(skb, 0);
   31.66 + 		if (err)
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/patches/linux-2.6.16.29/net-gso-0-base.patch	Tue Sep 19 14:26:47 2006 +0100
    32.3 @@ -0,0 +1,2835 @@
    32.4 +diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
    32.5 +--- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt	2006-09-12 19:02:10.000000000 +0100
    32.6 ++++ ./Documentation/networking/netdevices.txt	2006-09-19 13:59:20.000000000 +0100
    32.7 +@@ -42,9 +42,9 @@ dev->get_stats:
    32.8 + 	Context: nominally process, but don't sleep inside an rwlock
    32.9 + 
   32.10 + dev->hard_start_xmit:
   32.11 +-	Synchronization: dev->xmit_lock spinlock.
   32.12 ++	Synchronization: netif_tx_lock spinlock.
   32.13 + 	When the driver sets NETIF_F_LLTX in dev->features this will be
   32.14 +-	called without holding xmit_lock. In this case the driver 
   32.15 ++	called without holding netif_tx_lock. In this case the driver
   32.16 + 	has to lock by itself when needed. It is recommended to use a try lock
   32.17 + 	for this and return -1 when the spin lock fails. 
   32.18 + 	The locking there should also properly protect against 
   32.19 +@@ -62,12 +62,12 @@ dev->hard_start_xmit:
   32.20 + 	  Only valid when NETIF_F_LLTX is set.
   32.21 + 
   32.22 + dev->tx_timeout:
   32.23 +-	Synchronization: dev->xmit_lock spinlock.
   32.24 ++	Synchronization: netif_tx_lock spinlock.
   32.25 + 	Context: BHs disabled
   32.26 + 	Notes: netif_queue_stopped() is guaranteed true
   32.27 + 
   32.28 + dev->set_multicast_list:
   32.29 +-	Synchronization: dev->xmit_lock spinlock.
   32.30 ++	Synchronization: netif_tx_lock spinlock.
   32.31 + 	Context: BHs disabled
   32.32 + 
   32.33 + dev->poll:
   32.34 +diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
   32.35 +--- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c	2006-09-12 19:02:10.000000000 +0100
   32.36 ++++ ./drivers/block/aoe/aoenet.c	2006-09-19 13:59:20.000000000 +0100
   32.37 +@@ -95,9 +95,8 @@ mac_addr(char addr[6])
   32.38 + static struct sk_buff *
   32.39 + skb_check(struct sk_buff *skb)
   32.40 + {
   32.41 +-	if (skb_is_nonlinear(skb))
   32.42 + 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
   32.43 +-	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
   32.44 ++	if (skb_linearize(skb)) {
   32.45 + 		dev_kfree_skb(skb);
   32.46 + 		return NULL;
   32.47 + 	}
   32.48 +diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
   32.49 +--- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-12 19:02:10.000000000 +0100
   32.50 ++++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 13:59:20.000000000 +0100
   32.51 +@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
   32.52 + 
   32.53 + 	ipoib_mcast_stop_thread(dev, 0);
   32.54 + 
   32.55 +-	spin_lock_irqsave(&dev->xmit_lock, flags);
   32.56 ++	local_irq_save(flags);
   32.57 ++	netif_tx_lock(dev);
   32.58 + 	spin_lock(&priv->lock);
   32.59 + 
   32.60 + 	/*
   32.61 +@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
   32.62 + 	}
   32.63 + 
   32.64 + 	spin_unlock(&priv->lock);
   32.65 +-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
   32.66 ++	netif_tx_unlock(dev);
   32.67 ++	local_irq_restore(flags);
   32.68 + 
   32.69 + 	/* We have to cancel outside of the spinlock */
   32.70 + 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
   32.71 +diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
   32.72 +--- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c	2006-09-12 19:02:10.000000000 +0100
   32.73 ++++ ./drivers/media/dvb/dvb-core/dvb_net.c	2006-09-19 13:59:20.000000000 +0100
   32.74 +@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
   32.75 + 
   32.76 + 	dvb_net_feed_stop(dev);
   32.77 + 	priv->rx_mode = RX_MODE_UNI;
   32.78 +-	spin_lock_bh(&dev->xmit_lock);
   32.79 ++	netif_tx_lock_bh(dev);
   32.80 + 
   32.81 + 	if (dev->flags & IFF_PROMISC) {
   32.82 + 		dprintk("%s: promiscuous mode\n", dev->name);
   32.83 +@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
   32.84 + 		}
   32.85 + 	}
   32.86 + 
   32.87 +-	spin_unlock_bh(&dev->xmit_lock);
   32.88 ++	netif_tx_unlock_bh(dev);
   32.89 + 	dvb_net_feed_start(dev);
   32.90 + }
   32.91 + 
   32.92 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
   32.93 +--- ../orig-linux-2.6.16.29/drivers/net/8139cp.c	2006-09-12 19:02:10.000000000 +0100
   32.94 ++++ ./drivers/net/8139cp.c	2006-09-19 13:59:20.000000000 +0100
   32.95 +@@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
   32.96 + 	entry = cp->tx_head;
   32.97 + 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   32.98 + 	if (dev->features & NETIF_F_TSO)
   32.99 +-		mss = skb_shinfo(skb)->tso_size;
  32.100 ++		mss = skb_shinfo(skb)->gso_size;
  32.101 + 
  32.102 + 	if (skb_shinfo(skb)->nr_frags == 0) {
  32.103 + 		struct cp_desc *txd = &cp->tx_ring[entry];
  32.104 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
  32.105 +--- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-12 19:02:10.000000000 +0100
  32.106 ++++ ./drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
  32.107 +@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
  32.108 + 		skb = tx_buf->skb;
  32.109 + #ifdef BCM_TSO 
  32.110 + 		/* partial BD completions possible with TSO packets */
  32.111 +-		if (skb_shinfo(skb)->tso_size) {
  32.112 ++		if (skb_shinfo(skb)->gso_size) {
  32.113 + 			u16 last_idx, last_ring_idx;
  32.114 + 
  32.115 + 			last_idx = sw_cons +
  32.116 +@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
  32.117 + 	return 1;
  32.118 + }
  32.119 + 
  32.120 +-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
  32.121 ++/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  32.122 +  * from set_multicast.
  32.123 +  */
  32.124 + static void
  32.125 +@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
  32.126 + }
  32.127 + #endif
  32.128 + 
  32.129 +-/* Called with dev->xmit_lock.
  32.130 ++/* Called with netif_tx_lock.
  32.131 +  * hard_start_xmit is pseudo-lockless - a lock is only required when
  32.132 +  * the tx queue is full. This way, we get the benefit of lockless
  32.133 +  * operations most of the time without the complexities to handle
  32.134 +@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
  32.135 + 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
  32.136 + 	}
  32.137 + #ifdef BCM_TSO 
  32.138 +-	if ((mss = skb_shinfo(skb)->tso_size) &&
  32.139 ++	if ((mss = skb_shinfo(skb)->gso_size) &&
  32.140 + 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
  32.141 + 		u32 tcp_opt_len, ip_tcp_len;
  32.142 + 
  32.143 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
  32.144 +--- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c	2006-09-12 19:02:10.000000000 +0100
  32.145 ++++ ./drivers/net/bonding/bond_main.c	2006-09-19 13:59:20.000000000 +0100
  32.146 +@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
  32.147 + }
  32.148 + 
  32.149 + #define BOND_INTERSECT_FEATURES \
  32.150 +-	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
  32.151 +-	NETIF_F_TSO|NETIF_F_UFO)
  32.152 ++	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
  32.153 + 
  32.154 + /* 
  32.155 +  * Compute the common dev->feature set available to all slaves.  Some
  32.156 +@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
  32.157 + 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
  32.158 + 
  32.159 + 	if ((features & NETIF_F_SG) && 
  32.160 +-	    !(features & (NETIF_F_IP_CSUM |
  32.161 +-			  NETIF_F_NO_CSUM |
  32.162 +-			  NETIF_F_HW_CSUM)))
  32.163 ++	    !(features & NETIF_F_ALL_CSUM))
  32.164 + 		features &= ~NETIF_F_SG;
  32.165 + 
  32.166 + 	/* 
  32.167 +@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
  32.168 + 	 */
  32.169 + 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
  32.170 + 
  32.171 +-	/* don't acquire bond device's xmit_lock when 
  32.172 ++	/* don't acquire bond device's netif_tx_lock when
  32.173 + 	 * transmitting */
  32.174 + 	bond_dev->features |= NETIF_F_LLTX;
  32.175 + 
  32.176 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
  32.177 +--- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-12 19:02:10.000000000 +0100
  32.178 ++++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
  32.179 +@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  32.180 + 	struct cpl_tx_pkt *cpl;
  32.181 + 
  32.182 + #ifdef NETIF_F_TSO
  32.183 +-	if (skb_shinfo(skb)->tso_size) {
  32.184 ++	if (skb_shinfo(skb)->gso_size) {
  32.185 + 		int eth_type;
  32.186 + 		struct cpl_tx_pkt_lso *hdr;
  32.187 + 
  32.188 +@@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  32.189 + 		hdr->ip_hdr_words = skb->nh.iph->ihl;
  32.190 + 		hdr->tcp_hdr_words = skb->h.th->doff;
  32.191 + 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
  32.192 +-						skb_shinfo(skb)->tso_size));
  32.193 ++						skb_shinfo(skb)->gso_size));
  32.194 + 		hdr->len = htonl(skb->len - sizeof(*hdr));
  32.195 + 		cpl = (struct cpl_tx_pkt *)hdr;
  32.196 + 		sge->stats.tx_lso_pkts++;
  32.197 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
  32.198 +--- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-12 19:02:10.000000000 +0100
  32.199 ++++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
  32.200 +@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
  32.201 + 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  32.202 + 	int err;
  32.203 + 
  32.204 +-	if (skb_shinfo(skb)->tso_size) {
  32.205 ++	if (skb_shinfo(skb)->gso_size) {
  32.206 + 		if (skb_header_cloned(skb)) {
  32.207 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  32.208 + 			if (err)
  32.209 +@@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
  32.210 + 		}
  32.211 + 
  32.212 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  32.213 +-		mss = skb_shinfo(skb)->tso_size;
  32.214 ++		mss = skb_shinfo(skb)->gso_size;
  32.215 + 		if (skb->protocol == ntohs(ETH_P_IP)) {
  32.216 + 			skb->nh.iph->tot_len = 0;
  32.217 + 			skb->nh.iph->check = 0;
  32.218 +@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
  32.219 + 		 * tso gets written back prematurely before the data is fully
  32.220 + 		 * DMAd to the controller */
  32.221 + 		if (!skb->data_len && tx_ring->last_tx_tso &&
  32.222 +-				!skb_shinfo(skb)->tso_size) {
  32.223 ++				!skb_shinfo(skb)->gso_size) {
  32.224 + 			tx_ring->last_tx_tso = 0;
  32.225 + 			size -= 4;
  32.226 + 		}
  32.227 +@@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  32.228 + 	}
  32.229 + 
  32.230 + #ifdef NETIF_F_TSO
  32.231 +-	mss = skb_shinfo(skb)->tso_size;
  32.232 ++	mss = skb_shinfo(skb)->gso_size;
  32.233 + 	/* The controller does a simple calculation to 
  32.234 + 	 * make sure there is enough room in the FIFO before
  32.235 + 	 * initiating the DMA for each buffer.  The calc is:
  32.236 +@@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  32.237 + #ifdef NETIF_F_TSO
  32.238 + 	/* Controller Erratum workaround */
  32.239 + 	if (!skb->data_len && tx_ring->last_tx_tso &&
  32.240 +-		!skb_shinfo(skb)->tso_size)
  32.241 ++		!skb_shinfo(skb)->gso_size)
  32.242 + 		count++;
  32.243 + #endif
  32.244 + 
  32.245 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
  32.246 +--- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-12 19:02:10.000000000 +0100
  32.247 ++++ ./drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
  32.248 +@@ -482,9 +482,9 @@ typedef union _ring_type {
  32.249 +  * critical parts:
  32.250 +  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  32.251 +  *	by the arch code for interrupts.
  32.252 +- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
  32.253 ++ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  32.254 +  *	needs dev->priv->lock :-(
  32.255 +- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
  32.256 ++ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  32.257 +  */
  32.258 + 
  32.259 + /* in dev: base, irq */
  32.260 +@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
  32.261 + 
  32.262 + /*
  32.263 +  * nv_start_xmit: dev->hard_start_xmit function
  32.264 +- * Called with dev->xmit_lock held.
  32.265 ++ * Called with netif_tx_lock held.
  32.266 +  */
  32.267 + static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  32.268 + {
  32.269 +@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
  32.270 + 	np->tx_skbuff[nr] = skb;
  32.271 + 
  32.272 + #ifdef NETIF_F_TSO
  32.273 +-	if (skb_shinfo(skb)->tso_size)
  32.274 +-		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  32.275 ++	if (skb_shinfo(skb)->gso_size)
  32.276 ++		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  32.277 + 	else
  32.278 + #endif
  32.279 + 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  32.280 +@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
  32.281 + 
  32.282 + /*
  32.283 +  * nv_tx_timeout: dev->tx_timeout function
  32.284 +- * Called with dev->xmit_lock held.
  32.285 ++ * Called with netif_tx_lock held.
  32.286 +  */
  32.287 + static void nv_tx_timeout(struct net_device *dev)
  32.288 + {
  32.289 +@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
  32.290 + 		 * Changing the MTU is a rare event, it shouldn't matter.
  32.291 + 		 */
  32.292 + 		disable_irq(dev->irq);
  32.293 +-		spin_lock_bh(&dev->xmit_lock);
  32.294 ++		netif_tx_lock_bh(dev);
  32.295 + 		spin_lock(&np->lock);
  32.296 + 		/* stop engines */
  32.297 + 		nv_stop_rx(dev);
  32.298 +@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
  32.299 + 		nv_start_rx(dev);
  32.300 + 		nv_start_tx(dev);
  32.301 + 		spin_unlock(&np->lock);
  32.302 +-		spin_unlock_bh(&dev->xmit_lock);
  32.303 ++		netif_tx_unlock_bh(dev);
  32.304 + 		enable_irq(dev->irq);
  32.305 + 	}
  32.306 + 	return 0;
  32.307 +@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
  32.308 + 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  32.309 + 
  32.310 + 	if (netif_running(dev)) {
  32.311 +-		spin_lock_bh(&dev->xmit_lock);
  32.312 ++		netif_tx_lock_bh(dev);
  32.313 + 		spin_lock_irq(&np->lock);
  32.314 + 
  32.315 + 		/* stop rx engine */
  32.316 +@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
  32.317 + 		/* restart rx engine */
  32.318 + 		nv_start_rx(dev);
  32.319 + 		spin_unlock_irq(&np->lock);
  32.320 +-		spin_unlock_bh(&dev->xmit_lock);
  32.321 ++		netif_tx_unlock_bh(dev);
  32.322 + 	} else {
  32.323 + 		nv_copy_mac_to_hw(dev);
  32.324 + 	}
  32.325 +@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
  32.326 + 
  32.327 + /*
  32.328 +  * nv_set_multicast: dev->set_multicast function
  32.329 +- * Called with dev->xmit_lock held.
  32.330 ++ * Called with netif_tx_lock held.
  32.331 +  */
  32.332 + static void nv_set_multicast(struct net_device *dev)
  32.333 + {
  32.334 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
  32.335 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c	2006-09-12 19:02:10.000000000 +0100
  32.336 ++++ ./drivers/net/hamradio/6pack.c	2006-09-19 13:59:20.000000000 +0100
  32.337 +@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
  32.338 + {
  32.339 + 	struct sockaddr_ax25 *sa = addr;
  32.340 + 
  32.341 +-	spin_lock_irq(&dev->xmit_lock);
  32.342 ++	netif_tx_lock_bh(dev);
  32.343 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  32.344 +-	spin_unlock_irq(&dev->xmit_lock);
  32.345 ++	netif_tx_unlock_bh(dev);
  32.346 + 
  32.347 + 	return 0;
  32.348 + }
  32.349 +@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
  32.350 + 			break;
  32.351 + 		}
  32.352 + 
  32.353 +-		spin_lock_irq(&dev->xmit_lock);
  32.354 ++		netif_tx_lock_bh(dev);
  32.355 + 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
  32.356 +-		spin_unlock_irq(&dev->xmit_lock);
  32.357 ++		netif_tx_unlock_bh(dev);
  32.358 + 
  32.359 + 		err = 0;
  32.360 + 		break;
  32.361 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
  32.362 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c	2006-09-12 19:02:10.000000000 +0100
  32.363 ++++ ./drivers/net/hamradio/mkiss.c	2006-09-19 13:59:20.000000000 +0100
  32.364 +@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
  32.365 + {
  32.366 + 	struct sockaddr_ax25 *sa = addr;
  32.367 + 
  32.368 +-	spin_lock_irq(&dev->xmit_lock);
  32.369 ++	netif_tx_lock_bh(dev);
  32.370 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  32.371 +-	spin_unlock_irq(&dev->xmit_lock);
  32.372 ++	netif_tx_unlock_bh(dev);
  32.373 + 
  32.374 + 	return 0;
  32.375 + }
  32.376 +@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
  32.377 + 			break;
  32.378 + 		}
  32.379 + 
  32.380 +-		spin_lock_irq(&dev->xmit_lock);
  32.381 ++		netif_tx_lock_bh(dev);
  32.382 + 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
  32.383 +-		spin_unlock_irq(&dev->xmit_lock);
  32.384 ++		netif_tx_unlock_bh(dev);
  32.385 + 
  32.386 + 		err = 0;
  32.387 + 		break;
  32.388 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
  32.389 +--- ../orig-linux-2.6.16.29/drivers/net/ifb.c	2006-09-12 19:02:10.000000000 +0100
  32.390 ++++ ./drivers/net/ifb.c	2006-09-19 13:59:20.000000000 +0100
  32.391 +@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
  32.392 + 	dp->st_task_enter++;
  32.393 + 	if ((skb = skb_peek(&dp->tq)) == NULL) {
  32.394 + 		dp->st_txq_refl_try++;
  32.395 +-		if (spin_trylock(&_dev->xmit_lock)) {
  32.396 ++		if (netif_tx_trylock(_dev)) {
  32.397 + 			dp->st_rxq_enter++;
  32.398 + 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
  32.399 + 				skb_queue_tail(&dp->tq, skb);
  32.400 + 				dp->st_rx2tx_tran++;
  32.401 + 			}
  32.402 +-			spin_unlock(&_dev->xmit_lock);
  32.403 ++			netif_tx_unlock(_dev);
  32.404 + 		} else {
  32.405 + 			/* reschedule */
  32.406 + 			dp->st_rxq_notenter++;
  32.407 +@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
  32.408 + 		}
  32.409 + 	}
  32.410 + 
  32.411 +-	if (spin_trylock(&_dev->xmit_lock)) {
  32.412 ++	if (netif_tx_trylock(_dev)) {
  32.413 + 		dp->st_rxq_check++;
  32.414 + 		if ((skb = skb_peek(&dp->rq)) == NULL) {
  32.415 + 			dp->tasklet_pending = 0;
  32.416 +@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
  32.417 + 				netif_wake_queue(_dev);
  32.418 + 		} else {
  32.419 + 			dp->st_rxq_rsch++;
  32.420 +-			spin_unlock(&_dev->xmit_lock);
  32.421 ++			netif_tx_unlock(_dev);
  32.422 + 			goto resched;
  32.423 + 		}
  32.424 +-		spin_unlock(&_dev->xmit_lock);
  32.425 ++		netif_tx_unlock(_dev);
  32.426 + 	} else {
  32.427 + resched:
  32.428 + 		dp->tasklet_pending = 1;
  32.429 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
  32.430 +--- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c	2006-09-12 19:02:10.000000000 +0100
  32.431 ++++ ./drivers/net/irda/vlsi_ir.c	2006-09-19 13:59:20.000000000 +0100
  32.432 +@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
  32.433 + 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
  32.434 + 			    	break;
  32.435 + 			udelay(100);
  32.436 +-			/* must not sleep here - we are called under xmit_lock! */
  32.437 ++			/* must not sleep here - called under netif_tx_lock! */
  32.438 + 		}
  32.439 + 	}
  32.440 + 
  32.441 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
  32.442 +--- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-12 19:02:10.000000000 +0100
  32.443 ++++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
  32.444 +@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  32.445 + 	uint16_t ipcse, tucse, mss;
  32.446 + 	int err;
  32.447 + 
  32.448 +-	if(likely(skb_shinfo(skb)->tso_size)) {
  32.449 ++	if(likely(skb_shinfo(skb)->gso_size)) {
  32.450 + 		if (skb_header_cloned(skb)) {
  32.451 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  32.452 + 			if (err)
  32.453 +@@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  32.454 + 		}
  32.455 + 
  32.456 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  32.457 +-		mss = skb_shinfo(skb)->tso_size;
  32.458 ++		mss = skb_shinfo(skb)->gso_size;
  32.459 + 		skb->nh.iph->tot_len = 0;
  32.460 + 		skb->nh.iph->check = 0;
  32.461 + 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
  32.462 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
  32.463 +--- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-12 19:02:10.000000000 +0100
  32.464 ++++ ./drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
  32.465 +@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
  32.466 + 	struct iphdr *iph = skb->nh.iph;
  32.467 + 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
  32.468 + 	unsigned int doffset = (iph->ihl + th->doff) * 4;
  32.469 +-	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
  32.470 ++	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
  32.471 + 	unsigned int offset = 0;
  32.472 + 	u32 seq = ntohl(th->seq);
  32.473 + 	u16 id  = ntohs(iph->id);
  32.474 +@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
  32.475 + #endif
  32.476 + 
  32.477 + #ifdef LOOPBACK_TSO
  32.478 +-	if (skb_shinfo(skb)->tso_size) {
  32.479 ++	if (skb_shinfo(skb)->gso_size) {
  32.480 + 		BUG_ON(skb->protocol != htons(ETH_P_IP));
  32.481 + 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
  32.482 + 
  32.483 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
  32.484 +--- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c	2006-09-12 19:02:10.000000000 +0100
  32.485 ++++ ./drivers/net/mv643xx_eth.c	2006-09-19 13:59:20.000000000 +0100
  32.486 +@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
  32.487 + 
  32.488 + #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
  32.489 + 	if (has_tiny_unaligned_frags(skb)) {
  32.490 +-		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
  32.491 ++		if (__skb_linearize(skb)) {
  32.492 + 			stats->tx_dropped++;
  32.493 + 			printk(KERN_DEBUG "%s: failed to linearize tiny "
  32.494 + 					"unaligned fragment\n", dev->name);
  32.495 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
  32.496 +--- ../orig-linux-2.6.16.29/drivers/net/natsemi.c	2006-09-12 19:02:10.000000000 +0100
  32.497 ++++ ./drivers/net/natsemi.c	2006-09-19 13:59:20.000000000 +0100
  32.498 +@@ -323,12 +323,12 @@ performance critical codepaths:
  32.499 + The rx process only runs in the interrupt handler. Access from outside
  32.500 + the interrupt handler is only permitted after disable_irq().
  32.501 + 
  32.502 +-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
  32.503 ++The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
  32.504 + is set, then access is permitted under spin_lock_irq(&np->lock).
  32.505 + 
  32.506 + Thus configuration functions that want to access everything must call
  32.507 + 	disable_irq(dev->irq);
  32.508 +-	spin_lock_bh(dev->xmit_lock);
  32.509 ++	netif_tx_lock_bh(dev);
  32.510 + 	spin_lock_irq(&np->lock);
  32.511 + 
  32.512 + IV. Notes
  32.513 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
  32.514 +--- ../orig-linux-2.6.16.29/drivers/net/r8169.c	2006-09-12 19:02:10.000000000 +0100
  32.515 ++++ ./drivers/net/r8169.c	2006-09-19 13:59:20.000000000 +0100
  32.516 +@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
  32.517 + static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
  32.518 + {
  32.519 + 	if (dev->features & NETIF_F_TSO) {
  32.520 +-		u32 mss = skb_shinfo(skb)->tso_size;
  32.521 ++		u32 mss = skb_shinfo(skb)->gso_size;
  32.522 + 
  32.523 + 		if (mss)
  32.524 + 			return LargeSend | ((mss & MSSMask) << MSSShift);
  32.525 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
  32.526 +--- ../orig-linux-2.6.16.29/drivers/net/s2io.c	2006-09-12 19:02:10.000000000 +0100
  32.527 ++++ ./drivers/net/s2io.c	2006-09-19 13:59:20.000000000 +0100
  32.528 +@@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
  32.529 + 	txdp->Control_1 = 0;
  32.530 + 	txdp->Control_2 = 0;
  32.531 + #ifdef NETIF_F_TSO
  32.532 +-	mss = skb_shinfo(skb)->tso_size;
  32.533 +-	if (mss) {
  32.534 ++	mss = skb_shinfo(skb)->gso_size;
  32.535 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
  32.536 + 		txdp->Control_1 |= TXD_TCP_LSO_EN;
  32.537 + 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
  32.538 + 	}
  32.539 +@@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
  32.540 + 	}
  32.541 + 
  32.542 + 	frg_len = skb->len - skb->data_len;
  32.543 +-	if (skb_shinfo(skb)->ufo_size) {
  32.544 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
  32.545 + 		int ufo_size;
  32.546 + 
  32.547 +-		ufo_size = skb_shinfo(skb)->ufo_size;
  32.548 ++		ufo_size = skb_shinfo(skb)->gso_size;
  32.549 + 		ufo_size &= ~7;
  32.550 + 		txdp->Control_1 |= TXD_UFO_EN;
  32.551 + 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
  32.552 +@@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
  32.553 + 	txdp->Host_Control = (unsigned long) skb;
  32.554 + 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
  32.555 + 
  32.556 +-	if (skb_shinfo(skb)->ufo_size)
  32.557 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  32.558 + 		txdp->Control_1 |= TXD_UFO_EN;
  32.559 + 
  32.560 + 	frg_cnt = skb_shinfo(skb)->nr_frags;
  32.561 +@@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
  32.562 + 		    (sp->pdev, frag->page, frag->page_offset,
  32.563 + 		     frag->size, PCI_DMA_TODEVICE);
  32.564 + 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
  32.565 +-		if (skb_shinfo(skb)->ufo_size)
  32.566 ++		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  32.567 + 			txdp->Control_1 |= TXD_UFO_EN;
  32.568 + 	}
  32.569 + 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
  32.570 + 
  32.571 +-	if (skb_shinfo(skb)->ufo_size)
  32.572 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  32.573 + 		frg_cnt++; /* as Txd0 was used for inband header */
  32.574 + 
  32.575 + 	tx_fifo = mac_control->tx_FIFO_start[queue];
  32.576 +@@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
  32.577 + 	if (mss)
  32.578 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  32.579 + #endif
  32.580 +-	if (skb_shinfo(skb)->ufo_size)
  32.581 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  32.582 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  32.583 + 	writeq(val64, &tx_fifo->List_Control);
  32.584 + 
  32.585 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
  32.586 +--- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-12 19:02:10.000000000 +0100
  32.587 ++++ ./drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
  32.588 +@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
  32.589 + 	count = sizeof(dma_addr_t) / sizeof(u32);
  32.590 + 	count += skb_shinfo(skb)->nr_frags * count;
  32.591 + 
  32.592 +-	if (skb_shinfo(skb)->tso_size)
  32.593 ++	if (skb_shinfo(skb)->gso_size)
  32.594 + 		++count;
  32.595 + 
  32.596 + 	if (skb->ip_summed == CHECKSUM_HW)
  32.597 +@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
  32.598 + 	}
  32.599 + 
  32.600 + 	/* Check for TCP Segmentation Offload */
  32.601 +-	mss = skb_shinfo(skb)->tso_size;
  32.602 ++	mss = skb_shinfo(skb)->gso_size;
  32.603 + 	if (mss != 0) {
  32.604 + 		/* just drop the packet if non-linear expansion fails */
  32.605 + 		if (skb_header_cloned(skb) &&
  32.606 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
  32.607 +--- ../orig-linux-2.6.16.29/drivers/net/tg3.c	2006-09-12 19:02:10.000000000 +0100
  32.608 ++++ ./drivers/net/tg3.c	2006-09-19 13:59:20.000000000 +0100
  32.609 +@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
  32.610 + #if TG3_TSO_SUPPORT != 0
  32.611 + 	mss = 0;
  32.612 + 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
  32.613 +-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
  32.614 ++	    (mss = skb_shinfo(skb)->gso_size) != 0) {
  32.615 + 		int tcp_opt_len, ip_tcp_len;
  32.616 + 
  32.617 + 		if (skb_header_cloned(skb) &&
  32.618 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
  32.619 +--- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c	2006-09-12 19:02:10.000000000 +0100
  32.620 ++++ ./drivers/net/tulip/winbond-840.c	2006-09-19 13:59:20.000000000 +0100
  32.621 +@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
  32.622 +  * - get_stats:
  32.623 +  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
  32.624 +  * - hard_start_xmit:
  32.625 +- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
  32.626 ++ * 	synchronize_irq + netif_tx_disable;
  32.627 +  * - tx_timeout:
  32.628 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  32.629 ++ * 	netif_device_detach + netif_tx_disable;
  32.630 +  * - set_multicast_list
  32.631 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  32.632 ++ * 	netif_device_detach + netif_tx_disable;
  32.633 +  * - interrupt handler
  32.634 +  * 	doesn't touch hw if not present, synchronize_irq waits for
  32.635 +  * 	running instances of the interrupt handler.
  32.636 +@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
  32.637 + 		netif_device_detach(dev);
  32.638 + 		update_csr6(dev, 0);
  32.639 + 		iowrite32(0, ioaddr + IntrEnable);
  32.640 +-		netif_stop_queue(dev);
  32.641 + 		spin_unlock_irq(&np->lock);
  32.642 + 
  32.643 +-		spin_unlock_wait(&dev->xmit_lock);
  32.644 + 		synchronize_irq(dev->irq);
  32.645 ++		netif_tx_disable(dev);
  32.646 + 	
  32.647 + 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
  32.648 + 
  32.649 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  32.650 +--- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-12 19:02:10.000000000 +0100
  32.651 ++++ ./drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  32.652 +@@ -340,7 +340,7 @@ enum state_values {
  32.653 + #endif
  32.654 + 
  32.655 + #if defined(NETIF_F_TSO)
  32.656 +-#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
  32.657 ++#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
  32.658 + #define TSO_NUM_DESCRIPTORS	2
  32.659 + #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
  32.660 + #else
  32.661 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
  32.662 +--- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c	2006-09-12 19:02:10.000000000 +0100
  32.663 ++++ ./drivers/net/via-velocity.c	2006-09-19 13:59:20.000000000 +0100
  32.664 +@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
  32.665 + 
  32.666 + 	int pktlen = skb->len;
  32.667 + 
  32.668 ++#ifdef VELOCITY_ZERO_COPY_SUPPORT
  32.669 ++	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
  32.670 ++		kfree_skb(skb);
  32.671 ++		return 0;
  32.672 ++	}
  32.673 ++#endif
  32.674 ++
  32.675 + 	spin_lock_irqsave(&vptr->lock, flags);
  32.676 + 
  32.677 + 	index = vptr->td_curr[qnum];
  32.678 +@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
  32.679 + 	 */
  32.680 + 	if (pktlen < ETH_ZLEN) {
  32.681 + 		/* Cannot occur until ZC support */
  32.682 +-		if(skb_linearize(skb, GFP_ATOMIC))
  32.683 +-			return 0; 
  32.684 + 		pktlen = ETH_ZLEN;
  32.685 + 		memcpy(tdinfo->buf, skb->data, skb->len);
  32.686 + 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
  32.687 +@@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff 
  32.688 + 		int nfrags = skb_shinfo(skb)->nr_frags;
  32.689 + 		tdinfo->skb = skb;
  32.690 + 		if (nfrags > 6) {
  32.691 +-			skb_linearize(skb, GFP_ATOMIC);
  32.692 + 			memcpy(tdinfo->buf, skb->data, skb->len);
  32.693 + 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
  32.694 + 			td_ptr->tdesc0.pktsize = 
  32.695 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
  32.696 +--- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c	2006-09-12 19:02:10.000000000 +0100
  32.697 ++++ ./drivers/net/wireless/orinoco.c	2006-09-19 13:59:20.000000000 +0100
  32.698 +@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
  32.699 + 	/* Set promiscuity / multicast*/
  32.700 + 	priv->promiscuous = 0;
  32.701 + 	priv->mc_count = 0;
  32.702 +-	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
  32.703 ++
  32.704 ++	/* FIXME: what about netif_tx_lock */
  32.705 ++	__orinoco_set_multicast_list(dev);
  32.706 + 
  32.707 + 	return 0;
  32.708 + }
  32.709 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
  32.710 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c	2006-09-12 19:02:10.000000000 +0100
  32.711 ++++ ./drivers/s390/net/qeth_eddp.c	2006-09-19 13:59:20.000000000 +0100
  32.712 +@@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
  32.713 +        }
  32.714 + 	tcph = eddp->skb->h.th;
  32.715 + 	while (eddp->skb_offset < eddp->skb->len) {
  32.716 +-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
  32.717 ++		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
  32.718 + 			       (int)(eddp->skb->len - eddp->skb_offset));
  32.719 + 		/* prepare qdio hdr */
  32.720 + 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
  32.721 +@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
  32.722 + 	
  32.723 + 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
  32.724 + 	/* can we put multiple skbs in one page? */
  32.725 +-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
  32.726 ++	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
  32.727 + 	if (skbs_per_page > 1){
  32.728 +-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
  32.729 ++		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
  32.730 + 				 skbs_per_page + 1;
  32.731 + 		ctx->elements_per_skb = 1;
  32.732 + 	} else {
  32.733 + 		/* no -> how many elements per skb? */
  32.734 +-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
  32.735 ++		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
  32.736 + 				     PAGE_SIZE) >> PAGE_SHIFT;
  32.737 + 		ctx->num_pages = ctx->elements_per_skb *
  32.738 +-				 (skb_shinfo(skb)->tso_segs + 1);
  32.739 ++				 (skb_shinfo(skb)->gso_segs + 1);
  32.740 + 	}
  32.741 + 	ctx->num_elements = ctx->elements_per_skb *
  32.742 +-			    (skb_shinfo(skb)->tso_segs + 1);
  32.743 ++			    (skb_shinfo(skb)->gso_segs + 1);
  32.744 + }
  32.745 + 
  32.746 + static inline struct qeth_eddp_context *
  32.747 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  32.748 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-12 19:02:10.000000000 +0100
  32.749 ++++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  32.750 +@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  32.751 + 	queue = card->qdio.out_qs
  32.752 + 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  32.753 + 
  32.754 +-	if (skb_shinfo(skb)->tso_size)
  32.755 ++	if (skb_shinfo(skb)->gso_size)
  32.756 + 		large_send = card->options.large_send;
  32.757 + 
  32.758 + 	/*are we able to do TSO ? If so ,prepare and send it from here */
  32.759 +@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  32.760 + 		card->stats.tx_packets++;
  32.761 + 		card->stats.tx_bytes += skb->len;
  32.762 + #ifdef CONFIG_QETH_PERF_STATS
  32.763 +-		if (skb_shinfo(skb)->tso_size &&
  32.764 ++		if (skb_shinfo(skb)->gso_size &&
  32.765 + 		   !(large_send == QETH_LARGE_SEND_NO)) {
  32.766 + 			card->perf_stats.large_send_bytes += skb->len;
  32.767 + 			card->perf_stats.large_send_cnt++;
  32.768 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
  32.769 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h	2006-09-12 19:02:10.000000000 +0100
  32.770 ++++ ./drivers/s390/net/qeth_tso.h	2006-09-19 13:59:20.000000000 +0100
  32.771 +@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
  32.772 + 	hdr->ext.hdr_version = 1;
  32.773 + 	hdr->ext.hdr_len     = 28;
  32.774 + 	/*insert non-fix values */
  32.775 +-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
  32.776 ++	hdr->ext.mss = skb_shinfo(skb)->gso_size;
  32.777 + 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
  32.778 + 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
  32.779 + 				       sizeof(struct qeth_hdr_tso));
  32.780 +diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
  32.781 +--- ../orig-linux-2.6.16.29/include/linux/ethtool.h	2006-09-12 19:02:10.000000000 +0100
  32.782 ++++ ./include/linux/ethtool.h	2006-09-19 13:59:20.000000000 +0100
  32.783 +@@ -408,6 +408,8 @@ struct ethtool_ops {
  32.784 + #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
  32.785 + #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
  32.786 + #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
  32.787 ++#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
  32.788 ++#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
  32.789 + 
  32.790 + /* compatibility with older code */
  32.791 + #define SPARC_ETH_GSET		ETHTOOL_GSET
  32.792 +diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  32.793 +--- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-12 19:02:10.000000000 +0100
  32.794 ++++ ./include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  32.795 +@@ -230,7 +230,8 @@ enum netdev_state_t
  32.796 + 	__LINK_STATE_SCHED,
  32.797 + 	__LINK_STATE_NOCARRIER,
  32.798 + 	__LINK_STATE_RX_SCHED,
  32.799 +-	__LINK_STATE_LINKWATCH_PENDING
  32.800 ++	__LINK_STATE_LINKWATCH_PENDING,
  32.801 ++	__LINK_STATE_QDISC_RUNNING,
  32.802 + };
  32.803 + 
  32.804 + 
  32.805 +@@ -306,9 +307,17 @@ struct net_device
  32.806 + #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
  32.807 + #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
  32.808 + #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
  32.809 +-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
  32.810 ++#define NETIF_F_GSO		2048	/* Enable software GSO. */
  32.811 + #define NETIF_F_LLTX		4096	/* LockLess TX */
  32.812 +-#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
  32.813 ++
  32.814 ++	/* Segmentation offload features */
  32.815 ++#define NETIF_F_GSO_SHIFT	16
  32.816 ++#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  32.817 ++#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
  32.818 ++#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  32.819 ++
  32.820 ++#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  32.821 ++#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
  32.822 + 
  32.823 + 	struct net_device	*next_sched;
  32.824 + 
  32.825 +@@ -394,6 +403,9 @@ struct net_device
  32.826 + 	struct list_head	qdisc_list;
  32.827 + 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
  32.828 + 
  32.829 ++	/* Partially transmitted GSO packet. */
  32.830 ++	struct sk_buff		*gso_skb;
  32.831 ++
  32.832 + 	/* ingress path synchronizer */
  32.833 + 	spinlock_t		ingress_lock;
  32.834 + 	struct Qdisc		*qdisc_ingress;
  32.835 +@@ -402,7 +414,7 @@ struct net_device
  32.836 +  * One part is mostly used on xmit path (device)
  32.837 +  */
  32.838 + 	/* hard_start_xmit synchronizer */
  32.839 +-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
  32.840 ++	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  32.841 + 	/* cpu id of processor entered to hard_start_xmit or -1,
  32.842 + 	   if nobody entered there.
  32.843 + 	 */
  32.844 +@@ -527,6 +539,8 @@ struct packet_type {
  32.845 + 					 struct net_device *,
  32.846 + 					 struct packet_type *,
  32.847 + 					 struct net_device *);
  32.848 ++	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  32.849 ++						int features);
  32.850 + 	void			*af_packet_priv;
  32.851 + 	struct list_head	list;
  32.852 + };
  32.853 +@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
  32.854 + extern int		dev_set_mtu(struct net_device *, int);
  32.855 + extern int		dev_set_mac_address(struct net_device *,
  32.856 + 					    struct sockaddr *);
  32.857 +-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
  32.858 ++extern int		dev_hard_start_xmit(struct sk_buff *skb,
  32.859 ++					    struct net_device *dev);
  32.860 + 
  32.861 + extern void		dev_init(void);
  32.862 + 
  32.863 +@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
  32.864 + 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
  32.865 + }
  32.866 + 
  32.867 ++static inline void netif_tx_lock(struct net_device *dev)
  32.868 ++{
  32.869 ++	spin_lock(&dev->_xmit_lock);
  32.870 ++	dev->xmit_lock_owner = smp_processor_id();
  32.871 ++}
  32.872 ++
  32.873 ++static inline void netif_tx_lock_bh(struct net_device *dev)
  32.874 ++{
  32.875 ++	spin_lock_bh(&dev->_xmit_lock);
  32.876 ++	dev->xmit_lock_owner = smp_processor_id();
  32.877 ++}
  32.878 ++
  32.879 ++static inline int netif_tx_trylock(struct net_device *dev)
  32.880 ++{
  32.881 ++	int err = spin_trylock(&dev->_xmit_lock);
  32.882 ++	if (!err)
  32.883 ++		dev->xmit_lock_owner = smp_processor_id();
  32.884 ++	return err;
  32.885 ++}
  32.886 ++
  32.887 ++static inline void netif_tx_unlock(struct net_device *dev)
  32.888 ++{
  32.889 ++	dev->xmit_lock_owner = -1;
  32.890 ++	spin_unlock(&dev->_xmit_lock);
  32.891 ++}
  32.892 ++
  32.893 ++static inline void netif_tx_unlock_bh(struct net_device *dev)
  32.894 ++{
  32.895 ++	dev->xmit_lock_owner = -1;
  32.896 ++	spin_unlock_bh(&dev->_xmit_lock);
  32.897 ++}
  32.898 ++
  32.899 + static inline void netif_tx_disable(struct net_device *dev)
  32.900 + {
  32.901 +-	spin_lock_bh(&dev->xmit_lock);
  32.902 ++	netif_tx_lock_bh(dev);
  32.903 + 	netif_stop_queue(dev);
  32.904 +-	spin_unlock_bh(&dev->xmit_lock);
  32.905 ++	netif_tx_unlock_bh(dev);
  32.906 + }
  32.907 + 
  32.908 + /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  32.909 +@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
  32.910 + extern int		weight_p;
  32.911 + extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
  32.912 + extern int skb_checksum_help(struct sk_buff *skb, int inward);
  32.913 ++extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  32.914 + #ifdef CONFIG_BUG
  32.915 + extern void netdev_rx_csum_fault(struct net_device *dev);
  32.916 + #else
  32.917 +@@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
  32.918 + 
  32.919 + extern void linkwatch_run_queue(void);
  32.920 + 
  32.921 ++static inline int skb_gso_ok(struct sk_buff *skb, int features)
  32.922 ++{
  32.923 ++	int feature = skb_shinfo(skb)->gso_size ?
  32.924 ++		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  32.925 ++	return (features & feature) == feature;
  32.926 ++}
  32.927 ++
  32.928 ++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  32.929 ++{
  32.930 ++	return !skb_gso_ok(skb, dev->features);
  32.931 ++}
  32.932 ++
  32.933 + #endif /* __KERNEL__ */
  32.934 + 
  32.935 + #endif	/* _LINUX_DEV_H */
  32.936 +diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  32.937 +--- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-12 19:02:10.000000000 +0100
  32.938 ++++ ./include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  32.939 +@@ -134,9 +134,10 @@ struct skb_frag_struct {
  32.940 + struct skb_shared_info {
  32.941 + 	atomic_t	dataref;
  32.942 + 	unsigned short	nr_frags;
  32.943 +-	unsigned short	tso_size;
  32.944 +-	unsigned short	tso_segs;
  32.945 +-	unsigned short  ufo_size;
  32.946 ++	unsigned short	gso_size;
  32.947 ++	/* Warning: this field is not always filled in (UFO)! */
  32.948 ++	unsigned short	gso_segs;
  32.949 ++	unsigned short  gso_type;
  32.950 + 	unsigned int    ip6_frag_id;
  32.951 + 	struct sk_buff	*frag_list;
  32.952 + 	skb_frag_t	frags[MAX_SKB_FRAGS];
  32.953 +@@ -168,6 +169,14 @@ enum {
  32.954 + 	SKB_FCLONE_CLONE,
  32.955 + };
  32.956 + 
  32.957 ++enum {
  32.958 ++	SKB_GSO_TCPV4 = 1 << 0,
  32.959 ++	SKB_GSO_UDPV4 = 1 << 1,
  32.960 ++
  32.961 ++	/* This indicates the skb is from an untrusted source. */
  32.962 ++	SKB_GSO_DODGY = 1 << 2,
  32.963 ++};
  32.964 ++
  32.965 + /** 
  32.966 +  *	struct sk_buff - socket buffer
  32.967 +  *	@next: Next buffer in list
  32.968 +@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  32.969 + 	return 0;
  32.970 + }
  32.971 + 
  32.972 ++static inline int __skb_linearize(struct sk_buff *skb)
  32.973 ++{
  32.974 ++	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  32.975 ++}
  32.976 ++
  32.977 + /**
  32.978 +  *	skb_linearize - convert paged skb to linear one
  32.979 +  *	@skb: buffer to linarize
  32.980 +- *	@gfp: allocation mode
  32.981 +  *
  32.982 +  *	If there is no free memory -ENOMEM is returned, otherwise zero
  32.983 +  *	is returned and the old skb data released.
  32.984 +  */
  32.985 +-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  32.986 +-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  32.987 ++static inline int skb_linearize(struct sk_buff *skb)
  32.988 ++{
  32.989 ++	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  32.990 ++}
  32.991 ++
  32.992 ++/**
  32.993 ++ *	skb_linearize_cow - make sure skb is linear and writable
  32.994 ++ *	@skb: buffer to process
  32.995 ++ *
  32.996 ++ *	If there is no free memory -ENOMEM is returned, otherwise zero
  32.997 ++ *	is returned and the old skb data released.
  32.998 ++ */
  32.999 ++static inline int skb_linearize_cow(struct sk_buff *skb)
 32.1000 + {
 32.1001 +-	return __skb_linearize(skb, gfp);
 32.1002 ++	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
 32.1003 ++	       __skb_linearize(skb) : 0;
 32.1004 + }
 32.1005 + 
 32.1006 + /**
 32.1007 +@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
 32.1008 + 				 struct sk_buff *skb1, const u32 len);
 32.1009 + 
 32.1010 + extern void	       skb_release_data(struct sk_buff *skb);
 32.1011 ++extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 32.1012 + 
 32.1013 + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 32.1014 + 				       int len, void *buffer)
 32.1015 +diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
 32.1016 +--- ../orig-linux-2.6.16.29/include/net/pkt_sched.h	2006-09-12 19:02:10.000000000 +0100
 32.1017 ++++ ./include/net/pkt_sched.h	2006-09-19 13:59:20.000000000 +0100
 32.1018 +@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
 32.1019 + 		struct rtattr *tab);
 32.1020 + extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 32.1021 + 
 32.1022 +-extern int qdisc_restart(struct net_device *dev);
 32.1023 ++extern void __qdisc_run(struct net_device *dev);
 32.1024 + 
 32.1025 + static inline void qdisc_run(struct net_device *dev)
 32.1026 + {
 32.1027 +-	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 32.1028 +-		/* NOTHING */;
 32.1029 ++	if (!netif_queue_stopped(dev) &&
 32.1030 ++	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 32.1031 ++		__qdisc_run(dev);
 32.1032 + }
 32.1033 + 
 32.1034 + extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 32.1035 +diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
 32.1036 +--- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-12 19:02:10.000000000 +0100
 32.1037 ++++ ./include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
 32.1038 +@@ -37,6 +37,8 @@
 32.1039 + struct net_protocol {
 32.1040 + 	int			(*handler)(struct sk_buff *skb);
 32.1041 + 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 32.1042 ++	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
 32.1043 ++					       int features);
 32.1044 + 	int			no_policy;
 32.1045 + };
 32.1046 + 
 32.1047 +diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
 32.1048 +--- ../orig-linux-2.6.16.29/include/net/sock.h	2006-09-12 19:02:10.000000000 +0100
 32.1049 ++++ ./include/net/sock.h	2006-09-19 13:59:20.000000000 +0100
 32.1050 +@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
 32.1051 + {
 32.1052 + 	__sk_dst_set(sk, dst);
 32.1053 + 	sk->sk_route_caps = dst->dev->features;
 32.1054 ++	if (sk->sk_route_caps & NETIF_F_GSO)
 32.1055 ++		sk->sk_route_caps |= NETIF_F_TSO;
 32.1056 + 	if (sk->sk_route_caps & NETIF_F_TSO) {
 32.1057 + 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
 32.1058 + 			sk->sk_route_caps &= ~NETIF_F_TSO;
 32.1059 ++		else 
 32.1060 ++			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 32.1061 + 	}
 32.1062 + }
 32.1063 + 
 32.1064 +diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
 32.1065 +--- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-12 19:02:10.000000000 +0100
 32.1066 ++++ ./include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
 32.1067 +@@ -552,13 +552,13 @@ struct tcp_skb_cb {
 32.1068 +  */
 32.1069 + static inline int tcp_skb_pcount(const struct sk_buff *skb)
 32.1070 + {
 32.1071 +-	return skb_shinfo(skb)->tso_segs;
 32.1072 ++	return skb_shinfo(skb)->gso_segs;
 32.1073 + }
 32.1074 + 
 32.1075 + /* This is valid iff tcp_skb_pcount() > 1. */
 32.1076 + static inline int tcp_skb_mss(const struct sk_buff *skb)
 32.1077 + {
 32.1078 +-	return skb_shinfo(skb)->tso_size;
 32.1079 ++	return skb_shinfo(skb)->gso_size;
 32.1080 + }
 32.1081 + 
 32.1082 + static inline void tcp_dec_pcount_approx(__u32 *count,
 32.1083 +@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
 32.1084 + 
 32.1085 + extern int tcp_v4_destroy_sock(struct sock *sk);
 32.1086 + 
 32.1087 ++extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
 32.1088 ++
 32.1089 + #ifdef CONFIG_PROC_FS
 32.1090 + extern int  tcp4_proc_init(void);
 32.1091 + extern void tcp4_proc_exit(void);
 32.1092 +diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
 32.1093 +--- ../orig-linux-2.6.16.29/net/atm/clip.c	2006-09-12 19:02:10.000000000 +0100
 32.1094 ++++ ./net/atm/clip.c	2006-09-19 13:59:20.000000000 +0100
 32.1095 +@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
 32.1096 + 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
 32.1097 + 		return;
 32.1098 + 	}
 32.1099 +-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
 32.1100 ++	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 32.1101 + 	entry->neigh->used = jiffies;
 32.1102 + 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 32.1103 + 		if (*walk == clip_vcc) {
 32.1104 +@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
 32.1105 + 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 32.1106 + 	  "0x%p)\n",entry,clip_vcc);
 32.1107 + out:
 32.1108 +-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
 32.1109 ++	netif_tx_unlock_bh(entry->neigh->dev);
 32.1110 + }
 32.1111 + 
 32.1112 + /* The neighbour entry n->lock is held. */
 32.1113 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
 32.1114 +--- ../orig-linux-2.6.16.29/net/bridge/br_device.c	2006-09-12 19:02:10.000000000 +0100
 32.1115 ++++ ./net/bridge/br_device.c	2006-09-19 13:59:20.000000000 +0100
 32.1116 +@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
 32.1117 + 	struct net_bridge *br = netdev_priv(dev);
 32.1118 + 
 32.1119 + 	if (data)
 32.1120 +-		br->feature_mask |= NETIF_F_IP_CSUM;
 32.1121 ++		br->feature_mask |= NETIF_F_NO_CSUM;
 32.1122 + 	else
 32.1123 +-		br->feature_mask &= ~NETIF_F_IP_CSUM;
 32.1124 ++		br->feature_mask &= ~NETIF_F_ALL_CSUM;
 32.1125 + 
 32.1126 + 	br_features_recompute(br);
 32.1127 + 	return 0;
 32.1128 +@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
 32.1129 + 	dev->set_mac_address = br_set_mac_address;
 32.1130 + 	dev->priv_flags = IFF_EBRIDGE;
 32.1131 + 
 32.1132 +- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
 32.1133 +- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
 32.1134 ++ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 32.1135 ++ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
 32.1136 + }
 32.1137 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
 32.1138 +--- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-12 19:02:10.000000000 +0100
 32.1139 ++++ ./net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
 32.1140 +@@ -32,7 +32,7 @@ static inline int should_deliver(const s
 32.1141 + int br_dev_queue_push_xmit(struct sk_buff *skb)
 32.1142 + {
 32.1143 + 	/* drop mtu oversized packets except tso */
 32.1144 +-	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
 32.1145 ++	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
 32.1146 + 		kfree_skb(skb);
 32.1147 + 	else {
 32.1148 + #ifdef CONFIG_BRIDGE_NETFILTER
 32.1149 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
 32.1150 +--- ../orig-linux-2.6.16.29/net/bridge/br_if.c	2006-09-12 19:02:10.000000000 +0100
 32.1151 ++++ ./net/bridge/br_if.c	2006-09-19 13:59:20.000000000 +0100
 32.1152 +@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
 32.1153 + 	struct net_bridge_port *p;
 32.1154 + 	unsigned long features, checksum;
 32.1155 + 
 32.1156 +-	features = br->feature_mask &~ NETIF_F_IP_CSUM;
 32.1157 +-	checksum = br->feature_mask & NETIF_F_IP_CSUM;
 32.1158 ++	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
 32.1159 ++	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
 32.1160 + 
 32.1161 + 	list_for_each_entry(p, &br->port_list, list) {
 32.1162 +-		if (!(p->dev->features 
 32.1163 +-		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
 32.1164 ++		unsigned long feature = p->dev->features;
 32.1165 ++
 32.1166 ++		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
 32.1167 ++			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
 32.1168 ++		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
 32.1169 ++			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
 32.1170 ++		if (!(feature & NETIF_F_IP_CSUM))
 32.1171 + 			checksum = 0;
 32.1172 +-		features &= p->dev->features;
 32.1173 ++
 32.1174 ++		if (feature & NETIF_F_GSO)
 32.1175 ++			feature |= NETIF_F_TSO;
 32.1176 ++		feature |= NETIF_F_GSO;
 32.1177 ++
 32.1178 ++		features &= feature;
 32.1179 + 	}
 32.1180 + 
 32.1181 +-	br->dev->features = features | checksum | NETIF_F_LLTX;
 32.1182 ++	br->dev->features = features | checksum | NETIF_F_LLTX |
 32.1183 ++			    NETIF_F_GSO_ROBUST;
 32.1184 + }
 32.1185 + 
 32.1186 + /* called with RTNL */
 32.1187 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
 32.1188 +--- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-12 19:02:10.000000000 +0100
 32.1189 ++++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
 32.1190 +@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
 32.1191 + {
 32.1192 + 	if (skb->protocol == htons(ETH_P_IP) &&
 32.1193 + 	    skb->len > skb->dev->mtu &&
 32.1194 +-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
 32.1195 ++	    !skb_shinfo(skb)->gso_size)
 32.1196 + 		return ip_fragment(skb, br_dev_queue_push_xmit);
 32.1197 + 	else
 32.1198 + 		return br_dev_queue_push_xmit(skb);
 32.1199 +diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
 32.1200 +--- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-12 19:02:10.000000000 +0100
 32.1201 ++++ ./net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
 32.1202 +@@ -115,6 +115,7 @@
 32.1203 + #include <net/iw_handler.h>
 32.1204 + #endif	/* CONFIG_NET_RADIO */
 32.1205 + #include <asm/current.h>
 32.1206 ++#include <linux/err.h>
 32.1207 + 
 32.1208 + /*
 32.1209 +  *	The list of packet types we will receive (as opposed to discard)
 32.1210 +@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
 32.1211 +  *	taps currently in use.
 32.1212 +  */
 32.1213 + 
 32.1214 +-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 32.1215 ++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 32.1216 + {
 32.1217 + 	struct packet_type *ptype;
 32.1218 + 
 32.1219 +@@ -1106,6 +1107,45 @@ out:	
 32.1220 + 	return ret;
 32.1221 + }
 32.1222 + 
 32.1223 ++/**
 32.1224 ++ *	skb_gso_segment - Perform segmentation on skb.
 32.1225 ++ *	@skb: buffer to segment
 32.1226 ++ *	@features: features for the output path (see dev->features)
 32.1227 ++ *
 32.1228 ++ *	This function segments the given skb and returns a list of segments.
 32.1229 ++ *
 32.1230 ++ *	It may return NULL if the skb requires no segmentation.  This is
 32.1231 ++ *	only possible when GSO is used for verifying header integrity.
 32.1232 ++ */
 32.1233 ++struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 32.1234 ++{
 32.1235 ++	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 32.1236 ++	struct packet_type *ptype;
 32.1237 ++	int type = skb->protocol;
 32.1238 ++
 32.1239 ++	BUG_ON(skb_shinfo(skb)->frag_list);
 32.1240 ++	BUG_ON(skb->ip_summed != CHECKSUM_HW);
 32.1241 ++
 32.1242 ++	skb->mac.raw = skb->data;
 32.1243 ++	skb->mac_len = skb->nh.raw - skb->data;
 32.1244 ++	__skb_pull(skb, skb->mac_len);
 32.1245 ++
 32.1246 ++	rcu_read_lock();
 32.1247 ++	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 32.1248 ++		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
 32.1249 ++			segs = ptype->gso_segment(skb, features);
 32.1250 ++			break;
 32.1251 ++		}
 32.1252 ++	}
 32.1253 ++	rcu_read_unlock();
 32.1254 ++
 32.1255 ++	__skb_push(skb, skb->data - skb->mac.raw);
 32.1256 ++
 32.1257 ++	return segs;
 32.1258 ++}
 32.1259 ++
 32.1260 ++EXPORT_SYMBOL(skb_gso_segment);
 32.1261 ++
 32.1262 + /* Take action when hardware reception checksum errors are detected. */
 32.1263 + #ifdef CONFIG_BUG
 32.1264 + void netdev_rx_csum_fault(struct net_device *dev)
 32.1265 +@@ -1142,75 +1182,108 @@ static inline int illegal_highdma(struct
 32.1266 + #define illegal_highdma(dev, skb)	(0)
 32.1267 + #endif
 32.1268 + 
 32.1269 +-/* Keep head the same: replace data */
 32.1270 +-int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 32.1271 ++struct dev_gso_cb {
 32.1272 ++	void (*destructor)(struct sk_buff *skb);
 32.1273 ++};
 32.1274 ++
 32.1275 ++#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
 32.1276 ++
 32.1277 ++static void dev_gso_skb_destructor(struct sk_buff *skb)
 32.1278 ++{
 32.1279 ++	struct dev_gso_cb *cb;
 32.1280 ++
 32.1281 ++	do {
 32.1282 ++		struct sk_buff *nskb = skb->next;
 32.1283 ++
 32.1284 ++		skb->next = nskb->next;
 32.1285 ++		nskb->next = NULL;
 32.1286 ++		kfree_skb(nskb);
 32.1287 ++	} while (skb->next);
 32.1288 ++
 32.1289 ++	cb = DEV_GSO_CB(skb);
 32.1290 ++	if (cb->destructor)
 32.1291 ++		cb->destructor(skb);
 32.1292 ++}
 32.1293 ++
 32.1294 ++/**
 32.1295 ++ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 32.1296 ++ *	@skb: buffer to segment
 32.1297 ++ *
 32.1298 ++ *	This function segments the given skb and stores the list of segments
 32.1299 ++ *	in skb->next.
 32.1300 ++ */
 32.1301 ++static int dev_gso_segment(struct sk_buff *skb)
 32.1302 + {
 32.1303 +-	unsigned int size;
 32.1304 +-	u8 *data;
 32.1305 +-	long offset;
 32.1306 +-	struct skb_shared_info *ninfo;
 32.1307 +-	int headerlen = skb->data - skb->head;
 32.1308 +-	int expand = (skb->tail + skb->data_len) - skb->end;
 32.1309 +-
 32.1310 +-	if (skb_shared(skb))
 32.1311 +-		BUG();
 32.1312 +-
 32.1313 +-	if (expand <= 0)
 32.1314 +-		expand = 0;
 32.1315 +-
 32.1316 +-	size = skb->end - skb->head + expand;
 32.1317 +-	size = SKB_DATA_ALIGN(size);
 32.1318 +-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 32.1319 +-	if (!data)
 32.1320 +-		return -ENOMEM;
 32.1321 +-
 32.1322 +-	/* Copy entire thing */
 32.1323 +-	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
 32.1324 +-		BUG();
 32.1325 +-
 32.1326 +-	/* Set up shinfo */
 32.1327 +-	ninfo = (struct skb_shared_info*)(data + size);
 32.1328 +-	atomic_set(&ninfo->dataref, 1);
 32.1329 +-	ninfo->tso_size = skb_shinfo(skb)->tso_size;
 32.1330 +-	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
 32.1331 +-	ninfo->nr_frags = 0;
 32.1332 +-	ninfo->frag_list = NULL;
 32.1333 +-
 32.1334 +-	/* Offset between the two in bytes */
 32.1335 +-	offset = data - skb->head;
 32.1336 +-
 32.1337 +-	/* Free old data. */
 32.1338 +-	skb_release_data(skb);
 32.1339 +-
 32.1340 +-	skb->head = data;
 32.1341 +-	skb->end  = data + size;
 32.1342 +-
 32.1343 +-	/* Set up new pointers */
 32.1344 +-	skb->h.raw   += offset;
 32.1345 +-	skb->nh.raw  += offset;
 32.1346 +-	skb->mac.raw += offset;
 32.1347 +-	skb->tail    += offset;
 32.1348 +-	skb->data    += offset;
 32.1349 ++	struct net_device *dev = skb->dev;
 32.1350 ++	struct sk_buff *segs;
 32.1351 ++	int features = dev->features & ~(illegal_highdma(dev, skb) ?
 32.1352 ++					 NETIF_F_SG : 0);
 32.1353 ++
 32.1354 ++	segs = skb_gso_segment(skb, features);
 32.1355 ++
 32.1356 ++	/* Verifying header integrity only. */
 32.1357 ++	if (!segs)
 32.1358 ++		return 0;
 32.1359 + 
 32.1360 +-	/* We are no longer a clone, even if we were. */
 32.1361 +-	skb->cloned    = 0;
 32.1362 ++	if (unlikely(IS_ERR(segs)))
 32.1363 ++		return PTR_ERR(segs);
 32.1364 ++
 32.1365 ++	skb->next = segs;
 32.1366 ++	DEV_GSO_CB(skb)->destructor = skb->destructor;
 32.1367 ++	skb->destructor = dev_gso_skb_destructor;
 32.1368 + 
 32.1369 +-	skb->tail     += skb->data_len;
 32.1370 +-	skb->data_len  = 0;
 32.1371 ++	return 0;
 32.1372 ++}
 32.1373 ++
 32.1374 ++int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 32.1375 ++{
 32.1376 ++	if (likely(!skb->next)) {
 32.1377 ++		if (netdev_nit)
 32.1378 ++			dev_queue_xmit_nit(skb, dev);
 32.1379 ++
 32.1380 ++		if (netif_needs_gso(dev, skb)) {
 32.1381 ++			if (unlikely(dev_gso_segment(skb)))
 32.1382 ++				goto out_kfree_skb;
 32.1383 ++			if (skb->next)
 32.1384 ++				goto gso;
 32.1385 ++		}
 32.1386 ++
 32.1387 ++		return dev->hard_start_xmit(skb, dev);
 32.1388 ++	}
 32.1389 ++
 32.1390 ++gso:
 32.1391 ++	do {
 32.1392 ++		struct sk_buff *nskb = skb->next;
 32.1393 ++		int rc;
 32.1394 ++
 32.1395 ++		skb->next = nskb->next;
 32.1396 ++		nskb->next = NULL;
 32.1397 ++		rc = dev->hard_start_xmit(nskb, dev);
 32.1398 ++		if (unlikely(rc)) {
 32.1399 ++			nskb->next = skb->next;
 32.1400 ++			skb->next = nskb;
 32.1401 ++			return rc;
 32.1402 ++		}
 32.1403 ++		if (unlikely(netif_queue_stopped(dev) && skb->next))
 32.1404 ++			return NETDEV_TX_BUSY;
 32.1405 ++	} while (skb->next);
 32.1406 ++	
 32.1407 ++	skb->destructor = DEV_GSO_CB(skb)->destructor;
 32.1408 ++
 32.1409 ++out_kfree_skb:
 32.1410 ++	kfree_skb(skb);
 32.1411 + 	return 0;
 32.1412 + }
 32.1413 + 
 32.1414 + #define HARD_TX_LOCK(dev, cpu) {			\
 32.1415 + 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 32.1416 +-		spin_lock(&dev->xmit_lock);		\
 32.1417 +-		dev->xmit_lock_owner = cpu;		\
 32.1418 ++		netif_tx_lock(dev);			\
 32.1419 + 	}						\
 32.1420 + }
 32.1421 + 
 32.1422 + #define HARD_TX_UNLOCK(dev) {				\
 32.1423 + 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 32.1424 +-		dev->xmit_lock_owner = -1;		\
 32.1425 +-		spin_unlock(&dev->xmit_lock);		\
 32.1426 ++		netif_tx_unlock(dev);			\
 32.1427 + 	}						\
 32.1428 + }
 32.1429 + 
 32.1430 +@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
 32.1431 + 	struct Qdisc *q;
 32.1432 + 	int rc = -ENOMEM;
 32.1433 + 
 32.1434 ++	/* GSO will handle the following emulations directly. */
 32.1435 ++	if (netif_needs_gso(dev, skb))
 32.1436 ++		goto gso;
 32.1437 ++
 32.1438 + 	if (skb_shinfo(skb)->frag_list &&
 32.1439 + 	    !(dev->features & NETIF_F_FRAGLIST) &&
 32.1440 +-	    __skb_linearize(skb, GFP_ATOMIC))
 32.1441 ++	    __skb_linearize(skb))
 32.1442 + 		goto out_kfree_skb;
 32.1443 + 
 32.1444 + 	/* Fragmented skb is linearized if device does not support SG,
 32.1445 +@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
 32.1446 + 	 */
 32.1447 + 	if (skb_shinfo(skb)->nr_frags &&
 32.1448 + 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
 32.1449 +-	    __skb_linearize(skb, GFP_ATOMIC))
 32.1450 ++	    __skb_linearize(skb))
 32.1451 + 		goto out_kfree_skb;
 32.1452 + 
 32.1453 + 	/* If packet is not checksummed and device does not support
 32.1454 + 	 * checksumming for this protocol, complete checksumming here.
 32.1455 + 	 */
 32.1456 + 	if (skb->ip_summed == CHECKSUM_HW &&
 32.1457 +-	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
 32.1458 ++	    (!(dev->features & NETIF_F_GEN_CSUM) &&
 32.1459 + 	     (!(dev->features & NETIF_F_IP_CSUM) ||
 32.1460 + 	      skb->protocol != htons(ETH_P_IP))))
 32.1461 + 	      	if (skb_checksum_help(skb, 0))
 32.1462 + 	      		goto out_kfree_skb;
 32.1463 + 
 32.1464 ++gso:
 32.1465 + 	spin_lock_prefetch(&dev->queue_lock);
 32.1466 + 
 32.1467 + 	/* Disable soft irqs for various locks below. Also 
 32.1468 + 	 * stops preemption for RCU. 
 32.1469 + 	 */
 32.1470 +-	local_bh_disable(); 
 32.1471 ++	rcu_read_lock_bh(); 
 32.1472 + 
 32.1473 + 	/* Updates of qdisc are serialized by queue_lock. 
 32.1474 + 	 * The struct Qdisc which is pointed to by qdisc is now a 
 32.1475 +@@ -1309,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 32.1476 + 	/* The device has no queue. Common case for software devices:
 32.1477 + 	   loopback, all the sorts of tunnels...
 32.1478 + 
 32.1479 +-	   Really, it is unlikely that xmit_lock protection is necessary here.
 32.1480 +-	   (f.e. loopback and IP tunnels are clean ignoring statistics
 32.1481 ++	   Really, it is unlikely that netif_tx_lock protection is necessary
 32.1482 ++	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
 32.1483 + 	   counters.)
 32.1484 + 	   However, it is possible, that they rely on protection
 32.1485 + 	   made by us here.
 32.1486 +@@ -1326,11 +1404,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 32.1487 + 			HARD_TX_LOCK(dev, cpu);
 32.1488 + 
 32.1489 + 			if (!netif_queue_stopped(dev)) {
 32.1490 +-				if (netdev_nit)
 32.1491 +-					dev_queue_xmit_nit(skb, dev);
 32.1492 +-
 32.1493 + 				rc = 0;
 32.1494 +-				if (!dev->hard_start_xmit(skb, dev)) {
 32.1495 ++				if (!dev_hard_start_xmit(skb, dev)) {
 32.1496 + 					HARD_TX_UNLOCK(dev);
 32.1497 + 					goto out;
 32.1498 + 				}
 32.1499 +@@ -1349,13 +1424,13 @@ int dev_queue_xmit(struct sk_buff *skb)
 32.1500 + 	}
 32.1501 + 
 32.1502 + 	rc = -ENETDOWN;
 32.1503 +-	local_bh_enable();
 32.1504 ++	rcu_read_unlock_bh();
 32.1505 + 
 32.1506 + out_kfree_skb:
 32.1507 + 	kfree_skb(skb);
 32.1508 + 	return rc;
 32.1509 + out:
 32.1510 +-	local_bh_enable();
 32.1511 ++	rcu_read_unlock_bh();
 32.1512 + 	return rc;
 32.1513 + }
 32.1514 + 
 32.1515 +@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
 32.1516 + 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 32.1517 + 
 32.1518 + 	spin_lock_init(&dev->queue_lock);
 32.1519 +-	spin_lock_init(&dev->xmit_lock);
 32.1520 ++	spin_lock_init(&dev->_xmit_lock);
 32.1521 + 	dev->xmit_lock_owner = -1;
 32.1522 + #ifdef CONFIG_NET_CLS_ACT
 32.1523 + 	spin_lock_init(&dev->ingress_lock);
 32.1524 +@@ -2714,9 +2789,7 @@ int register_netdevice(struct net_device
 32.1525 + 
 32.1526 + 	/* Fix illegal SG+CSUM combinations. */
 32.1527 + 	if ((dev->features & NETIF_F_SG) &&
 32.1528 +-	    !(dev->features & (NETIF_F_IP_CSUM |
 32.1529 +-			       NETIF_F_NO_CSUM |
 32.1530 +-			       NETIF_F_HW_CSUM))) {
 32.1531 ++	    !(dev->features & NETIF_F_ALL_CSUM)) {
 32.1532 + 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
 32.1533 + 		       dev->name);
 32.1534 + 		dev->features &= ~NETIF_F_SG;
 32.1535 +@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
 32.1536 + EXPORT_SYMBOL(__dev_get_by_index);
 32.1537 + EXPORT_SYMBOL(__dev_get_by_name);
 32.1538 + EXPORT_SYMBOL(__dev_remove_pack);
 32.1539 +-EXPORT_SYMBOL(__skb_linearize);
 32.1540 + EXPORT_SYMBOL(dev_valid_name);
 32.1541 + EXPORT_SYMBOL(dev_add_pack);
 32.1542 + EXPORT_SYMBOL(dev_alloc_name);
 32.1543 +diff -pruN ../orig-linux-2.6.16.29/net/core/dev_mcast.c ./net/core/dev_mcast.c
 32.1544 +--- ../orig-linux-2.6.16.29/net/core/dev_mcast.c	2006-09-12 19:02:10.000000000 +0100
 32.1545 ++++ ./net/core/dev_mcast.c	2006-09-19 13:59:20.000000000 +0100
 32.1546 +@@ -62,7 +62,7 @@
 32.1547 +  *	Device mc lists are changed by bh at least if IPv6 is enabled,
 32.1548 +  *	so that it must be bh protected.
 32.1549 +  *
 32.1550 +- *	We block accesses to device mc filters with dev->xmit_lock.
 32.1551 ++ *	We block accesses to device mc filters with netif_tx_lock.
 32.1552 +  */
 32.1553 + 
 32.1554 + /*
 32.1555 +@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
 32.1556 + 
 32.1557 + void dev_mc_upload(struct net_device *dev)
 32.1558 + {
 32.1559 +-	spin_lock_bh(&dev->xmit_lock);
 32.1560 ++	netif_tx_lock_bh(dev);
 32.1561 + 	__dev_mc_upload(dev);
 32.1562 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1563 ++	netif_tx_unlock_bh(dev);
 32.1564 + }
 32.1565 + 
 32.1566 + /*
 32.1567 +@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
 32.1568 + 	int err = 0;
 32.1569 + 	struct dev_mc_list *dmi, **dmip;
 32.1570 + 
 32.1571 +-	spin_lock_bh(&dev->xmit_lock);
 32.1572 ++	netif_tx_lock_bh(dev);
 32.1573 + 
 32.1574 + 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
 32.1575 + 		/*
 32.1576 +@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
 32.1577 + 			 */
 32.1578 + 			__dev_mc_upload(dev);
 32.1579 + 			
 32.1580 +-			spin_unlock_bh(&dev->xmit_lock);
 32.1581 ++			netif_tx_unlock_bh(dev);
 32.1582 + 			return 0;
 32.1583 + 		}
 32.1584 + 	}
 32.1585 + 	err = -ENOENT;
 32.1586 + done:
 32.1587 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1588 ++	netif_tx_unlock_bh(dev);
 32.1589 + 	return err;
 32.1590 + }
 32.1591 + 
 32.1592 +@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
 32.1593 + 
 32.1594 + 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 32.1595 + 
 32.1596 +-	spin_lock_bh(&dev->xmit_lock);
 32.1597 ++	netif_tx_lock_bh(dev);
 32.1598 + 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
 32.1599 + 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
 32.1600 + 		    dmi->dmi_addrlen == alen) {
 32.1601 +@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
 32.1602 + 	}
 32.1603 + 
 32.1604 + 	if ((dmi = dmi1) == NULL) {
 32.1605 +-		spin_unlock_bh(&dev->xmit_lock);
 32.1606 ++		netif_tx_unlock_bh(dev);
 32.1607 + 		return -ENOMEM;
 32.1608 + 	}
 32.1609 + 	memcpy(dmi->dmi_addr, addr, alen);
 32.1610 +@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
 32.1611 + 
 32.1612 + 	__dev_mc_upload(dev);
 32.1613 + 	
 32.1614 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1615 ++	netif_tx_unlock_bh(dev);
 32.1616 + 	return 0;
 32.1617 + 
 32.1618 + done:
 32.1619 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1620 ++	netif_tx_unlock_bh(dev);
 32.1621 + 	kfree(dmi1);
 32.1622 + 	return err;
 32.1623 + }
 32.1624 +@@ -204,7 +204,7 @@ done:
 32.1625 + 
 32.1626 + void dev_mc_discard(struct net_device *dev)
 32.1627 + {
 32.1628 +-	spin_lock_bh(&dev->xmit_lock);
 32.1629 ++	netif_tx_lock_bh(dev);
 32.1630 + 	
 32.1631 + 	while (dev->mc_list != NULL) {
 32.1632 + 		struct dev_mc_list *tmp = dev->mc_list;
 32.1633 +@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
 32.1634 + 	}
 32.1635 + 	dev->mc_count = 0;
 32.1636 + 
 32.1637 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1638 ++	netif_tx_unlock_bh(dev);
 32.1639 + }
 32.1640 + 
 32.1641 + #ifdef CONFIG_PROC_FS
 32.1642 +@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
 32.1643 + 	struct dev_mc_list *m;
 32.1644 + 	struct net_device *dev = v;
 32.1645 + 
 32.1646 +-	spin_lock_bh(&dev->xmit_lock);
 32.1647 ++	netif_tx_lock_bh(dev);
 32.1648 + 	for (m = dev->mc_list; m; m = m->next) {
 32.1649 + 		int i;
 32.1650 + 
 32.1651 +@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
 32.1652 + 
 32.1653 + 		seq_putc(seq, '\n');
 32.1654 + 	}
 32.1655 +-	spin_unlock_bh(&dev->xmit_lock);
 32.1656 ++	netif_tx_unlock_bh(dev);
 32.1657 + 	return 0;
 32.1658 + }
 32.1659 + 
 32.1660 +diff -pruN ../orig-linux-2.6.16.29/net/core/ethtool.c ./net/core/ethtool.c
 32.1661 +--- ../orig-linux-2.6.16.29/net/core/ethtool.c	2006-09-12 19:02:10.000000000 +0100
 32.1662 ++++ ./net/core/ethtool.c	2006-09-19 13:59:20.000000000 +0100
 32.1663 +@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
 32.1664 + 
 32.1665 + u32 ethtool_op_get_tx_csum(struct net_device *dev)
 32.1666 + {
 32.1667 +-	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
 32.1668 ++	return (dev->features & NETIF_F_ALL_CSUM) != 0;
 32.1669 + }
 32.1670 + 
 32.1671 + int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 32.1672