ia64/xen-unstable

changeset 12590:a5aadc4c6bb6

[LINUX] Update to 2.6.16.30.

No changes required to the sparse tree or to the patches themselves; the patches directory is renamed from linux-2.6.16.29 to linux-2.6.16.30 to match the new kernel version.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Mon Nov 27 13:50:01 2006 +0000 (2006-11-27)
parents 277bdeee88f8
children 447ac06f74d3
files
	buildconfigs/mk.linux-2.6-xen
	patches/linux-2.6.16.29/blktap-aio-16_03_06.patch
	patches/linux-2.6.16.29/device_bind.patch
	patches/linux-2.6.16.29/fix-hz-suspend.patch
	patches/linux-2.6.16.29/fix-ide-cd-pio-mode.patch
	patches/linux-2.6.16.29/i386-mach-io-check-nmi.patch
	patches/linux-2.6.16.29/ipv6-no-autoconf.patch
	patches/linux-2.6.16.29/net-csum.patch
	patches/linux-2.6.16.29/net-gso-0-base.patch
	patches/linux-2.6.16.29/net-gso-1-check-dodgy.patch
	patches/linux-2.6.16.29/net-gso-2-checksum-fix.patch
	patches/linux-2.6.16.29/net-gso-3-fix-errorcheck.patch
	patches/linux-2.6.16.29/net-gso-4-kill-warnon.patch
	patches/linux-2.6.16.29/net-gso-5-rcv-mss.patch
	patches/linux-2.6.16.29/pci-mmconfig-fix-from-2.6.17.patch
	patches/linux-2.6.16.29/pmd-shared.patch
	patches/linux-2.6.16.29/rcu_needs_cpu.patch
	patches/linux-2.6.16.29/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
	patches/linux-2.6.16.29/series
	patches/linux-2.6.16.29/smp-alts.patch
	patches/linux-2.6.16.29/tpm_plugin_2.6.17.patch
	patches/linux-2.6.16.29/x86-elfnote-as-preprocessor-macro.patch
	patches/linux-2.6.16.29/x86-increase-interrupt-vector-range.patch
	patches/linux-2.6.16.29/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
	patches/linux-2.6.16.29/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
	patches/linux-2.6.16.29/xen-hotplug.patch
	patches/linux-2.6.16.29/xenoprof-generic.patch
	patches/linux-2.6.16.30/blktap-aio-16_03_06.patch
	patches/linux-2.6.16.30/device_bind.patch
	patches/linux-2.6.16.30/fix-hz-suspend.patch
	patches/linux-2.6.16.30/fix-ide-cd-pio-mode.patch
	patches/linux-2.6.16.30/i386-mach-io-check-nmi.patch
	patches/linux-2.6.16.30/ipv6-no-autoconf.patch
	patches/linux-2.6.16.30/net-csum.patch
	patches/linux-2.6.16.30/net-gso-0-base.patch
	patches/linux-2.6.16.30/net-gso-1-check-dodgy.patch
	patches/linux-2.6.16.30/net-gso-2-checksum-fix.patch
	patches/linux-2.6.16.30/net-gso-3-fix-errorcheck.patch
	patches/linux-2.6.16.30/net-gso-4-kill-warnon.patch
	patches/linux-2.6.16.30/net-gso-5-rcv-mss.patch
	patches/linux-2.6.16.30/pci-mmconfig-fix-from-2.6.17.patch
	patches/linux-2.6.16.30/pmd-shared.patch
	patches/linux-2.6.16.30/rcu_needs_cpu.patch
	patches/linux-2.6.16.30/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
	patches/linux-2.6.16.30/series
	patches/linux-2.6.16.30/smp-alts.patch
	patches/linux-2.6.16.30/tpm_plugin_2.6.17.patch
	patches/linux-2.6.16.30/x86-elfnote-as-preprocessor-macro.patch
	patches/linux-2.6.16.30/x86-increase-interrupt-vector-range.patch
	patches/linux-2.6.16.30/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
	patches/linux-2.6.16.30/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
	patches/linux-2.6.16.30/xen-hotplug.patch
	patches/linux-2.6.16.30/xenoprof-generic.patch
     1.1 --- a/buildconfigs/mk.linux-2.6-xen	Mon Nov 27 13:22:21 2006 +0000
     1.2 +++ b/buildconfigs/mk.linux-2.6-xen	Mon Nov 27 13:50:01 2006 +0000
     1.3 @@ -1,5 +1,5 @@
     1.4  LINUX_SERIES = 2.6
     1.5 -LINUX_VER    = 2.6.16.29
     1.6 +LINUX_VER    = 2.6.16.30
     1.7  
     1.8  EXTRAVERSION ?= xen
     1.9  
     2.1 --- a/patches/linux-2.6.16.29/blktap-aio-16_03_06.patch	Mon Nov 27 13:22:21 2006 +0000
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,294 +0,0 @@
     2.4 -diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
     2.5 ---- ../orig-linux-2.6.16.29/fs/aio.c	2006-09-12 19:02:10.000000000 +0100
     2.6 -+++ ./fs/aio.c	2006-09-19 13:58:49.000000000 +0100
     2.7 -@@ -34,6 +34,11 @@
     2.8 - #include <asm/uaccess.h>
     2.9 - #include <asm/mmu_context.h>
    2.10 - 
    2.11 -+#ifdef CONFIG_EPOLL
    2.12 -+#include <linux/poll.h>
    2.13 -+#include <linux/eventpoll.h>
    2.14 -+#endif
    2.15 -+
    2.16 - #if DEBUG > 1
    2.17 - #define dprintk		printk
    2.18 - #else
    2.19 -@@ -1016,6 +1021,10 @@ put_rq:
    2.20 - 	if (waitqueue_active(&ctx->wait))
    2.21 - 		wake_up(&ctx->wait);
    2.22 - 
    2.23 -+#ifdef CONFIG_EPOLL
    2.24 -+	if (ctx->file && waitqueue_active(&ctx->poll_wait))
    2.25 -+		wake_up(&ctx->poll_wait);
    2.26 -+#endif
    2.27 - 	if (ret)
    2.28 - 		put_ioctx(ctx);
    2.29 - 
    2.30 -@@ -1025,6 +1034,8 @@ put_rq:
    2.31 - /* aio_read_evt
    2.32 -  *	Pull an event off of the ioctx's event ring.  Returns the number of 
    2.33 -  *	events fetched (0 or 1 ;-)
    2.34 -+ *	If ent parameter is 0, just returns the number of events that would
    2.35 -+ *	be fetched.
    2.36 -  *	FIXME: make this use cmpxchg.
    2.37 -  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
    2.38 -  */
    2.39 -@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
    2.40 - 
    2.41 - 	head = ring->head % info->nr;
    2.42 - 	if (head != ring->tail) {
    2.43 --		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
    2.44 --		*ent = *evp;
    2.45 --		head = (head + 1) % info->nr;
    2.46 --		smp_mb(); /* finish reading the event before updatng the head */
    2.47 --		ring->head = head;
    2.48 --		ret = 1;
    2.49 --		put_aio_ring_event(evp, KM_USER1);
    2.50 -+		if (ent) { /* event requested */
    2.51 -+			struct io_event *evp =
    2.52 -+				aio_ring_event(info, head, KM_USER1);
    2.53 -+			*ent = *evp;
    2.54 -+			head = (head + 1) % info->nr;
    2.55 -+			/* finish reading the event before updatng the head */
    2.56 -+			smp_mb();
    2.57 -+			ring->head = head;
    2.58 -+			ret = 1;
    2.59 -+			put_aio_ring_event(evp, KM_USER1);
    2.60 -+		} else /* only need to know availability */
    2.61 -+			ret = 1;
    2.62 - 	}
    2.63 - 	spin_unlock(&info->ring_lock);
    2.64 - 
    2.65 -@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
    2.66 - 
    2.67 - 	aio_cancel_all(ioctx);
    2.68 - 	wait_for_all_aios(ioctx);
    2.69 -+#ifdef CONFIG_EPOLL
    2.70 -+	/* forget the poll file, but it's up to the user to close it */
    2.71 -+	if (ioctx->file) {
    2.72 -+		ioctx->file->private_data = 0;
    2.73 -+		ioctx->file = 0;
    2.74 -+	}
    2.75 -+#endif
    2.76 - 	put_ioctx(ioctx);	/* once for the lookup */
    2.77 - }
    2.78 - 
    2.79 -+#ifdef CONFIG_EPOLL
    2.80 -+
    2.81 -+static int aio_queue_fd_close(struct inode *inode, struct file *file)
    2.82 -+{
    2.83 -+	struct kioctx *ioctx = file->private_data;
    2.84 -+	if (ioctx) {
    2.85 -+		file->private_data = 0;
    2.86 -+		spin_lock_irq(&ioctx->ctx_lock);
    2.87 -+		ioctx->file = 0;
    2.88 -+		spin_unlock_irq(&ioctx->ctx_lock);
    2.89 -+	}
    2.90 -+	return 0;
    2.91 -+}
    2.92 -+
    2.93 -+static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
    2.94 -+{	unsigned int pollflags = 0;
    2.95 -+	struct kioctx *ioctx = file->private_data;
    2.96 -+
    2.97 -+	if (ioctx) {
    2.98 -+
    2.99 -+		spin_lock_irq(&ioctx->ctx_lock);
   2.100 -+		/* Insert inside our poll wait queue */
   2.101 -+		poll_wait(file, &ioctx->poll_wait, wait);
   2.102 -+
   2.103 -+		/* Check our condition */
   2.104 -+		if (aio_read_evt(ioctx, 0))
   2.105 -+			pollflags = POLLIN | POLLRDNORM;
   2.106 -+		spin_unlock_irq(&ioctx->ctx_lock);
   2.107 -+	}
   2.108 -+
   2.109 -+	return pollflags;
   2.110 -+}
   2.111 -+
   2.112 -+static struct file_operations aioq_fops = {
   2.113 -+	.release	= aio_queue_fd_close,
   2.114 -+	.poll		= aio_queue_fd_poll
   2.115 -+};
   2.116 -+
   2.117 -+/* make_aio_fd:
   2.118 -+ *  Create a file descriptor that can be used to poll the event queue.
   2.119 -+ *  Based and piggybacked on the excellent epoll code.
   2.120 -+ */
   2.121 -+
   2.122 -+static int make_aio_fd(struct kioctx *ioctx)
   2.123 -+{
   2.124 -+	int error, fd;
   2.125 -+	struct inode *inode;
   2.126 -+	struct file *file;
   2.127 -+
   2.128 -+	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
   2.129 -+	if (error)
   2.130 -+		return error;
   2.131 -+
   2.132 -+	/* associate the file with the IO context */
   2.133 -+	file->private_data = ioctx;
   2.134 -+	ioctx->file = file;
   2.135 -+	init_waitqueue_head(&ioctx->poll_wait);
   2.136 -+	return fd;
   2.137 -+}
   2.138 -+#endif
   2.139 -+
   2.140 -+
   2.141 - /* sys_io_setup:
   2.142 -  *	Create an aio_context capable of receiving at least nr_events.
   2.143 -  *	ctxp must not point to an aio_context that already exists, and
   2.144 -@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
   2.145 -  *	resources are available.  May fail with -EFAULT if an invalid
   2.146 -  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
   2.147 -  *	implemented.
   2.148 -+ *
   2.149 -+ *	To request a selectable fd, the user context has to be initialized
   2.150 -+ *	to 1, instead of 0, and the return value is the fd.
   2.151 -+ *	This keeps the system call compatible, since a non-zero value
   2.152 -+ *	was not allowed so far.
   2.153 -  */
   2.154 - asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
   2.155 - {
   2.156 - 	struct kioctx *ioctx = NULL;
   2.157 - 	unsigned long ctx;
   2.158 - 	long ret;
   2.159 -+	int make_fd = 0;
   2.160 - 
   2.161 - 	ret = get_user(ctx, ctxp);
   2.162 - 	if (unlikely(ret))
   2.163 - 		goto out;
   2.164 - 
   2.165 - 	ret = -EINVAL;
   2.166 -+#ifdef CONFIG_EPOLL
   2.167 -+	if (ctx == 1) {
   2.168 -+		make_fd = 1;
   2.169 -+		ctx = 0;
   2.170 -+	}
   2.171 -+#endif
   2.172 - 	if (unlikely(ctx || nr_events == 0)) {
   2.173 - 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
   2.174 - 		         ctx, nr_events);
   2.175 -@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
   2.176 - 	ret = PTR_ERR(ioctx);
   2.177 - 	if (!IS_ERR(ioctx)) {
   2.178 - 		ret = put_user(ioctx->user_id, ctxp);
   2.179 --		if (!ret)
   2.180 --			return 0;
   2.181 -+#ifdef CONFIG_EPOLL
   2.182 -+		if (make_fd && ret >= 0)
   2.183 -+			ret = make_aio_fd(ioctx);
   2.184 -+#endif
   2.185 -+		if (ret >= 0)
   2.186 -+			return ret;
   2.187 - 
   2.188 - 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
   2.189 - 		io_destroy(ioctx);
   2.190 -diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
   2.191 ---- ../orig-linux-2.6.16.29/fs/eventpoll.c	2006-09-12 19:02:10.000000000 +0100
   2.192 -+++ ./fs/eventpoll.c	2006-09-19 13:58:49.000000000 +0100
   2.193 -@@ -235,8 +235,6 @@ struct ep_pqueue {
   2.194 - 
   2.195 - static void ep_poll_safewake_init(struct poll_safewake *psw);
   2.196 - static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
   2.197 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   2.198 --		    struct eventpoll *ep);
   2.199 - static int ep_alloc(struct eventpoll **pep);
   2.200 - static void ep_free(struct eventpoll *ep);
   2.201 - static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
   2.202 -@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
   2.203 - static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
   2.204 - 		   int maxevents, long timeout);
   2.205 - static int eventpollfs_delete_dentry(struct dentry *dentry);
   2.206 --static struct inode *ep_eventpoll_inode(void);
   2.207 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops);
   2.208 - static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
   2.209 - 					      int flags, const char *dev_name,
   2.210 - 					      void *data);
   2.211 -@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
   2.212 - 	 * Creates all the items needed to setup an eventpoll file. That is,
   2.213 - 	 * a file structure, and inode and a free file descriptor.
   2.214 - 	 */
   2.215 --	error = ep_getfd(&fd, &inode, &file, ep);
   2.216 -+	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
   2.217 - 	if (error)
   2.218 - 		goto eexit_2;
   2.219 - 
   2.220 -@@ -710,8 +708,8 @@ eexit_1:
   2.221 - /*
   2.222 -  * Creates the file descriptor to be used by the epoll interface.
   2.223 -  */
   2.224 --static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   2.225 --		    struct eventpoll *ep)
   2.226 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   2.227 -+		    struct eventpoll *ep, struct file_operations *fops)
   2.228 - {
   2.229 - 	struct qstr this;
   2.230 - 	char name[32];
   2.231 -@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
   2.232 - 		goto eexit_1;
   2.233 - 
   2.234 - 	/* Allocates an inode from the eventpoll file system */
   2.235 --	inode = ep_eventpoll_inode();
   2.236 -+	inode = ep_eventpoll_inode(fops);
   2.237 - 	error = PTR_ERR(inode);
   2.238 - 	if (IS_ERR(inode))
   2.239 - 		goto eexit_2;
   2.240 -@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
   2.241 - 
   2.242 - 	file->f_pos = 0;
   2.243 - 	file->f_flags = O_RDONLY;
   2.244 --	file->f_op = &eventpoll_fops;
   2.245 -+	file->f_op = fops;
   2.246 - 	file->f_mode = FMODE_READ;
   2.247 - 	file->f_version = 0;
   2.248 - 	file->private_data = ep;
   2.249 -@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
   2.250 - }
   2.251 - 
   2.252 - 
   2.253 --static struct inode *ep_eventpoll_inode(void)
   2.254 -+static struct inode *ep_eventpoll_inode(struct file_operations *fops)
   2.255 - {
   2.256 - 	int error = -ENOMEM;
   2.257 - 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
   2.258 -@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
   2.259 - 	if (!inode)
   2.260 - 		goto eexit_1;
   2.261 - 
   2.262 --	inode->i_fop = &eventpoll_fops;
   2.263 -+	inode->i_fop = fops;
   2.264 - 
   2.265 - 	/*
   2.266 - 	 * Mark the inode dirty from the very beginning,
   2.267 -diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
   2.268 ---- ../orig-linux-2.6.16.29/include/linux/aio.h	2006-09-12 19:02:10.000000000 +0100
   2.269 -+++ ./include/linux/aio.h	2006-09-19 13:58:49.000000000 +0100
   2.270 -@@ -191,6 +191,11 @@ struct kioctx {
   2.271 - 	struct aio_ring_info	ring_info;
   2.272 - 
   2.273 - 	struct work_struct	wq;
   2.274 -+#ifdef CONFIG_EPOLL
   2.275 -+	// poll integration
   2.276 -+	wait_queue_head_t       poll_wait;
   2.277 -+	struct file		*file;
   2.278 -+#endif
   2.279 - };
   2.280 - 
   2.281 - /* prototypes */
   2.282 -diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
   2.283 ---- ../orig-linux-2.6.16.29/include/linux/eventpoll.h	2006-09-12 19:02:10.000000000 +0100
   2.284 -+++ ./include/linux/eventpoll.h	2006-09-19 13:58:49.000000000 +0100
   2.285 -@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
   2.286 - }
   2.287 - 
   2.288 - 
   2.289 -+/*
   2.290 -+ * called by aio code to create fd that can poll the  aio event queueQ
   2.291 -+ */
   2.292 -+struct eventpoll;
   2.293 -+int ep_getfd(int *efd, struct inode **einode, struct file **efile,
   2.294 -+             struct eventpoll *ep, struct file_operations *fops);
   2.295 - #else
   2.296 - 
   2.297 - static inline void eventpoll_init_file(struct file *file) {}
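
Note: the patch removed above (and carried forward unchanged under patches/linux-2.6.16.30/) lets a caller obtain a pollable file descriptor for an AIO event queue by initialising the io_setup() context to 1, as its comments describe. A minimal userspace sketch of that interface, assuming a kernel with this patch applied and CONFIG_EPOLL enabled (the program is illustrative only and not part of the changeset):

    /* Hedged example: consuming the pollable AIO queue added to sys_io_setup().
     * This behaviour exists only with the patch above and CONFIG_EPOLL=y. */
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    int main(void)
    {
            /* Initialising the context to 1 (normally it must be 0) asks the
             * patched io_setup() to also create a file descriptor; the fd is
             * the syscall's return value, and the real context id is written
             * back through the pointer. */
            aio_context_t ctx = 1;
            long fd = syscall(SYS_io_setup, 64, &ctx);
            if (fd < 0) {
                    perror("io_setup");
                    return 1;
            }

            /* In real use, I/O would be submitted with io_submit() first;
             * with nothing pending this poll() simply blocks.  The fd becomes
             * readable once completion events are queued. */
            struct pollfd pfd = { .fd = (int)fd, .events = POLLIN };
            poll(&pfd, 1, -1);

            syscall(SYS_io_destroy, ctx);
            close((int)fd);
            return 0;
    }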
     3.1 --- a/patches/linux-2.6.16.29/device_bind.patch	Mon Nov 27 13:22:21 2006 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,15 +0,0 @@
     3.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/base/bus.c ./drivers/base/bus.c
     3.5 ---- ../orig-linux-2.6.16.29/drivers/base/bus.c	2006-09-12 19:02:10.000000000 +0100
     3.6 -+++ ./drivers/base/bus.c	2006-09-19 13:58:54.000000000 +0100
     3.7 -@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
     3.8 - 		up(&dev->sem);
     3.9 - 		if (dev->parent)
    3.10 - 			up(&dev->parent->sem);
    3.11 -+
    3.12 -+		if (err > 0) 		/* success */
    3.13 -+			err = count;
    3.14 -+		else if (err == 0)	/* driver didn't accept device */
    3.15 -+			err = -ENODEV;
    3.16 - 	}
    3.17 - 	put_device(dev);
    3.18 - 	put_bus(bus);
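
For reference, the fix above changes what userspace sees when writing to a driver's sysfs "bind" attribute: a successful bind now reports the full byte count, and a driver that declines the device yields -ENODEV. A hypothetical illustration (the sysfs path and device id are placeholders, not taken from the changeset):

    /* Sketch of driver_bind()'s corrected return values as observed from
     * userspace; driver name and device id below are placeholders. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *dev = "0000:00:03.0";   /* placeholder PCI device */
            int fd = open("/sys/bus/pci/drivers/e1000/bind", O_WRONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            ssize_t n = write(fd, dev, strlen(dev));
            if (n == (ssize_t)strlen(dev))
                    puts("bind succeeded");      /* err > 0 mapped to count */
            else
                    perror("write");             /* ENODEV: driver refused device */
            close(fd);
            return 0;
    }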
     4.1 --- a/patches/linux-2.6.16.29/fix-hz-suspend.patch	Mon Nov 27 13:22:21 2006 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,26 +0,0 @@
     4.4 -diff -pruN ../orig-linux-2.6.16.29/kernel/timer.c ./kernel/timer.c
     4.5 ---- ../orig-linux-2.6.16.29/kernel/timer.c	2006-09-12 19:02:10.000000000 +0100
     4.6 -+++ ./kernel/timer.c	2006-09-19 13:58:58.000000000 +0100
     4.7 -@@ -555,6 +555,22 @@ found:
     4.8 - 	}
     4.9 - 	spin_unlock(&base->t_base.lock);
    4.10 - 
    4.11 -+	/*
    4.12 -+	 * It can happen that other CPUs service timer IRQs and increment
    4.13 -+	 * jiffies, but we have not yet got a local timer tick to process
    4.14 -+	 * the timer wheels.  In that case, the expiry time can be before
    4.15 -+	 * jiffies, but since the high-resolution timer here is relative to
    4.16 -+	 * jiffies, the default expression when high-resolution timers are
    4.17 -+	 * not active,
    4.18 -+	 *
    4.19 -+	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
    4.20 -+	 *
    4.21 -+	 * would falsely evaluate to true.  If that is the case, just
    4.22 -+	 * return jiffies so that we can immediately fire the local timer
    4.23 -+	 */
    4.24 -+	if (time_before(expires, jiffies))
    4.25 -+		return jiffies;
    4.26 -+
    4.27 - 	if (time_before(hr_expires, expires))
    4.28 - 		return hr_expires;
    4.29 - 
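
The guard added above relies on the kernel's wrap-safe jiffies comparison; a minimal userspace sketch of that comparison (the macro is reproduced for illustration and the values are made up):

    /* Illustration only: time_before() as used by the added guard.  When the
     * expiry already lies behind jiffies (other CPUs advanced jiffies while
     * this CPU missed local ticks), the next-event code returns jiffies so
     * the local timer fires immediately. */
    #include <stdio.h>

    #define time_before(a, b)  ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long jiffies = 100000UL;
            unsigned long expires =  99990UL;   /* already in the past */

            if (time_before(expires, jiffies))
                    printf("overdue: fire at jiffies=%lu\n", jiffies);
            return 0;
    }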
     5.1 --- a/patches/linux-2.6.16.29/fix-ide-cd-pio-mode.patch	Mon Nov 27 13:22:21 2006 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,18 +0,0 @@
     5.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
     5.5 ---- ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c	2006-09-12 19:02:10.000000000 +0100
     5.6 -+++ ./drivers/ide/ide-lib.c	2006-09-19 13:59:03.000000000 +0100
     5.7 -@@ -410,10 +410,10 @@ void ide_toggle_bounce(ide_drive_t *driv
     5.8 - {
     5.9 - 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
    5.10 - 
    5.11 --	if (!PCI_DMA_BUS_IS_PHYS) {
    5.12 --		addr = BLK_BOUNCE_ANY;
    5.13 --	} else if (on && drive->media == ide_disk) {
    5.14 --		if (HWIF(drive)->pci_dev)
    5.15 -+	if (on && drive->media == ide_disk) {
    5.16 -+		if (!PCI_DMA_BUS_IS_PHYS)
    5.17 -+			addr = BLK_BOUNCE_ANY;
    5.18 -+		else if (HWIF(drive)->pci_dev)
    5.19 - 			addr = HWIF(drive)->pci_dev->dma_mask;
    5.20 - 	}
    5.21 - 
     6.1 --- a/patches/linux-2.6.16.29/i386-mach-io-check-nmi.patch	Mon Nov 27 13:22:21 2006 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,45 +0,0 @@
     6.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
     6.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c	2006-09-12 19:02:10.000000000 +0100
     6.6 -+++ ./arch/i386/kernel/traps.c	2006-09-19 13:59:06.000000000 +0100
     6.7 -@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
     6.8 - 
     6.9 - static void io_check_error(unsigned char reason, struct pt_regs * regs)
    6.10 - {
    6.11 --	unsigned long i;
    6.12 --
    6.13 - 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
    6.14 - 	show_registers(regs);
    6.15 - 
    6.16 - 	/* Re-enable the IOCK line, wait for a few seconds */
    6.17 --	reason = (reason & 0xf) | 8;
    6.18 --	outb(reason, 0x61);
    6.19 --	i = 2000;
    6.20 --	while (--i) udelay(1000);
    6.21 --	reason &= ~8;
    6.22 --	outb(reason, 0x61);
    6.23 -+	clear_io_check_error(reason);
    6.24 - }
    6.25 - 
    6.26 - static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
    6.27 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
    6.28 ---- ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h	2006-09-12 19:02:10.000000000 +0100
    6.29 -+++ ./include/asm-i386/mach-default/mach_traps.h	2006-09-19 13:59:06.000000000 +0100
    6.30 -@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
    6.31 - 	outb(reason, 0x61);
    6.32 - }
    6.33 - 
    6.34 -+static inline void clear_io_check_error(unsigned char reason)
    6.35 -+{
    6.36 -+	unsigned long i;
    6.37 -+
    6.38 -+	reason = (reason & 0xf) | 8;
    6.39 -+	outb(reason, 0x61);
    6.40 -+	i = 2000;
    6.41 -+	while (--i) udelay(1000);
    6.42 -+	reason &= ~8;
    6.43 -+	outb(reason, 0x61);
    6.44 -+}
    6.45 -+
    6.46 - static inline unsigned char get_nmi_reason(void)
    6.47 - {
    6.48 - 	return inb(0x61);
     7.1 --- a/patches/linux-2.6.16.29/ipv6-no-autoconf.patch	Mon Nov 27 13:22:21 2006 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,19 +0,0 @@
     7.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/addrconf.c ./net/ipv6/addrconf.c
     7.5 ---- ../orig-linux-2.6.16.29/net/ipv6/addrconf.c	2006-09-12 19:02:10.000000000 +0100
     7.6 -+++ ./net/ipv6/addrconf.c	2006-09-19 13:59:11.000000000 +0100
     7.7 -@@ -2471,6 +2471,7 @@ static void addrconf_dad_start(struct in
     7.8 - 	spin_lock_bh(&ifp->lock);
     7.9 - 
    7.10 - 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
    7.11 -+	    !(dev->flags&IFF_MULTICAST) ||
    7.12 - 	    !(ifp->flags&IFA_F_TENTATIVE)) {
    7.13 - 		ifp->flags &= ~IFA_F_TENTATIVE;
    7.14 - 		spin_unlock_bh(&ifp->lock);
    7.15 -@@ -2555,6 +2556,7 @@ static void addrconf_dad_completed(struc
    7.16 - 	if (ifp->idev->cnf.forwarding == 0 &&
    7.17 - 	    ifp->idev->cnf.rtr_solicits > 0 &&
    7.18 - 	    (dev->flags&IFF_LOOPBACK) == 0 &&
    7.19 -+	    (dev->flags & IFF_MULTICAST) &&
    7.20 - 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
    7.21 - 		struct in6_addr all_routers;
    7.22 - 
     8.1 --- a/patches/linux-2.6.16.29/net-csum.patch	Mon Nov 27 13:22:21 2006 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,63 +0,0 @@
     8.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
     8.5 ---- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-12 19:02:10.000000000 +0100
     8.6 -+++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-19 13:59:15.000000000 +0100
     8.7 -@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
     8.8 - 	if (hdrsize < sizeof(*hdr))
     8.9 - 		return 1;
    8.10 - 
    8.11 --	hdr->check = ip_nat_cheat_check(~oldip, newip,
    8.12 -+#ifdef CONFIG_XEN
    8.13 -+	if ((*pskb)->proto_csum_blank)
    8.14 -+		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    8.15 -+	else
    8.16 -+#endif
    8.17 -+		hdr->check = ip_nat_cheat_check(~oldip, newip,
    8.18 - 					ip_nat_cheat_check(oldport ^ 0xFFFF,
    8.19 - 							   newport,
    8.20 - 							   hdr->check));
    8.21 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
    8.22 ---- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-12 19:02:10.000000000 +0100
    8.23 -+++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-19 13:59:15.000000000 +0100
    8.24 -@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
    8.25 - 		newport = tuple->dst.u.udp.port;
    8.26 - 		portptr = &hdr->dest;
    8.27 - 	}
    8.28 --	if (hdr->check) /* 0 is a special case meaning no checksum */
    8.29 --		hdr->check = ip_nat_cheat_check(~oldip, newip,
    8.30 -+	if (hdr->check) { /* 0 is a special case meaning no checksum */
    8.31 -+#ifdef CONFIG_XEN
    8.32 -+		if ((*pskb)->proto_csum_blank)
    8.33 -+			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
    8.34 -+		else
    8.35 -+#endif
    8.36 -+			hdr->check = ip_nat_cheat_check(~oldip, newip,
    8.37 - 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
    8.38 - 							   newport,
    8.39 - 							   hdr->check));
    8.40 -+	}
    8.41 - 	*portptr = newport;
    8.42 - 	return 1;
    8.43 - }
    8.44 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
    8.45 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-12 19:02:10.000000000 +0100
    8.46 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
    8.47 -@@ -17,6 +17,8 @@
    8.48 - #include <net/xfrm.h>
    8.49 - #include <net/icmp.h>
    8.50 - 
    8.51 -+extern int skb_checksum_setup(struct sk_buff *skb);
    8.52 -+
    8.53 - /* Add encapsulation header.
    8.54 -  *
    8.55 -  * In transport mode, the IP header will be moved forward to make space
    8.56 -@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
    8.57 - 	struct xfrm_state *x = dst->xfrm;
    8.58 - 	int err;
    8.59 - 	
    8.60 -+	err = skb_checksum_setup(skb);
    8.61 -+	if (err)
    8.62 -+		goto error_nolock;
    8.63 -+
    8.64 - 	if (skb->ip_summed == CHECKSUM_HW) {
    8.65 - 		err = skb_checksum_help(skb, 0);
    8.66 - 		if (err)
     9.1 --- a/patches/linux-2.6.16.29/net-gso-0-base.patch	Mon Nov 27 13:22:21 2006 +0000
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,2835 +0,0 @@
     9.4 -diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
     9.5 ---- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt	2006-09-12 19:02:10.000000000 +0100
     9.6 -+++ ./Documentation/networking/netdevices.txt	2006-09-19 13:59:20.000000000 +0100
     9.7 -@@ -42,9 +42,9 @@ dev->get_stats:
     9.8 - 	Context: nominally process, but don't sleep inside an rwlock
     9.9 - 
    9.10 - dev->hard_start_xmit:
    9.11 --	Synchronization: dev->xmit_lock spinlock.
    9.12 -+	Synchronization: netif_tx_lock spinlock.
    9.13 - 	When the driver sets NETIF_F_LLTX in dev->features this will be
    9.14 --	called without holding xmit_lock. In this case the driver 
    9.15 -+	called without holding netif_tx_lock. In this case the driver
    9.16 - 	has to lock by itself when needed. It is recommended to use a try lock
    9.17 - 	for this and return -1 when the spin lock fails. 
    9.18 - 	The locking there should also properly protect against 
    9.19 -@@ -62,12 +62,12 @@ dev->hard_start_xmit:
    9.20 - 	  Only valid when NETIF_F_LLTX is set.
    9.21 - 
    9.22 - dev->tx_timeout:
    9.23 --	Synchronization: dev->xmit_lock spinlock.
    9.24 -+	Synchronization: netif_tx_lock spinlock.
    9.25 - 	Context: BHs disabled
    9.26 - 	Notes: netif_queue_stopped() is guaranteed true
    9.27 - 
    9.28 - dev->set_multicast_list:
    9.29 --	Synchronization: dev->xmit_lock spinlock.
    9.30 -+	Synchronization: netif_tx_lock spinlock.
    9.31 - 	Context: BHs disabled
    9.32 - 
    9.33 - dev->poll:
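
The netdevices.txt hunk above documents the locking contract that the rest of this patch renames from dev->xmit_lock to netif_tx_lock. A sketch of the NETIF_F_LLTX convention it describes (a generic driver skeleton assumed for illustration, not code from the patch):

    /* With NETIF_F_LLTX the core does not hold netif_tx_lock around
     * hard_start_xmit, so the driver serialises transmits with its own lock
     * and returns -1 when the try-lock fails so the packet is requeued. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct lltx_priv {
            spinlock_t tx_lock;                  /* driver-private TX lock */
    };

    static int lltx_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct lltx_priv *priv = netdev_priv(dev);

            if (!spin_trylock(&priv->tx_lock))
                    return -1;                   /* core retries the skb later */

            /* ... hand the skb to the hardware ring here ... */

            spin_unlock(&priv->tx_lock);
            return 0;
    }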
    9.34 -diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
    9.35 ---- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c	2006-09-12 19:02:10.000000000 +0100
    9.36 -+++ ./drivers/block/aoe/aoenet.c	2006-09-19 13:59:20.000000000 +0100
    9.37 -@@ -95,9 +95,8 @@ mac_addr(char addr[6])
    9.38 - static struct sk_buff *
    9.39 - skb_check(struct sk_buff *skb)
    9.40 - {
    9.41 --	if (skb_is_nonlinear(skb))
    9.42 - 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
    9.43 --	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
    9.44 -+	if (skb_linearize(skb)) {
    9.45 - 		dev_kfree_skb(skb);
    9.46 - 		return NULL;
    9.47 - 	}
    9.48 -diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    9.49 ---- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-12 19:02:10.000000000 +0100
    9.50 -+++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 13:59:20.000000000 +0100
    9.51 -@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
    9.52 - 
    9.53 - 	ipoib_mcast_stop_thread(dev, 0);
    9.54 - 
    9.55 --	spin_lock_irqsave(&dev->xmit_lock, flags);
    9.56 -+	local_irq_save(flags);
    9.57 -+	netif_tx_lock(dev);
    9.58 - 	spin_lock(&priv->lock);
    9.59 - 
    9.60 - 	/*
    9.61 -@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
    9.62 - 	}
    9.63 - 
    9.64 - 	spin_unlock(&priv->lock);
    9.65 --	spin_unlock_irqrestore(&dev->xmit_lock, flags);
    9.66 -+	netif_tx_unlock(dev);
    9.67 -+	local_irq_restore(flags);
    9.68 - 
    9.69 - 	/* We have to cancel outside of the spinlock */
    9.70 - 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
    9.71 -diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
    9.72 ---- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c	2006-09-12 19:02:10.000000000 +0100
    9.73 -+++ ./drivers/media/dvb/dvb-core/dvb_net.c	2006-09-19 13:59:20.000000000 +0100
    9.74 -@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
    9.75 - 
    9.76 - 	dvb_net_feed_stop(dev);
    9.77 - 	priv->rx_mode = RX_MODE_UNI;
    9.78 --	spin_lock_bh(&dev->xmit_lock);
    9.79 -+	netif_tx_lock_bh(dev);
    9.80 - 
    9.81 - 	if (dev->flags & IFF_PROMISC) {
    9.82 - 		dprintk("%s: promiscuous mode\n", dev->name);
    9.83 -@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
    9.84 - 		}
    9.85 - 	}
    9.86 - 
    9.87 --	spin_unlock_bh(&dev->xmit_lock);
    9.88 -+	netif_tx_unlock_bh(dev);
    9.89 - 	dvb_net_feed_start(dev);
    9.90 - }
    9.91 - 
    9.92 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
    9.93 ---- ../orig-linux-2.6.16.29/drivers/net/8139cp.c	2006-09-12 19:02:10.000000000 +0100
    9.94 -+++ ./drivers/net/8139cp.c	2006-09-19 13:59:20.000000000 +0100
    9.95 -@@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
    9.96 - 	entry = cp->tx_head;
    9.97 - 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
    9.98 - 	if (dev->features & NETIF_F_TSO)
    9.99 --		mss = skb_shinfo(skb)->tso_size;
   9.100 -+		mss = skb_shinfo(skb)->gso_size;
   9.101 - 
   9.102 - 	if (skb_shinfo(skb)->nr_frags == 0) {
   9.103 - 		struct cp_desc *txd = &cp->tx_ring[entry];
   9.104 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
   9.105 ---- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-12 19:02:10.000000000 +0100
   9.106 -+++ ./drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
   9.107 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
   9.108 - 		skb = tx_buf->skb;
   9.109 - #ifdef BCM_TSO 
   9.110 - 		/* partial BD completions possible with TSO packets */
   9.111 --		if (skb_shinfo(skb)->tso_size) {
   9.112 -+		if (skb_shinfo(skb)->gso_size) {
   9.113 - 			u16 last_idx, last_ring_idx;
   9.114 - 
   9.115 - 			last_idx = sw_cons +
   9.116 -@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
   9.117 - 	return 1;
   9.118 - }
   9.119 - 
   9.120 --/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
   9.121 -+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
   9.122 -  * from set_multicast.
   9.123 -  */
   9.124 - static void
   9.125 -@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
   9.126 - }
   9.127 - #endif
   9.128 - 
   9.129 --/* Called with dev->xmit_lock.
   9.130 -+/* Called with netif_tx_lock.
   9.131 -  * hard_start_xmit is pseudo-lockless - a lock is only required when
   9.132 -  * the tx queue is full. This way, we get the benefit of lockless
   9.133 -  * operations most of the time without the complexities to handle
   9.134 -@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
   9.135 - 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
   9.136 - 	}
   9.137 - #ifdef BCM_TSO 
   9.138 --	if ((mss = skb_shinfo(skb)->tso_size) &&
   9.139 -+	if ((mss = skb_shinfo(skb)->gso_size) &&
   9.140 - 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
   9.141 - 		u32 tcp_opt_len, ip_tcp_len;
   9.142 - 
   9.143 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
   9.144 ---- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c	2006-09-12 19:02:10.000000000 +0100
   9.145 -+++ ./drivers/net/bonding/bond_main.c	2006-09-19 13:59:20.000000000 +0100
   9.146 -@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
   9.147 - }
   9.148 - 
   9.149 - #define BOND_INTERSECT_FEATURES \
   9.150 --	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
   9.151 --	NETIF_F_TSO|NETIF_F_UFO)
   9.152 -+	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
   9.153 - 
   9.154 - /* 
   9.155 -  * Compute the common dev->feature set available to all slaves.  Some
   9.156 -@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
   9.157 - 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
   9.158 - 
   9.159 - 	if ((features & NETIF_F_SG) && 
   9.160 --	    !(features & (NETIF_F_IP_CSUM |
   9.161 --			  NETIF_F_NO_CSUM |
   9.162 --			  NETIF_F_HW_CSUM)))
   9.163 -+	    !(features & NETIF_F_ALL_CSUM))
   9.164 - 		features &= ~NETIF_F_SG;
   9.165 - 
   9.166 - 	/* 
   9.167 -@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
   9.168 - 	 */
   9.169 - 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
   9.170 - 
   9.171 --	/* don't acquire bond device's xmit_lock when 
   9.172 -+	/* don't acquire bond device's netif_tx_lock when
   9.173 - 	 * transmitting */
   9.174 - 	bond_dev->features |= NETIF_F_LLTX;
   9.175 - 
   9.176 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
   9.177 ---- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-12 19:02:10.000000000 +0100
   9.178 -+++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
   9.179 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   9.180 - 	struct cpl_tx_pkt *cpl;
   9.181 - 
   9.182 - #ifdef NETIF_F_TSO
   9.183 --	if (skb_shinfo(skb)->tso_size) {
   9.184 -+	if (skb_shinfo(skb)->gso_size) {
   9.185 - 		int eth_type;
   9.186 - 		struct cpl_tx_pkt_lso *hdr;
   9.187 - 
   9.188 -@@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   9.189 - 		hdr->ip_hdr_words = skb->nh.iph->ihl;
   9.190 - 		hdr->tcp_hdr_words = skb->h.th->doff;
   9.191 - 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
   9.192 --						skb_shinfo(skb)->tso_size));
   9.193 -+						skb_shinfo(skb)->gso_size));
   9.194 - 		hdr->len = htonl(skb->len - sizeof(*hdr));
   9.195 - 		cpl = (struct cpl_tx_pkt *)hdr;
   9.196 - 		sge->stats.tx_lso_pkts++;
   9.197 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
   9.198 ---- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-12 19:02:10.000000000 +0100
   9.199 -+++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
   9.200 -@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
   9.201 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   9.202 - 	int err;
   9.203 - 
   9.204 --	if (skb_shinfo(skb)->tso_size) {
   9.205 -+	if (skb_shinfo(skb)->gso_size) {
   9.206 - 		if (skb_header_cloned(skb)) {
   9.207 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   9.208 - 			if (err)
   9.209 -@@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
   9.210 - 		}
   9.211 - 
   9.212 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   9.213 --		mss = skb_shinfo(skb)->tso_size;
   9.214 -+		mss = skb_shinfo(skb)->gso_size;
   9.215 - 		if (skb->protocol == ntohs(ETH_P_IP)) {
   9.216 - 			skb->nh.iph->tot_len = 0;
   9.217 - 			skb->nh.iph->check = 0;
   9.218 -@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
   9.219 - 		 * tso gets written back prematurely before the data is fully
   9.220 - 		 * DMAd to the controller */
   9.221 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
   9.222 --				!skb_shinfo(skb)->tso_size) {
   9.223 -+				!skb_shinfo(skb)->gso_size) {
   9.224 - 			tx_ring->last_tx_tso = 0;
   9.225 - 			size -= 4;
   9.226 - 		}
   9.227 -@@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   9.228 - 	}
   9.229 - 
   9.230 - #ifdef NETIF_F_TSO
   9.231 --	mss = skb_shinfo(skb)->tso_size;
   9.232 -+	mss = skb_shinfo(skb)->gso_size;
   9.233 - 	/* The controller does a simple calculation to 
   9.234 - 	 * make sure there is enough room in the FIFO before
   9.235 - 	 * initiating the DMA for each buffer.  The calc is:
   9.236 -@@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   9.237 - #ifdef NETIF_F_TSO
   9.238 - 	/* Controller Erratum workaround */
   9.239 - 	if (!skb->data_len && tx_ring->last_tx_tso &&
   9.240 --		!skb_shinfo(skb)->tso_size)
   9.241 -+		!skb_shinfo(skb)->gso_size)
   9.242 - 		count++;
   9.243 - #endif
   9.244 - 
   9.245 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
   9.246 ---- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-12 19:02:10.000000000 +0100
   9.247 -+++ ./drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
   9.248 -@@ -482,9 +482,9 @@ typedef union _ring_type {
   9.249 -  * critical parts:
   9.250 -  * - rx is (pseudo-) lockless: it relies on the single-threading provided
   9.251 -  *	by the arch code for interrupts.
   9.252 -- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
   9.253 -+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
   9.254 -  *	needs dev->priv->lock :-(
   9.255 -- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
   9.256 -+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
   9.257 -  */
   9.258 - 
   9.259 - /* in dev: base, irq */
   9.260 -@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
   9.261 - 
   9.262 - /*
   9.263 -  * nv_start_xmit: dev->hard_start_xmit function
   9.264 -- * Called with dev->xmit_lock held.
   9.265 -+ * Called with netif_tx_lock held.
   9.266 -  */
   9.267 - static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
   9.268 - {
   9.269 -@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
   9.270 - 	np->tx_skbuff[nr] = skb;
   9.271 - 
   9.272 - #ifdef NETIF_F_TSO
   9.273 --	if (skb_shinfo(skb)->tso_size)
   9.274 --		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
   9.275 -+	if (skb_shinfo(skb)->gso_size)
   9.276 -+		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   9.277 - 	else
   9.278 - #endif
   9.279 - 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
   9.280 -@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
   9.281 - 
   9.282 - /*
   9.283 -  * nv_tx_timeout: dev->tx_timeout function
   9.284 -- * Called with dev->xmit_lock held.
   9.285 -+ * Called with netif_tx_lock held.
   9.286 -  */
   9.287 - static void nv_tx_timeout(struct net_device *dev)
   9.288 - {
   9.289 -@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
   9.290 - 		 * Changing the MTU is a rare event, it shouldn't matter.
   9.291 - 		 */
   9.292 - 		disable_irq(dev->irq);
   9.293 --		spin_lock_bh(&dev->xmit_lock);
   9.294 -+		netif_tx_lock_bh(dev);
   9.295 - 		spin_lock(&np->lock);
   9.296 - 		/* stop engines */
   9.297 - 		nv_stop_rx(dev);
   9.298 -@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
   9.299 - 		nv_start_rx(dev);
   9.300 - 		nv_start_tx(dev);
   9.301 - 		spin_unlock(&np->lock);
   9.302 --		spin_unlock_bh(&dev->xmit_lock);
   9.303 -+		netif_tx_unlock_bh(dev);
   9.304 - 		enable_irq(dev->irq);
   9.305 - 	}
   9.306 - 	return 0;
   9.307 -@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
   9.308 - 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
   9.309 - 
   9.310 - 	if (netif_running(dev)) {
   9.311 --		spin_lock_bh(&dev->xmit_lock);
   9.312 -+		netif_tx_lock_bh(dev);
   9.313 - 		spin_lock_irq(&np->lock);
   9.314 - 
   9.315 - 		/* stop rx engine */
   9.316 -@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
   9.317 - 		/* restart rx engine */
   9.318 - 		nv_start_rx(dev);
   9.319 - 		spin_unlock_irq(&np->lock);
   9.320 --		spin_unlock_bh(&dev->xmit_lock);
   9.321 -+		netif_tx_unlock_bh(dev);
   9.322 - 	} else {
   9.323 - 		nv_copy_mac_to_hw(dev);
   9.324 - 	}
   9.325 -@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
   9.326 - 
   9.327 - /*
   9.328 -  * nv_set_multicast: dev->set_multicast function
   9.329 -- * Called with dev->xmit_lock held.
   9.330 -+ * Called with netif_tx_lock held.
   9.331 -  */
   9.332 - static void nv_set_multicast(struct net_device *dev)
   9.333 - {
   9.334 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
   9.335 ---- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c	2006-09-12 19:02:10.000000000 +0100
   9.336 -+++ ./drivers/net/hamradio/6pack.c	2006-09-19 13:59:20.000000000 +0100
   9.337 -@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
   9.338 - {
   9.339 - 	struct sockaddr_ax25 *sa = addr;
   9.340 - 
   9.341 --	spin_lock_irq(&dev->xmit_lock);
   9.342 -+	netif_tx_lock_bh(dev);
   9.343 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   9.344 --	spin_unlock_irq(&dev->xmit_lock);
   9.345 -+	netif_tx_unlock_bh(dev);
   9.346 - 
   9.347 - 	return 0;
   9.348 - }
   9.349 -@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
   9.350 - 			break;
   9.351 - 		}
   9.352 - 
   9.353 --		spin_lock_irq(&dev->xmit_lock);
   9.354 -+		netif_tx_lock_bh(dev);
   9.355 - 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
   9.356 --		spin_unlock_irq(&dev->xmit_lock);
   9.357 -+		netif_tx_unlock_bh(dev);
   9.358 - 
   9.359 - 		err = 0;
   9.360 - 		break;
   9.361 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
   9.362 ---- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c	2006-09-12 19:02:10.000000000 +0100
   9.363 -+++ ./drivers/net/hamradio/mkiss.c	2006-09-19 13:59:20.000000000 +0100
   9.364 -@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
   9.365 - {
   9.366 - 	struct sockaddr_ax25 *sa = addr;
   9.367 - 
   9.368 --	spin_lock_irq(&dev->xmit_lock);
   9.369 -+	netif_tx_lock_bh(dev);
   9.370 - 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   9.371 --	spin_unlock_irq(&dev->xmit_lock);
   9.372 -+	netif_tx_unlock_bh(dev);
   9.373 - 
   9.374 - 	return 0;
   9.375 - }
   9.376 -@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
   9.377 - 			break;
   9.378 - 		}
   9.379 - 
   9.380 --		spin_lock_irq(&dev->xmit_lock);
   9.381 -+		netif_tx_lock_bh(dev);
   9.382 - 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
   9.383 --		spin_unlock_irq(&dev->xmit_lock);
   9.384 -+		netif_tx_unlock_bh(dev);
   9.385 - 
   9.386 - 		err = 0;
   9.387 - 		break;
   9.388 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
   9.389 ---- ../orig-linux-2.6.16.29/drivers/net/ifb.c	2006-09-12 19:02:10.000000000 +0100
   9.390 -+++ ./drivers/net/ifb.c	2006-09-19 13:59:20.000000000 +0100
   9.391 -@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
   9.392 - 	dp->st_task_enter++;
   9.393 - 	if ((skb = skb_peek(&dp->tq)) == NULL) {
   9.394 - 		dp->st_txq_refl_try++;
   9.395 --		if (spin_trylock(&_dev->xmit_lock)) {
   9.396 -+		if (netif_tx_trylock(_dev)) {
   9.397 - 			dp->st_rxq_enter++;
   9.398 - 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
   9.399 - 				skb_queue_tail(&dp->tq, skb);
   9.400 - 				dp->st_rx2tx_tran++;
   9.401 - 			}
   9.402 --			spin_unlock(&_dev->xmit_lock);
   9.403 -+			netif_tx_unlock(_dev);
   9.404 - 		} else {
   9.405 - 			/* reschedule */
   9.406 - 			dp->st_rxq_notenter++;
   9.407 -@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
   9.408 - 		}
   9.409 - 	}
   9.410 - 
   9.411 --	if (spin_trylock(&_dev->xmit_lock)) {
   9.412 -+	if (netif_tx_trylock(_dev)) {
   9.413 - 		dp->st_rxq_check++;
   9.414 - 		if ((skb = skb_peek(&dp->rq)) == NULL) {
   9.415 - 			dp->tasklet_pending = 0;
   9.416 -@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
   9.417 - 				netif_wake_queue(_dev);
   9.418 - 		} else {
   9.419 - 			dp->st_rxq_rsch++;
   9.420 --			spin_unlock(&_dev->xmit_lock);
   9.421 -+			netif_tx_unlock(_dev);
   9.422 - 			goto resched;
   9.423 - 		}
   9.424 --		spin_unlock(&_dev->xmit_lock);
   9.425 -+		netif_tx_unlock(_dev);
   9.426 - 	} else {
   9.427 - resched:
   9.428 - 		dp->tasklet_pending = 1;
   9.429 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
   9.430 ---- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c	2006-09-12 19:02:10.000000000 +0100
   9.431 -+++ ./drivers/net/irda/vlsi_ir.c	2006-09-19 13:59:20.000000000 +0100
   9.432 -@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
   9.433 - 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
   9.434 - 			    	break;
   9.435 - 			udelay(100);
   9.436 --			/* must not sleep here - we are called under xmit_lock! */
   9.437 -+			/* must not sleep here - called under netif_tx_lock! */
   9.438 - 		}
   9.439 - 	}
   9.440 - 
   9.441 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
   9.442 ---- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-12 19:02:10.000000000 +0100
   9.443 -+++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
   9.444 -@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
   9.445 - 	uint16_t ipcse, tucse, mss;
   9.446 - 	int err;
   9.447 - 
   9.448 --	if(likely(skb_shinfo(skb)->tso_size)) {
   9.449 -+	if(likely(skb_shinfo(skb)->gso_size)) {
   9.450 - 		if (skb_header_cloned(skb)) {
   9.451 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   9.452 - 			if (err)
   9.453 -@@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
   9.454 - 		}
   9.455 - 
   9.456 - 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   9.457 --		mss = skb_shinfo(skb)->tso_size;
   9.458 -+		mss = skb_shinfo(skb)->gso_size;
   9.459 - 		skb->nh.iph->tot_len = 0;
   9.460 - 		skb->nh.iph->check = 0;
   9.461 - 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
   9.462 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
   9.463 ---- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-12 19:02:10.000000000 +0100
   9.464 -+++ ./drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
   9.465 -@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
   9.466 - 	struct iphdr *iph = skb->nh.iph;
   9.467 - 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
   9.468 - 	unsigned int doffset = (iph->ihl + th->doff) * 4;
   9.469 --	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
   9.470 -+	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
   9.471 - 	unsigned int offset = 0;
   9.472 - 	u32 seq = ntohl(th->seq);
   9.473 - 	u16 id  = ntohs(iph->id);
   9.474 -@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
   9.475 - #endif
   9.476 - 
   9.477 - #ifdef LOOPBACK_TSO
   9.478 --	if (skb_shinfo(skb)->tso_size) {
   9.479 -+	if (skb_shinfo(skb)->gso_size) {
   9.480 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   9.481 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   9.482 - 
   9.483 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
   9.484 ---- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c	2006-09-12 19:02:10.000000000 +0100
   9.485 -+++ ./drivers/net/mv643xx_eth.c	2006-09-19 13:59:20.000000000 +0100
   9.486 -@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
   9.487 - 
   9.488 - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
   9.489 - 	if (has_tiny_unaligned_frags(skb)) {
   9.490 --		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
   9.491 -+		if (__skb_linearize(skb)) {
   9.492 - 			stats->tx_dropped++;
   9.493 - 			printk(KERN_DEBUG "%s: failed to linearize tiny "
   9.494 - 					"unaligned fragment\n", dev->name);
   9.495 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
   9.496 ---- ../orig-linux-2.6.16.29/drivers/net/natsemi.c	2006-09-12 19:02:10.000000000 +0100
   9.497 -+++ ./drivers/net/natsemi.c	2006-09-19 13:59:20.000000000 +0100
   9.498 -@@ -323,12 +323,12 @@ performance critical codepaths:
   9.499 - The rx process only runs in the interrupt handler. Access from outside
   9.500 - the interrupt handler is only permitted after disable_irq().
   9.501 - 
   9.502 --The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
   9.503 -+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
   9.504 - is set, then access is permitted under spin_lock_irq(&np->lock).
   9.505 - 
   9.506 - Thus configuration functions that want to access everything must call
   9.507 - 	disable_irq(dev->irq);
   9.508 --	spin_lock_bh(dev->xmit_lock);
   9.509 -+	netif_tx_lock_bh(dev);
   9.510 - 	spin_lock_irq(&np->lock);
   9.511 - 
   9.512 - IV. Notes
   9.513 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
   9.514 ---- ../orig-linux-2.6.16.29/drivers/net/r8169.c	2006-09-12 19:02:10.000000000 +0100
   9.515 -+++ ./drivers/net/r8169.c	2006-09-19 13:59:20.000000000 +0100
   9.516 -@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
   9.517 - static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
   9.518 - {
   9.519 - 	if (dev->features & NETIF_F_TSO) {
   9.520 --		u32 mss = skb_shinfo(skb)->tso_size;
   9.521 -+		u32 mss = skb_shinfo(skb)->gso_size;
   9.522 - 
   9.523 - 		if (mss)
   9.524 - 			return LargeSend | ((mss & MSSMask) << MSSShift);
   9.525 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
   9.526 ---- ../orig-linux-2.6.16.29/drivers/net/s2io.c	2006-09-12 19:02:10.000000000 +0100
   9.527 -+++ ./drivers/net/s2io.c	2006-09-19 13:59:20.000000000 +0100
   9.528 -@@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
   9.529 - 	txdp->Control_1 = 0;
   9.530 - 	txdp->Control_2 = 0;
   9.531 - #ifdef NETIF_F_TSO
   9.532 --	mss = skb_shinfo(skb)->tso_size;
   9.533 --	if (mss) {
   9.534 -+	mss = skb_shinfo(skb)->gso_size;
   9.535 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
   9.536 - 		txdp->Control_1 |= TXD_TCP_LSO_EN;
   9.537 - 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
   9.538 - 	}
   9.539 -@@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
   9.540 - 	}
   9.541 - 
   9.542 - 	frg_len = skb->len - skb->data_len;
   9.543 --	if (skb_shinfo(skb)->ufo_size) {
   9.544 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
   9.545 - 		int ufo_size;
   9.546 - 
   9.547 --		ufo_size = skb_shinfo(skb)->ufo_size;
   9.548 -+		ufo_size = skb_shinfo(skb)->gso_size;
   9.549 - 		ufo_size &= ~7;
   9.550 - 		txdp->Control_1 |= TXD_UFO_EN;
   9.551 - 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
   9.552 -@@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
   9.553 - 	txdp->Host_Control = (unsigned long) skb;
   9.554 - 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
   9.555 - 
   9.556 --	if (skb_shinfo(skb)->ufo_size)
   9.557 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   9.558 - 		txdp->Control_1 |= TXD_UFO_EN;
   9.559 - 
   9.560 - 	frg_cnt = skb_shinfo(skb)->nr_frags;
   9.561 -@@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
   9.562 - 		    (sp->pdev, frag->page, frag->page_offset,
   9.563 - 		     frag->size, PCI_DMA_TODEVICE);
   9.564 - 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
   9.565 --		if (skb_shinfo(skb)->ufo_size)
   9.566 -+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   9.567 - 			txdp->Control_1 |= TXD_UFO_EN;
   9.568 - 	}
   9.569 - 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
   9.570 - 
   9.571 --	if (skb_shinfo(skb)->ufo_size)
   9.572 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   9.573 - 		frg_cnt++; /* as Txd0 was used for inband header */
   9.574 - 
   9.575 - 	tx_fifo = mac_control->tx_FIFO_start[queue];
   9.576 -@@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
   9.577 - 	if (mss)
   9.578 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
   9.579 - #endif
   9.580 --	if (skb_shinfo(skb)->ufo_size)
   9.581 -+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   9.582 - 		val64 |= TX_FIFO_SPECIAL_FUNC;
   9.583 - 	writeq(val64, &tx_fifo->List_Control);
   9.584 - 
   9.585 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
   9.586 ---- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-12 19:02:10.000000000 +0100
   9.587 -+++ ./drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
   9.588 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   9.589 - 	count = sizeof(dma_addr_t) / sizeof(u32);
   9.590 - 	count += skb_shinfo(skb)->nr_frags * count;
   9.591 - 
   9.592 --	if (skb_shinfo(skb)->tso_size)
   9.593 -+	if (skb_shinfo(skb)->gso_size)
   9.594 - 		++count;
   9.595 - 
   9.596 - 	if (skb->ip_summed == CHECKSUM_HW)
   9.597 -@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
   9.598 - 	}
   9.599 - 
   9.600 - 	/* Check for TCP Segmentation Offload */
   9.601 --	mss = skb_shinfo(skb)->tso_size;
   9.602 -+	mss = skb_shinfo(skb)->gso_size;
   9.603 - 	if (mss != 0) {
   9.604 - 		/* just drop the packet if non-linear expansion fails */
   9.605 - 		if (skb_header_cloned(skb) &&
   9.606 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
   9.607 ---- ../orig-linux-2.6.16.29/drivers/net/tg3.c	2006-09-12 19:02:10.000000000 +0100
   9.608 -+++ ./drivers/net/tg3.c	2006-09-19 13:59:20.000000000 +0100
   9.609 -@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
   9.610 - #if TG3_TSO_SUPPORT != 0
   9.611 - 	mss = 0;
   9.612 - 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
   9.613 --	    (mss = skb_shinfo(skb)->tso_size) != 0) {
   9.614 -+	    (mss = skb_shinfo(skb)->gso_size) != 0) {
   9.615 - 		int tcp_opt_len, ip_tcp_len;
   9.616 - 
   9.617 - 		if (skb_header_cloned(skb) &&
   9.618 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
   9.619 ---- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c	2006-09-12 19:02:10.000000000 +0100
   9.620 -+++ ./drivers/net/tulip/winbond-840.c	2006-09-19 13:59:20.000000000 +0100
   9.621 -@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
   9.622 -  * - get_stats:
   9.623 -  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
   9.624 -  * - hard_start_xmit:
   9.625 -- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
   9.626 -+ * 	synchronize_irq + netif_tx_disable;
   9.627 -  * - tx_timeout:
   9.628 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   9.629 -+ * 	netif_device_detach + netif_tx_disable;
   9.630 -  * - set_multicast_list
   9.631 -- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   9.632 -+ * 	netif_device_detach + netif_tx_disable;
   9.633 -  * - interrupt handler
   9.634 -  * 	doesn't touch hw if not present, synchronize_irq waits for
   9.635 -  * 	running instances of the interrupt handler.
   9.636 -@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
   9.637 - 		netif_device_detach(dev);
   9.638 - 		update_csr6(dev, 0);
   9.639 - 		iowrite32(0, ioaddr + IntrEnable);
   9.640 --		netif_stop_queue(dev);
   9.641 - 		spin_unlock_irq(&np->lock);
   9.642 - 
   9.643 --		spin_unlock_wait(&dev->xmit_lock);
   9.644 - 		synchronize_irq(dev->irq);
   9.645 -+		netif_tx_disable(dev);
   9.646 - 	
   9.647 - 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
   9.648 - 
   9.649 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
   9.650 ---- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-12 19:02:10.000000000 +0100
   9.651 -+++ ./drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
   9.652 -@@ -340,7 +340,7 @@ enum state_values {
   9.653 - #endif
   9.654 - 
   9.655 - #if defined(NETIF_F_TSO)
   9.656 --#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
   9.657 -+#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
   9.658 - #define TSO_NUM_DESCRIPTORS	2
   9.659 - #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
   9.660 - #else
   9.661 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
   9.662 ---- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c	2006-09-12 19:02:10.000000000 +0100
   9.663 -+++ ./drivers/net/via-velocity.c	2006-09-19 13:59:20.000000000 +0100
   9.664 -@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
   9.665 - 
   9.666 - 	int pktlen = skb->len;
   9.667 - 
   9.668 -+#ifdef VELOCITY_ZERO_COPY_SUPPORT
   9.669 -+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
   9.670 -+		kfree_skb(skb);
   9.671 -+		return 0;
   9.672 -+	}
   9.673 -+#endif
   9.674 -+
   9.675 - 	spin_lock_irqsave(&vptr->lock, flags);
   9.676 - 
   9.677 - 	index = vptr->td_curr[qnum];
   9.678 -@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
   9.679 - 	 */
   9.680 - 	if (pktlen < ETH_ZLEN) {
   9.681 - 		/* Cannot occur until ZC support */
   9.682 --		if(skb_linearize(skb, GFP_ATOMIC))
   9.683 --			return 0; 
   9.684 - 		pktlen = ETH_ZLEN;
   9.685 - 		memcpy(tdinfo->buf, skb->data, skb->len);
   9.686 - 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
   9.687 -@@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff 
   9.688 - 		int nfrags = skb_shinfo(skb)->nr_frags;
   9.689 - 		tdinfo->skb = skb;
   9.690 - 		if (nfrags > 6) {
   9.691 --			skb_linearize(skb, GFP_ATOMIC);
   9.692 - 			memcpy(tdinfo->buf, skb->data, skb->len);
   9.693 - 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
   9.694 - 			td_ptr->tdesc0.pktsize = 
   9.695 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
   9.696 ---- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c	2006-09-12 19:02:10.000000000 +0100
   9.697 -+++ ./drivers/net/wireless/orinoco.c	2006-09-19 13:59:20.000000000 +0100
   9.698 -@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
   9.699 - 	/* Set promiscuity / multicast*/
   9.700 - 	priv->promiscuous = 0;
   9.701 - 	priv->mc_count = 0;
   9.702 --	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
   9.703 -+
   9.704 -+	/* FIXME: what about netif_tx_lock */
   9.705 -+	__orinoco_set_multicast_list(dev);
   9.706 - 
   9.707 - 	return 0;
   9.708 - }
   9.709 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
   9.710 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c	2006-09-12 19:02:10.000000000 +0100
   9.711 -+++ ./drivers/s390/net/qeth_eddp.c	2006-09-19 13:59:20.000000000 +0100
   9.712 -@@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
   9.713 -        }
   9.714 - 	tcph = eddp->skb->h.th;
   9.715 - 	while (eddp->skb_offset < eddp->skb->len) {
   9.716 --		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
   9.717 -+		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
   9.718 - 			       (int)(eddp->skb->len - eddp->skb_offset));
   9.719 - 		/* prepare qdio hdr */
   9.720 - 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
   9.721 -@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
   9.722 - 	
   9.723 - 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
   9.724 - 	/* can we put multiple skbs in one page? */
   9.725 --	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
   9.726 -+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
   9.727 - 	if (skbs_per_page > 1){
   9.728 --		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
   9.729 -+		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
   9.730 - 				 skbs_per_page + 1;
   9.731 - 		ctx->elements_per_skb = 1;
   9.732 - 	} else {
   9.733 - 		/* no -> how many elements per skb? */
   9.734 --		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
   9.735 -+		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
   9.736 - 				     PAGE_SIZE) >> PAGE_SHIFT;
   9.737 - 		ctx->num_pages = ctx->elements_per_skb *
   9.738 --				 (skb_shinfo(skb)->tso_segs + 1);
   9.739 -+				 (skb_shinfo(skb)->gso_segs + 1);
   9.740 - 	}
   9.741 - 	ctx->num_elements = ctx->elements_per_skb *
   9.742 --			    (skb_shinfo(skb)->tso_segs + 1);
   9.743 -+			    (skb_shinfo(skb)->gso_segs + 1);
   9.744 - }
   9.745 - 
   9.746 - static inline struct qeth_eddp_context *
   9.747 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
   9.748 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-12 19:02:10.000000000 +0100
   9.749 -+++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
   9.750 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
   9.751 - 	queue = card->qdio.out_qs
   9.752 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
   9.753 - 
   9.754 --	if (skb_shinfo(skb)->tso_size)
   9.755 -+	if (skb_shinfo(skb)->gso_size)
   9.756 - 		large_send = card->options.large_send;
   9.757 - 
   9.758 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
   9.759 -@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
   9.760 - 		card->stats.tx_packets++;
   9.761 - 		card->stats.tx_bytes += skb->len;
   9.762 - #ifdef CONFIG_QETH_PERF_STATS
   9.763 --		if (skb_shinfo(skb)->tso_size &&
   9.764 -+		if (skb_shinfo(skb)->gso_size &&
   9.765 - 		   !(large_send == QETH_LARGE_SEND_NO)) {
   9.766 - 			card->perf_stats.large_send_bytes += skb->len;
   9.767 - 			card->perf_stats.large_send_cnt++;
   9.768 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
   9.769 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h	2006-09-12 19:02:10.000000000 +0100
   9.770 -+++ ./drivers/s390/net/qeth_tso.h	2006-09-19 13:59:20.000000000 +0100
   9.771 -@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
   9.772 - 	hdr->ext.hdr_version = 1;
   9.773 - 	hdr->ext.hdr_len     = 28;
   9.774 - 	/*insert non-fix values */
   9.775 --	hdr->ext.mss = skb_shinfo(skb)->tso_size;
   9.776 -+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
   9.777 - 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
   9.778 - 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
   9.779 - 				       sizeof(struct qeth_hdr_tso));
   9.780 -diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
   9.781 ---- ../orig-linux-2.6.16.29/include/linux/ethtool.h	2006-09-12 19:02:10.000000000 +0100
   9.782 -+++ ./include/linux/ethtool.h	2006-09-19 13:59:20.000000000 +0100
   9.783 -@@ -408,6 +408,8 @@ struct ethtool_ops {
   9.784 - #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
   9.785 - #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
   9.786 - #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
   9.787 -+#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
   9.788 -+#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
   9.789 - 
   9.790 - /* compatibility with older code */
   9.791 - #define SPARC_ETH_GSET		ETHTOOL_GSET
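
(Aside, not part of the patch: the ETHTOOL_GGSO/ETHTOOL_SGSO commands added above follow the same ethtool_value convention as the existing GUFO/SUFO pair, so the flag can be queried from userspace with a plain SIOCETHTOOL ioctl. A minimal sketch follows; "eth0" is an arbitrary example device, and if your installed linux/ethtool.h predates these commands you would add the ETHTOOL_GGSO define yourself with the value 0x00000023 from the hunk above.)

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GGSO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example device name */
	ifr.ifr_data = (void *)&eval;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("GSO is %s\n", eval.data ? "on" : "off");
	else
		perror("ETHTOOL_GGSO");
	return 0;
}
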
   9.792 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
   9.793 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-12 19:02:10.000000000 +0100
   9.794 -+++ ./include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
   9.795 -@@ -230,7 +230,8 @@ enum netdev_state_t
   9.796 - 	__LINK_STATE_SCHED,
   9.797 - 	__LINK_STATE_NOCARRIER,
   9.798 - 	__LINK_STATE_RX_SCHED,
   9.799 --	__LINK_STATE_LINKWATCH_PENDING
   9.800 -+	__LINK_STATE_LINKWATCH_PENDING,
   9.801 -+	__LINK_STATE_QDISC_RUNNING,
   9.802 - };
   9.803 - 
   9.804 - 
   9.805 -@@ -306,9 +307,17 @@ struct net_device
   9.806 - #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
   9.807 - #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
   9.808 - #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
   9.809 --#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
   9.810 -+#define NETIF_F_GSO		2048	/* Enable software GSO. */
   9.811 - #define NETIF_F_LLTX		4096	/* LockLess TX */
   9.812 --#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
   9.813 -+
   9.814 -+	/* Segmentation offload features */
   9.815 -+#define NETIF_F_GSO_SHIFT	16
   9.816 -+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
   9.817 -+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
   9.818 -+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
   9.819 -+
   9.820 -+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
   9.821 -+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
   9.822 - 
   9.823 - 	struct net_device	*next_sched;
   9.824 - 
   9.825 -@@ -394,6 +403,9 @@ struct net_device
   9.826 - 	struct list_head	qdisc_list;
   9.827 - 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
   9.828 - 
   9.829 -+	/* Partially transmitted GSO packet. */
   9.830 -+	struct sk_buff		*gso_skb;
   9.831 -+
   9.832 - 	/* ingress path synchronizer */
   9.833 - 	spinlock_t		ingress_lock;
   9.834 - 	struct Qdisc		*qdisc_ingress;
   9.835 -@@ -402,7 +414,7 @@ struct net_device
   9.836 -  * One part is mostly used on xmit path (device)
   9.837 -  */
   9.838 - 	/* hard_start_xmit synchronizer */
   9.839 --	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
   9.840 -+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
   9.841 - 	/* cpu id of processor entered to hard_start_xmit or -1,
   9.842 - 	   if nobody entered there.
   9.843 - 	 */
   9.844 -@@ -527,6 +539,8 @@ struct packet_type {
   9.845 - 					 struct net_device *,
   9.846 - 					 struct packet_type *,
   9.847 - 					 struct net_device *);
   9.848 -+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
   9.849 -+						int features);
   9.850 - 	void			*af_packet_priv;
   9.851 - 	struct list_head	list;
   9.852 - };
   9.853 -@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
   9.854 - extern int		dev_set_mtu(struct net_device *, int);
   9.855 - extern int		dev_set_mac_address(struct net_device *,
   9.856 - 					    struct sockaddr *);
   9.857 --extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
   9.858 -+extern int		dev_hard_start_xmit(struct sk_buff *skb,
   9.859 -+					    struct net_device *dev);
   9.860 - 
   9.861 - extern void		dev_init(void);
   9.862 - 
   9.863 -@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
   9.864 - 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
   9.865 - }
   9.866 - 
   9.867 -+static inline void netif_tx_lock(struct net_device *dev)
   9.868 -+{
   9.869 -+	spin_lock(&dev->_xmit_lock);
   9.870 -+	dev->xmit_lock_owner = smp_processor_id();
   9.871 -+}
   9.872 -+
   9.873 -+static inline void netif_tx_lock_bh(struct net_device *dev)
   9.874 -+{
   9.875 -+	spin_lock_bh(&dev->_xmit_lock);
   9.876 -+	dev->xmit_lock_owner = smp_processor_id();
   9.877 -+}
   9.878 -+
   9.879 -+static inline int netif_tx_trylock(struct net_device *dev)
   9.880 -+{
   9.881 -+	int err = spin_trylock(&dev->_xmit_lock);
   9.882 -+	if (!err)
   9.883 -+		dev->xmit_lock_owner = smp_processor_id();
   9.884 -+	return err;
   9.885 -+}
   9.886 -+
   9.887 -+static inline void netif_tx_unlock(struct net_device *dev)
   9.888 -+{
   9.889 -+	dev->xmit_lock_owner = -1;
   9.890 -+	spin_unlock(&dev->_xmit_lock);
   9.891 -+}
   9.892 -+
   9.893 -+static inline void netif_tx_unlock_bh(struct net_device *dev)
   9.894 -+{
   9.895 -+	dev->xmit_lock_owner = -1;
   9.896 -+	spin_unlock_bh(&dev->_xmit_lock);
   9.897 -+}
   9.898 -+
   9.899 - static inline void netif_tx_disable(struct net_device *dev)
   9.900 - {
   9.901 --	spin_lock_bh(&dev->xmit_lock);
   9.902 -+	netif_tx_lock_bh(dev);
   9.903 - 	netif_stop_queue(dev);
   9.904 --	spin_unlock_bh(&dev->xmit_lock);
   9.905 -+	netif_tx_unlock_bh(dev);
   9.906 - }
   9.907 - 
   9.908 - /* These functions live elsewhere (drivers/net/net_init.c, but related) */
   9.909 -@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
   9.910 - extern int		weight_p;
   9.911 - extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
   9.912 - extern int skb_checksum_help(struct sk_buff *skb, int inward);
   9.913 -+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
   9.914 - #ifdef CONFIG_BUG
   9.915 - extern void netdev_rx_csum_fault(struct net_device *dev);
   9.916 - #else
   9.917 -@@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
   9.918 - 
   9.919 - extern void linkwatch_run_queue(void);
   9.920 - 
   9.921 -+static inline int skb_gso_ok(struct sk_buff *skb, int features)
   9.922 -+{
   9.923 -+	int feature = skb_shinfo(skb)->gso_size ?
   9.924 -+		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
   9.925 -+	return (features & feature) == feature;
   9.926 -+}
   9.927 -+
   9.928 -+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
   9.929 -+{
   9.930 -+	return !skb_gso_ok(skb, dev->features);
   9.931 -+}
   9.932 -+
   9.933 - #endif /* __KERNEL__ */
   9.934 - 
   9.935 - #endif	/* _LINUX_DEV_H */
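
(Illustrative note, not part of the patch: skb_gso_ok()/netif_needs_gso() above reduce to plain bit arithmetic -- the packet's gso_type, shifted up by NETIF_F_GSO_SHIFT, must be a subset of dev->features, otherwise the stack falls back to software GSO. A standalone sketch of that check, using only constant values copied from this hunk; gso_ok() is a hypothetical stand-in name, not a kernel symbol.)

#include <stdio.h>

#define NETIF_F_GSO_SHIFT	16
#define SKB_GSO_TCPV4		(1 << 0)
#define SKB_GSO_DODGY		(1 << 2)
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)

static int gso_ok(int gso_size, int gso_type, int dev_features)
{
	int feature = gso_size ? gso_type << NETIF_F_GSO_SHIFT : 0;
	return (dev_features & feature) == feature;
}

int main(void)
{
	/* TCPv4 GSO packet on a TSO-capable device: hardware can take it. */
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4, NETIF_F_TSO));
	/* Same packet marked SKB_GSO_DODGY: needs NETIF_F_GSO_ROBUST too. */
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4 | SKB_GSO_DODGY, NETIF_F_TSO));
	printf("%d\n", gso_ok(1448, SKB_GSO_TCPV4 | SKB_GSO_DODGY,
			      NETIF_F_TSO | NETIF_F_GSO_ROBUST));
	return 0;
}
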
   9.936 -diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
   9.937 ---- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-12 19:02:10.000000000 +0100
   9.938 -+++ ./include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
   9.939 -@@ -134,9 +134,10 @@ struct skb_frag_struct {
   9.940 - struct skb_shared_info {
   9.941 - 	atomic_t	dataref;
   9.942 - 	unsigned short	nr_frags;
   9.943 --	unsigned short	tso_size;
   9.944 --	unsigned short	tso_segs;
   9.945 --	unsigned short  ufo_size;
   9.946 -+	unsigned short	gso_size;
   9.947 -+	/* Warning: this field is not always filled in (UFO)! */
   9.948 -+	unsigned short	gso_segs;
   9.949 -+	unsigned short  gso_type;
   9.950 - 	unsigned int    ip6_frag_id;
   9.951 - 	struct sk_buff	*frag_list;
   9.952 - 	skb_frag_t	frags[MAX_SKB_FRAGS];
   9.953 -@@ -168,6 +169,14 @@ enum {
   9.954 - 	SKB_FCLONE_CLONE,
   9.955 - };
   9.956 - 
   9.957 -+enum {
   9.958 -+	SKB_GSO_TCPV4 = 1 << 0,
   9.959 -+	SKB_GSO_UDPV4 = 1 << 1,
   9.960 -+
   9.961 -+	/* This indicates the skb is from an untrusted source. */
   9.962 -+	SKB_GSO_DODGY = 1 << 2,
   9.963 -+};
   9.964 -+
   9.965 - /** 
   9.966 -  *	struct sk_buff - socket buffer
   9.967 -  *	@next: Next buffer in list
   9.968 -@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
   9.969 - 	return 0;
   9.970 - }
   9.971 - 
   9.972 -+static inline int __skb_linearize(struct sk_buff *skb)
   9.973 -+{
   9.974 -+	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
   9.975 -+}
   9.976 -+
   9.977 - /**
   9.978 -  *	skb_linearize - convert paged skb to linear one
   9.979 -  *	@skb: buffer to linarize
   9.980 -- *	@gfp: allocation mode
   9.981 -  *
   9.982 -  *	If there is no free memory -ENOMEM is returned, otherwise zero
   9.983 -  *	is returned and the old skb data released.
   9.984 -  */
   9.985 --extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
   9.986 --static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
   9.987 -+static inline int skb_linearize(struct sk_buff *skb)
   9.988 -+{
   9.989 -+	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
   9.990 -+}
   9.991 -+
   9.992 -+/**
   9.993 -+ *	skb_linearize_cow - make sure skb is linear and writable
   9.994 -+ *	@skb: buffer to process
   9.995 -+ *
   9.996 -+ *	If there is no free memory -ENOMEM is returned, otherwise zero
   9.997 -+ *	is returned and the old skb data released.
   9.998 -+ */
   9.999 -+static inline int skb_linearize_cow(struct sk_buff *skb)
  9.1000 - {
  9.1001 --	return __skb_linearize(skb, gfp);
  9.1002 -+	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
  9.1003 -+	       __skb_linearize(skb) : 0;
  9.1004 - }
  9.1005 - 
  9.1006 - /**
  9.1007 -@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
  9.1008 - 				 struct sk_buff *skb1, const u32 len);
  9.1009 - 
  9.1010 - extern void	       skb_release_data(struct sk_buff *skb);
  9.1011 -+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
  9.1012 - 
  9.1013 - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
  9.1014 - 				       int len, void *buffer)
  9.1015 -diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
  9.1016 ---- ../orig-linux-2.6.16.29/include/net/pkt_sched.h	2006-09-12 19:02:10.000000000 +0100
  9.1017 -+++ ./include/net/pkt_sched.h	2006-09-19 13:59:20.000000000 +0100
  9.1018 -@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
  9.1019 - 		struct rtattr *tab);
  9.1020 - extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
  9.1021 - 
  9.1022 --extern int qdisc_restart(struct net_device *dev);
  9.1023 -+extern void __qdisc_run(struct net_device *dev);
  9.1024 - 
  9.1025 - static inline void qdisc_run(struct net_device *dev)
  9.1026 - {
  9.1027 --	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
  9.1028 --		/* NOTHING */;
  9.1029 -+	if (!netif_queue_stopped(dev) &&
  9.1030 -+	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  9.1031 -+		__qdisc_run(dev);
  9.1032 - }
  9.1033 - 
  9.1034 - extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
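
(Illustrative note, not part of the patch: the new __LINK_STATE_QDISC_RUNNING bit turns qdisc_run() into a "single runner" -- whichever context wins the test_and_set_bit drains the queue, and every other caller returns immediately instead of looping on qdisc_restart(). A minimal userspace sketch of the same pattern, with a C11 atomic_flag standing in for the device state bit and hypothetical fake_* names; in the kernel it is __qdisc_run() that clears the bit once the queue is empty.)

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag qdisc_running = ATOMIC_FLAG_INIT;

static void fake_qdisc_drain(void)
{
	/* ...dequeue and transmit until the queue is empty... */
	printf("draining queue\n");
	atomic_flag_clear(&qdisc_running);	/* done: let the next caller run */
}

static void fake_qdisc_run(void)
{
	if (!atomic_flag_test_and_set(&qdisc_running))
		fake_qdisc_drain();	/* we won the bit, so we drain */
	/* else: another context is already draining; nothing to do */
}

int main(void)
{
	fake_qdisc_run();
	fake_qdisc_run();
	return 0;
}
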
  9.1035 -diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
  9.1036 ---- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-12 19:02:10.000000000 +0100
  9.1037 -+++ ./include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
  9.1038 -@@ -37,6 +37,8 @@
  9.1039 - struct net_protocol {
  9.1040 - 	int			(*handler)(struct sk_buff *skb);
  9.1041 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  9.1042 -+	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  9.1043 -+					       int features);
  9.1044 - 	int			no_policy;
  9.1045 - };
  9.1046 - 
  9.1047 -diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
  9.1048 ---- ../orig-linux-2.6.16.29/include/net/sock.h	2006-09-12 19:02:10.000000000 +0100
  9.1049 -+++ ./include/net/sock.h	2006-09-19 13:59:20.000000000 +0100
  9.1050 -@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
  9.1051 - {
  9.1052 - 	__sk_dst_set(sk, dst);
  9.1053 - 	sk->sk_route_caps = dst->dev->features;
  9.1054 -+	if (sk->sk_route_caps & NETIF_F_GSO)
  9.1055 -+		sk->sk_route_caps |= NETIF_F_TSO;
  9.1056 - 	if (sk->sk_route_caps & NETIF_F_TSO) {
  9.1057 - 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
  9.1058 - 			sk->sk_route_caps &= ~NETIF_F_TSO;
  9.1059 -+		else 
  9.1060 -+			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
  9.1061 - 	}
  9.1062 - }
  9.1063 - 
  9.1064 -diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
  9.1065 ---- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-12 19:02:10.000000000 +0100
  9.1066 -+++ ./include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
  9.1067 -@@ -552,13 +552,13 @@ struct tcp_skb_cb {
  9.1068 -  */
  9.1069 - static inline int tcp_skb_pcount(const struct sk_buff *skb)
  9.1070 - {
  9.1071 --	return skb_shinfo(skb)->tso_segs;
  9.1072 -+	return skb_shinfo(skb)->gso_segs;
  9.1073 - }
  9.1074 - 
  9.1075 - /* This is valid iff tcp_skb_pcount() > 1. */
  9.1076 - static inline int tcp_skb_mss(const struct sk_buff *skb)
  9.1077 - {
  9.1078 --	return skb_shinfo(skb)->tso_size;
  9.1079 -+	return skb_shinfo(skb)->gso_size;
  9.1080 - }
  9.1081 - 
  9.1082 - static inline void tcp_dec_pcount_approx(__u32 *count,
  9.1083 -@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
  9.1084 - 
  9.1085 - extern int tcp_v4_destroy_sock(struct sock *sk);
  9.1086 - 
  9.1087 -+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  9.1088 -+
  9.1089 - #ifdef CONFIG_PROC_FS
  9.1090 - extern int  tcp4_proc_init(void);
  9.1091 - extern void tcp4_proc_exit(void);
  9.1092 -diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
  9.1093 ---- ../orig-linux-2.6.16.29/net/atm/clip.c	2006-09-12 19:02:10.000000000 +0100
  9.1094 -+++ ./net/atm/clip.c	2006-09-19 13:59:20.000000000 +0100
  9.1095 -@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
  9.1096 - 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
  9.1097 - 		return;
  9.1098 - 	}
  9.1099 --	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
  9.1100 -+	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
  9.1101 - 	entry->neigh->used = jiffies;
  9.1102 - 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
  9.1103 - 		if (*walk == clip_vcc) {
  9.1104 -@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
  9.1105 - 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
  9.1106 - 	  "0x%p)\n",entry,clip_vcc);
  9.1107 - out:
  9.1108 --	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
  9.1109 -+	netif_tx_unlock_bh(entry->neigh->dev);
  9.1110 - }
  9.1111 - 
  9.1112 - /* The neighbour entry n->lock is held. */
  9.1113 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
  9.1114 ---- ../orig-linux-2.6.16.29/net/bridge/br_device.c	2006-09-12 19:02:10.000000000 +0100
  9.1115 -+++ ./net/bridge/br_device.c	2006-09-19 13:59:20.000000000 +0100
  9.1116 -@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
  9.1117 - 	struct net_bridge *br = netdev_priv(dev);
  9.1118 - 
  9.1119 - 	if (data)
  9.1120 --		br->feature_mask |= NETIF_F_IP_CSUM;
  9.1121 -+		br->feature_mask |= NETIF_F_NO_CSUM;
  9.1122 - 	else
  9.1123 --		br->feature_mask &= ~NETIF_F_IP_CSUM;
  9.1124 -+		br->feature_mask &= ~NETIF_F_ALL_CSUM;
  9.1125 - 
  9.1126 - 	br_features_recompute(br);
  9.1127 - 	return 0;
  9.1128 -@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
  9.1129 - 	dev->set_mac_address = br_set_mac_address;
  9.1130 - 	dev->priv_flags = IFF_EBRIDGE;
  9.1131 - 
  9.1132 -- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
  9.1133 -- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
  9.1134 -+ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
  9.1135 -+ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
  9.1136 - }
  9.1137 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
  9.1138 ---- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-12 19:02:10.000000000 +0100
  9.1139 -+++ ./net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
  9.1140 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  9.1141 - int br_dev_queue_push_xmit(struct sk_buff *skb)
  9.1142 - {
  9.1143 - 	/* drop mtu oversized packets except tso */
  9.1144 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
  9.1145 -+	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  9.1146 - 		kfree_skb(skb);
  9.1147 - 	else {
  9.1148 - #ifdef CONFIG_BRIDGE_NETFILTER
  9.1149 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
  9.1150 ---- ../orig-linux-2.6.16.29/net/bridge/br_if.c	2006-09-12 19:02:10.000000000 +0100
  9.1151 -+++ ./net/bridge/br_if.c	2006-09-19 13:59:20.000000000 +0100
  9.1152 -@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
  9.1153 - 	struct net_bridge_port *p;
  9.1154 - 	unsigned long features, checksum;
  9.1155 - 
  9.1156 --	features = br->feature_mask &~ NETIF_F_IP_CSUM;
  9.1157 --	checksum = br->feature_mask & NETIF_F_IP_CSUM;
  9.1158 -+	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
  9.1159 -+	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
  9.1160 - 
  9.1161 - 	list_for_each_entry(p, &br->port_list, list) {
  9.1162 --		if (!(p->dev->features 
  9.1163 --		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
  9.1164 -+		unsigned long feature = p->dev->features;
  9.1165 -+
  9.1166 -+		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
  9.1167 -+			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
  9.1168 -+		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
  9.1169 -+			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
  9.1170 -+		if (!(feature & NETIF_F_IP_CSUM))
  9.1171 - 			checksum = 0;
  9.1172 --		features &= p->dev->features;
  9.1173 -+
  9.1174 -+		if (feature & NETIF_F_GSO)
  9.1175 -+			feature |= NETIF_F_TSO;
  9.1176 -+		feature |= NETIF_F_GSO;
  9.1177 -+
  9.1178 -+		features &= feature;
  9.1179 - 	}
  9.1180 - 
  9.1181 --	br->dev->features = features | checksum | NETIF_F_LLTX;
  9.1182 -+	br->dev->features = features | checksum | NETIF_F_LLTX |
  9.1183 -+			    NETIF_F_GSO_ROBUST;
  9.1184 - }
  9.1185 - 
  9.1186 - /* called with RTNL */
  9.1187 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
  9.1188 ---- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-12 19:02:10.000000000 +0100
  9.1189 -+++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
  9.1190 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  9.1191 - {
  9.1192 - 	if (skb->protocol == htons(ETH_P_IP) &&
  9.1193 - 	    skb->len > skb->dev->mtu &&
  9.1194 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  9.1195 -+	    !skb_shinfo(skb)->gso_size)
  9.1196 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
  9.1197 - 	else
  9.1198 - 		return br_dev_queue_push_xmit(skb);
  9.1199 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
  9.1200 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-12 19:02:10.000000000 +0100
  9.1201 -+++ ./net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
  9.1202 -@@ -115,6 +115,7 @@
  9.1203 - #include <net/iw_handler.h>
  9.1204 - #endif	/* CONFIG_NET_RADIO */
  9.1205 - #include <asm/current.h>
  9.1206 -+#include <linux/err.h>
  9.1207 - 
  9.1208 - /*
  9.1209 -  *	The list of packet types we will receive (as opposed to discard)
  9.1210 -@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
  9.1211 -  *	taps currently in use.
  9.1212 -  */
  9.1213 - 
  9.1214 --void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  9.1215 -+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  9.1216 - {
  9.1217 - 	struct packet_type *ptype;
  9.1218 - 
  9.1219 -@@ -1106,6 +1107,45 @@ out:	
  9.1220 - 	return ret;
  9.1221 - }
  9.1222 - 
  9.1223 -+/**
  9.1224 -+ *	skb_gso_segment - Perform segmentation on skb.
  9.1225 -+ *	@skb: buffer to segment
  9.1226 -+ *	@features: features for the output path (see dev->features)
  9.1227 -+ *
  9.1228 -+ *	This function segments the given skb and returns a list of segments.
  9.1229 -+ *
  9.1230 -+ *	It may return NULL if the skb requires no segmentation.  This is
  9.1231 -+ *	only possible when GSO is used for verifying header integrity.
  9.1232 -+ */
  9.1233 -+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
  9.1234 -+{
  9.1235 -+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  9.1236 -+	struct packet_type *ptype;
  9.1237 -+	int type = skb->protocol;
  9.1238 -+
  9.1239 -+	BUG_ON(skb_shinfo(skb)->frag_list);
  9.1240 -+	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  9.1241 -+
  9.1242 -+	skb->mac.raw = skb->data;
  9.1243 -+	skb->mac_len = skb->nh.raw - skb->data;
  9.1244 -+	__skb_pull(skb, skb->mac_len);
  9.1245 -+
  9.1246 -+	rcu_read_lock();
  9.1247 -+	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  9.1248 -+		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  9.1249 -+			segs = ptype->gso_segment(skb, features);
  9.1250 -+			break;
  9.1251 -+		}
  9.1252 -+	}
  9.1253 -+	rcu_read_unlock();
  9.1254 -+
  9.1255 -+	__skb_push(skb, skb->data - skb->mac.raw);
  9.1256 -+
  9.1257 -+	return segs;
  9.1258 -+}
  9.1259 -+
  9.1260 -+EXPORT_SYMBOL(skb_gso_segment);
  9.1261 -+
  9.1262 - /* Take action when hardware reception checksum errors are detected. */
  9.1263 - #ifdef CONFIG_BUG
  9.1264 - void netdev_rx_csum_fault(struct net_device *dev)
  9.1265 -@@ -1142,75 +1182,108 @@ static inline int illegal_highdma(struct
  9.1266 - #define illegal_highdma(dev, skb)	(0)
  9.1267 - #endif
  9.1268 - 
  9.1269 --/* Keep head the same: replace data */
  9.1270 --int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
  9.1271 -+struct dev_gso_cb {
  9.1272 -+	void (*destructor)(struct sk_buff *skb);
  9.1273 -+};
  9.1274 -+
  9.1275 -+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
  9.1276 -+
  9.1277 -+static void dev_gso_skb_destructor(struct sk_buff *skb)
  9.1278 -+{
  9.1279 -+	struct dev_gso_cb *cb;
  9.1280 -+
  9.1281 -+	do {
  9.1282 -+		struct sk_buff *nskb = skb->next;
  9.1283 -+
  9.1284 -+		skb->next = nskb->next;
  9.1285 -+		nskb->next = NULL;
  9.1286 -+		kfree_skb(nskb);
  9.1287 -+	} while (skb->next);
  9.1288 -+
  9.1289 -+	cb = DEV_GSO_CB(skb);
  9.1290 -+	if (cb->destructor)
  9.1291 -+		cb->destructor(skb);
  9.1292 -+}
  9.1293 -+
  9.1294 -+/**
  9.1295 -+ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  9.1296 -+ *	@skb: buffer to segment
  9.1297 -+ *
  9.1298 -+ *	This function segments the given skb and stores the list of segments
  9.1299 -+ *	in skb->next.
  9.1300 -+ */
  9.1301 -+static int dev_gso_segment(struct sk_buff *skb)
  9.1302 - {
  9.1303 --	unsigned int size;
  9.1304 --	u8 *data;
  9.1305 --	long offset;
  9.1306 --	struct skb_shared_info *ninfo;
  9.1307 --	int headerlen = skb->data - skb->head;
  9.1308 --	int expand = (skb->tail + skb->data_len) - skb->end;
  9.1309 --
  9.1310 --	if (skb_shared(skb))
  9.1311 --		BUG();
  9.1312 --
  9.1313 --	if (expand <= 0)
  9.1314 --		expand = 0;
  9.1315 --
  9.1316 --	size = skb->end - skb->head + expand;
  9.1317 --	size = SKB_DATA_ALIGN(size);
  9.1318 --	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
  9.1319 --	if (!data)
  9.1320 --		return -ENOMEM;
  9.1321 --
  9.1322 --	/* Copy entire thing */
  9.1323 --	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
  9.1324 --		BUG();
  9.1325 --
  9.1326 --	/* Set up shinfo */
  9.1327 --	ninfo = (struct skb_shared_info*)(data + size);
  9.1328 --	atomic_set(&ninfo->dataref, 1);
  9.1329 --	ninfo->tso_size = skb_shinfo(skb)->tso_size;
  9.1330 --	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
  9.1331 --	ninfo->nr_frags = 0;
  9.1332 --	ninfo->frag_list = NULL;
  9.1333 --
  9.1334 --	/* Offset between the two in bytes */
  9.1335 --	offset = data - skb->head;
  9.1336 --
  9.1337 --	/* Free old data. */
  9.1338 --	skb_release_data(skb);
  9.1339 --
  9.1340 --	skb->head = data;
  9.1341 --	skb->end  = data + size;
  9.1342 --
  9.1343 --	/* Set up new pointers */
  9.1344 --	skb->h.raw   += offset;
  9.1345 --	skb->nh.raw  += offset;
  9.1346 --	skb->mac.raw += offset;
  9.1347 --	skb->tail    += offset;
  9.1348 --	skb->data    += offset;
  9.1349 -+	struct net_device *dev = skb->dev;
  9.1350 -+	struct sk_buff *segs;
  9.1351 -+	int features = dev->features & ~(illegal_highdma(dev, skb) ?
  9.1352 -+					 NETIF_F_SG : 0);
  9.1353 -+
  9.1354 -+	segs = skb_gso_segment(skb, features);
  9.1355 -+
  9.1356 -+	/* Verifying header integrity only. */
  9.1357 -+	if (!segs)
  9.1358 -+		return 0;
  9.1359 - 
  9.1360 --	/* We are no longer a clone, even if we were. */
  9.1361 --	skb->cloned    = 0;
  9.1362 -+	if (unlikely(IS_ERR(segs)))
  9.1363 -+		return PTR_ERR(segs);
  9.1364 -+
  9.1365 -+	skb->next = segs;
  9.1366 -+	DEV_GSO_CB(skb)->destructor = skb->destructor;
  9.1367 -+	skb->destructor = dev_gso_skb_destructor;
  9.1368 - 
  9.1369 --	skb->tail     += skb->data_len;
  9.1370 --	skb->data_len  = 0;
  9.1371 -+	return 0;
  9.1372 -+}
  9.1373 -+
  9.1374 -+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
  9.1375 -+{
  9.1376 -+	if (likely(!skb->next)) {
  9.1377 -+		if (netdev_nit)
  9.1378 -+			dev_queue_xmit_nit(skb, dev);
  9.1379 -+
  9.1380 -+		if (netif_needs_gso(dev, skb)) {
  9.1381 -+			if (unlikely(dev_gso_segment(skb)))
  9.1382 -+				goto out_kfree_skb;
  9.1383 -+			if (skb->next)
  9.1384 -+				goto gso;
  9.1385 -+		}
  9.1386 -+
  9.1387 -+		return dev->hard_start_xmit(skb, dev);
  9.1388 -+	}
  9.1389 -+
  9.1390 -+gso:
  9.1391 -+	do {
  9.1392 -+		struct sk_buff *nskb = skb->next;
  9.1393 -+		int rc;
  9.1394 -+
  9.1395 -+		skb->next = nskb->next;
  9.1396 -+		nskb->next = NULL;
  9.1397 -+		rc = dev->hard_start_xmit(nskb, dev);
  9.1398 -+		if (unlikely(rc)) {
  9.1399 -+			nskb->next = skb->next;
  9.1400 -+			skb->next = nskb;
  9.1401 -+			return rc;
  9.1402 -+		}
  9.1403 -+		if (unlikely(netif_queue_stopped(dev) && skb->next))
  9.1404 -+			return NETDEV_TX_BUSY;
  9.1405 -+	} while (skb->next);
  9.1406 -+	
  9.1407 -+	skb->destructor = DEV_GSO_CB(skb)->destructor;
  9.1408 -+
  9.1409 -+out_kfree_skb:
  9.1410 -+	kfree_skb(skb);
  9.1411 - 	return 0;
  9.1412 - }
  9.1413 - 
  9.1414 - #define HARD_TX_LOCK(dev, cpu) {			\
  9.1415 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  9.1416 --		spin_lock(&dev->xmit_lock);		\
  9.1417 --		dev->xmit_lock_owner = cpu;		\
  9.1418 -+		netif_tx_lock(dev);			\
  9.1419 - 	}						\
  9.1420 - }
  9.1421 - 
  9.1422 - #define HARD_TX_UNLOCK(dev) {				\
  9.1423 - 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  9.1424 --		dev->xmit_lock_owner = -1;		\
  9.1425 --		spin_unlock(&dev->xmit_lock);		\
  9.1426 -+		netif_tx_unlock(dev);			\
  9.1427 - 	}						\
  9.1428 - }
  9.1429 - 
  9.1430 -@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
  9.1431 - 	struct Qdisc *q;
  9.1432 - 	int rc = -ENOMEM;
  9.1433 - 
  9.1434 -+	/* GSO will handle the following emulations directly. */
  9.1435 -+	if (netif_needs_gso(dev, skb))
  9.1436 -+		goto gso;
  9.1437 -+
  9.1438 - 	if (skb_shinfo(skb)->frag_list &&
  9.1439 - 	    !(dev->features & NETIF_F_FRAGLIST) &&
  9.1440 --	    __skb_linearize(skb, GFP_ATOMIC))
  9.1441 -+	    __skb_linearize(skb))
  9.1442 - 		goto out_kfree_skb;
  9.1443 - 
  9.1444 - 	/* Fragmented skb is linearized if device does not support SG,
  9.1445 -@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
  9.1446 - 	 */
  9.1447 - 	if (skb_shinfo(skb)->nr_frags &&
  9.1448 - 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
  9.1449 --	    __skb_linearize(skb, GFP_ATOMIC))
  9.1450 -+	    __skb_linearize(skb))
  9.1451 - 		goto out_kfree_skb;
  9.1452 - 
  9.1453 - 	/* If packet is not checksummed and device does not support
  9.1454 - 	 * checksumming for this protocol, complete checksumming here.
  9.1455 - 	 */
  9.1456 - 	if (skb->ip_summed == CHECKSUM_HW &&
  9.1457 --	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
  9.1458 -+	    (!(dev->features & NETIF_F_GEN_CSUM) &&
  9.1459 - 	     (!(dev->features & NETIF_F_IP_CSUM) ||
  9.1460 - 	      skb->protocol != htons(ETH_P_IP))))
  9.1461 - 	      	if (skb_checksum_help(skb, 0))
  9.1462 - 	      		goto out_kfree_skb;
  9.1463 - 
  9.1464 -+gso:
  9.1465 - 	spin_lock_prefetch(&dev->queue_lock);
  9.1466 - 
  9.1467 - 	/* Disable soft irqs for various locks below. Also 
  9.1468 - 	 * stops preemption for RCU. 
  9.1469 - 	 */
  9.1470 --	local_bh_disable(); 
  9.1471 -+	rcu_read_lock_bh(); 
  9.1472 - 
  9.1473 - 	/* Updates of qdisc are serialized by queue_lock. 
  9.1474 - 	 * The struct Qdisc which is pointed to by qdisc is now a 
  9.1475 -@@ -1309,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
  9.1476 - 	/* The device has no queue. Common case for software devices:
  9.1477 - 	   loopback, all the sorts of tunnels...
  9.1478 - 
  9.1479 --	   Really, it is unlikely that xmit_lock protection is necessary here.
  9.1480 --	   (f.e. loopback and IP tunnels are clean ignoring statistics
  9.1481 -+	   Really, it is unlikely that netif_tx_lock protection is necessary
  9.1482 -+	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
  9.1483 - 	   counters.)
  9.1484 - 	   However, it is possible, that they rely on protection
  9.1485 - 	   made by us here.
  9.1486 -@@ -1326,11 +1404,8 @@ int dev_queue_xmit(struct sk_buff *skb)
  9.1487 - 			HARD_TX_LOCK(dev, cpu);
  9.1488 - 
  9.1489 - 			if (!netif_queue_stopped(dev)) {
  9.1490 --				if (netdev_nit)
  9.1491 --					dev_queue_xmit_nit(skb, dev);
  9.1492 --
  9.1493 - 				rc = 0;
  9.1494 --				if (!dev->hard_start_xmit(skb, dev)) {
  9.1495 -+				if (!dev_hard_start_xmit(skb, dev)) {
  9.1496 - 					HARD_TX_UNLOCK(dev);
  9.1497 - 					goto out;
  9.1498 - 				}
  9.1499 -@@ -1349,13 +1424,13 @@ int dev_queue_xmit(struct sk_buff *skb)
  9.1500 - 	}
  9.1501 - 
  9.1502 - 	rc = -ENETDOWN;
  9.1503 --	local_bh_enable();
  9.1504 -+	rcu_read_unlock_bh();
  9.1505 - 
  9.1506 - out_kfree_skb:
  9.1507 - 	kfree_skb(skb);
  9.1508 - 	return rc;
  9.1509 - out:
  9.1510 --	local_bh_enable();
  9.1511 -+	rcu_read_unlock_bh();
  9.1512 - 	return rc;
  9.1513 - }
  9.1514 - 
  9.1515 -@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
  9.1516 - 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  9.1517 - 
  9.1518 - 	spin_lock_init(&dev->queue_lock);
  9.1519 --	spin_lock_init(&dev->xmit_lock);
  9.1520 -+	spin_lock_init(&dev->_xmit_lock);
  9.1521 - 	dev->xmit_lock_owner = -1;
  9.1522 - #ifdef CONFIG_NET_CLS_ACT
  9.1523 - 	spin_lock_init(&dev->ingress_lock);
  9.1524 -@@ -2714,9 +2789,7 @@ int register_netdevice(struct net_device
  9.1525 - 
  9.1526 - 	/* Fix illegal SG+CSUM combinations. */
  9.1527 - 	if ((dev->features & NETIF_F_SG) &&
  9.1528 --	    !(dev->features & (NETIF_F_IP_CSUM |
  9.1529 --			       NETIF_F_NO_CSUM |
  9.1530 --			       NETIF_F_HW_CSUM))) {
  9.1531 -+	    !(dev->features & NETIF_F_ALL_CSUM)) {
  9.1532 - 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
  9.1533 - 		       dev->name);
  9.1534 - 		dev->features &= ~NETIF_F_SG;
  9.1535 -@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
  9.1536 - EXPORT_SYMBOL(__dev_get_by_index);
  9.1537 - EXPORT_SYMBOL(__dev_get_by_name);
  9.1538 - EXPORT_SYMBOL(__dev_remove_pack);
  9.1539 --EXPORT_SYMBOL(__skb_linearize);
  9.1540 - EXPORT_SYMBOL(dev_valid_name);
  9.1541 - EXPORT_SYMBOL(dev_add_pack);
  9.1542 - EXPORT_SYMBOL(dev_alloc_name);
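
(Illustrative note, not part of the patch: once dev_gso_segment() has hung the segment list off skb->next, dev_hard_start_xmit() above detaches each segment before handing it to the driver, so a stopped queue can resume later from whatever is left on the list. A standalone sketch of that detach-then-use walk, using a hypothetical fake_skb type rather than real sk_buffs.)

#include <stdio.h>
#include <stdlib.h>

struct fake_skb {
	struct fake_skb *next;
	int id;
};

/* Pop segments off head->next one at a time, as dev_hard_start_xmit()
 * and dev_gso_skb_destructor() do with the real segment list. */
static void drain_segments(struct fake_skb *head)
{
	while (head->next) {
		struct fake_skb *seg = head->next;

		head->next = seg->next;	/* detach before use */
		seg->next = NULL;
		printf("xmit segment %d\n", seg->id);
		free(seg);
	}
}

int main(void)
{
	struct fake_skb head = { .next = NULL, .id = 0 };
	int i;

	for (i = 3; i >= 1; i--) {	/* build list 1 -> 2 -> 3 */
		struct fake_skb *seg = malloc(sizeof(*seg));

		if (!seg)
			return 1;
		seg->id = i;
		seg->next = head.next;
		head.next = seg;
	}
	drain_segments(&head);
	return 0;
}
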
  9.1543 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev_mcast.c ./net/core/dev_mcast.c
  9.1544 ---- ../orig-linux-2.6.16.29/net/core/dev_mcast.c	2006-09-12 19:02:10.000000000 +0100
  9.1545 -+++ ./net/core/dev_mcast.c	2006-09-19 13:59:20.000000000 +0100
  9.1546 -@@ -62,7 +62,7 @@
  9.1547 -  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  9.1548 -  *	so that it must be bh protected.
  9.1549 -  *
  9.1550 -- *	We block accesses to device mc filters with dev->xmit_lock.
  9.1551 -+ *	We block accesses to device mc filters with netif_tx_lock.
  9.1552 -  */
  9.1553 - 
  9.1554 - /*
  9.1555 -@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
  9.1556 - 
  9.1557 - void dev_mc_upload(struct net_device *dev)
  9.1558 - {
  9.1559 --	spin_lock_bh(&dev->xmit_lock);
  9.1560 -+	netif_tx_lock_bh(dev);
  9.1561 - 	__dev_mc_upload(dev);
  9.1562 --	spin_unlock_bh(&dev->xmit_lock);
  9.1563 -+	netif_tx_unlock_bh(dev);
  9.1564 - }
  9.1565 - 
  9.1566 - /*
  9.1567 -@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
  9.1568 - 	int err = 0;
  9.1569 - 	struct dev_mc_list *dmi, **dmip;
  9.1570 - 
  9.1571 --	spin_lock_bh(&dev->xmit_lock);
  9.1572 -+	netif_tx_lock_bh(dev);
  9.1573 - 
  9.1574 - 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
  9.1575 - 		/*
  9.1576 -@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
  9.1577 - 			 */
  9.1578 - 			__dev_mc_upload(dev);
  9.1579 - 			
  9.1580 --			spin_unlock_bh(&dev->xmit_lock);
  9.1581 -+			netif_tx_unlock_bh(dev);
  9.1582 - 			return 0;
  9.1583 - 		}
  9.1584 - 	}
  9.1585 - 	err = -ENOENT;
  9.1586 - done:
  9.1587 --	spin_unlock_bh(&dev->xmit_lock);
  9.1588 -+	netif_tx_unlock_bh(dev);
  9.1589 - 	return err;
  9.1590 - }
  9.1591 - 
  9.1592 -@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
  9.1593 - 
  9.1594 - 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
  9.1595 - 
  9.1596 --	spin_lock_bh(&dev->xmit_lock);
  9.1597 -+	netif_tx_lock_bh(dev);
  9.1598 - 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
  9.1599 - 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
  9.1600 - 		    dmi->dmi_addrlen == alen) {
  9.1601 -@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
  9.1602 - 	}
  9.1603 - 
  9.1604 - 	if ((dmi = dmi1) == NULL) {
  9.1605 --		spin_unlock_bh(&dev->xmit_lock);
  9.1606 -+		netif_tx_unlock_bh(dev);
  9.1607 - 		return -ENOMEM;
  9.1608 - 	}
  9.1609 - 	memcpy(dmi->dmi_addr, addr, alen);
  9.1610 -@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
  9.1611 - 
  9.1612 - 	__dev_mc_upload(dev);
  9.1613 - 	
  9.1614 --	spin_unlock_bh(&dev->xmit_lock);
  9.1615 -+	netif_tx_unlock_bh(dev);
  9.1616 - 	return 0;
  9.1617 - 
  9.1618 - done:
  9.1619 --	spin_unlock_bh(&dev->xmit_lock);
  9.1620 -+	netif_tx_unlock_bh(dev);
  9.1621 - 	kfree(dmi1);
  9.1622 - 	return err;
  9.1623 - }
  9.1624 -@@ -204,7 +204,7 @@ done:
  9.1625 - 
  9.1626 - void dev_mc_discard(struct net_device *dev)
  9.1627 - {
  9.1628 --	spin_lock_bh(&dev->xmit_lock);
  9.1629 -+	netif_tx_lock_bh(dev);
  9.1630 - 	
  9.1631 - 	while (dev->mc_list != NULL) {
  9.1632 - 		struct dev_mc_list *tmp = dev->mc_list;
  9.1633 -@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
  9.1634 - 	}
  9.1635 - 	dev->mc_count = 0;
  9.1636 - 
  9.1637 --	spin_unlock_bh(&dev->xmit_lock);
  9.1638 -+	netif_tx_unlock_bh(dev);
  9.1639 - }
  9.1640 - 
  9.1641 - #ifdef CONFIG_PROC_FS
  9.1642 -@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
  9.1643 - 	struct dev_mc_list *m;
  9.1644 - 	struct net_device *dev = v;
  9.1645 - 
  9.1646 --	spin_lock_bh(&dev->xmit_lock);
  9.1647 -+	netif_tx_lock_bh(dev);
  9.1648 - 	for (m = dev->mc_list; m; m = m->next) {
  9.1649 - 		int i;
  9.1650 - 
  9.1651 -@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
  9.1652 - 
  9.1653 - 		seq_putc(seq, '\n');
  9.1654 - 	}
  9.1655 --	spin_unlock_bh(&dev->xmit_lock);
  9.1656 -+	netif_tx_unlock_bh(dev);
  9.1657 - 	return 0;
  9.1658 - }
  9.1659 - 
  9.1660 -diff -pruN ../orig-linux-2.6.16.29/net/core/ethtool.c ./net/core/ethtool.c
  9.1661 ---- ../orig-linux-2.6.16.29/net/core/ethtool.c	2006-09-12 19:02:10.000000000 +0100
  9.1662 -+++ ./net/core/ethtool.c	2006-09-19 13:59:20.000000000 +0100
  9.1663 -@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
  9.1664 - 
  9.1665 - u32 ethtool_op_get_tx_csum(struct net_device *dev)
  9.1666 - {
  9.1667 --	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
  9.1668 -+	return (dev->features & NETIF_F_ALL_CSUM) != 0;
  9.1669 - }
  9.1670 - 
  9.1671 - int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
  9.1672 -@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev
  9.1673 - 		return -EFAULT;
  9.1674 - 
  9.1675 - 	if (edata.data && 
  9.1676 --	    !(dev->features & (NETIF_F_IP_CSUM |
  9.1677 --			       NETIF_F_NO_CSUM |
  9.1678 --			       NETIF_F_HW_CSUM)))
  9.1679 -+	    !(dev->features & NETIF_F_ALL_CSUM))
  9.1680 - 		return -EINVAL;
  9.1681 - 
  9.1682 - 	return __ethtool_set_sg(dev, edata.data);
  9.1683 -@@ -561,7 +559,7 @@ static int ethtool_set_sg(struct net_dev
  9.1684 - 
  9.1685 - static int ethtool_get_tso(struct net_device *dev, char __user *useraddr)
  9.1686 - {
  9.1687 --	struct ethtool_value edata = { ETHTOOL_GTSO };
  9.1688 -+	struct ethtool_value edata = { ETHTOOL_GUFO };
  9.1689 - 
  9.1690 - 	if (!dev->ethtool_ops->get_tso)
  9.1691 - 		return -EOPNOTSUPP;
  9.1692 -@@ -616,6 +614,29 @@ static int ethtool_set_ufo(struct net_de
  9.1693 - 	return dev->ethtool_ops->set_ufo(dev, edata.data);
  9.1694 - }
  9.1695 - 
  9.1696 -+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
  9.1697 -+{
  9.1698 -+	struct ethtool_value edata = { ETHTOOL_GGSO };
  9.1699 -+
  9.1700 -+	edata.data = dev->features & NETIF_F_GSO;
  9.1701 -+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
  9.1702 -+		 return -EFAULT;
  9.1703 -+	return 0;
  9.1704 -+}
  9.1705 -+
  9.1706 -+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
  9.1707 -+{
  9.1708 -+	struct ethtool_value edata;
  9.1709 -+
  9.1710 -+	if (copy_from_user(&edata, useraddr, sizeof(edata)))
  9.1711 -+		return -EFAULT;
  9.1712 -+	if (edata.data)
  9.1713 -+		dev->features |= NETIF_F_GSO;
  9.1714 -+	else
  9.1715 -+		dev->features &= ~NETIF_F_GSO;
  9.1716 -+	return 0;
  9.1717 -+}
  9.1718 -+
  9.1719 - static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
  9.1720 - {
  9.1721 - 	struct ethtool_test test;
  9.1722 -@@ -907,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
  9.1723 - 	case ETHTOOL_SUFO:
  9.1724 - 		rc = ethtool_set_ufo(dev, useraddr);
  9.1725 - 		break;
  9.1726 -+	case ETHTOOL_GGSO:
  9.1727 -+		rc = ethtool_get_gso(dev, useraddr);
  9.1728 -+		break;
  9.1729 -+	case ETHTOOL_SGSO:
  9.1730 -+		rc = ethtool_set_gso(dev, useraddr);
  9.1731 -+		break;
  9.1732 - 	default:
  9.1733 - 		rc =  -EOPNOTSUPP;
  9.1734 - 	}
  9.1735 -diff -pruN ../orig-linux-2.6.16.29/net/core/netpoll.c ./net/core/netpoll.c
  9.1736 ---- ../orig-linux-2.6.16.29/net/core/netpoll.c	2006-09-12 19:02:10.000000000 +0100
  9.1737 -+++ ./net/core/netpoll.c	2006-09-19 13:59:20.000000000 +0100
  9.1738 -@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp
  9.1739 - 
  9.1740 - 	do {
  9.1741 - 		npinfo->tries--;
  9.1742 --		spin_lock(&np->dev->xmit_lock);
  9.1743 --		np->dev->xmit_lock_owner = smp_processor_id();
  9.1744 -+		netif_tx_lock(np->dev);
  9.1745 - 
  9.1746 - 		/*
  9.1747 - 		 * network drivers do not expect to be called if the queue is
  9.1748 - 		 * stopped.
  9.1749 - 		 */
  9.1750 - 		if (netif_queue_stopped(np->dev)) {
  9.1751 --			np->dev->xmit_lock_owner = -1;
  9.1752 --			spin_unlock(&np->dev->xmit_lock);
  9.1753 -+			netif_tx_unlock(np->dev);
  9.1754 - 			netpoll_poll(np);
  9.1755 - 			udelay(50);
  9.1756 - 			continue;
  9.1757 - 		}
  9.1758 - 
  9.1759 - 		status = np->dev->hard_start_xmit(skb, np->dev);
  9.1760 --		np->dev->xmit_lock_owner = -1;
  9.1761 --		spin_unlock(&np->dev->xmit_lock);
  9.1762 -+		netif_tx_unlock(np->dev);
  9.1763 - 
  9.1764 - 		/* success */
  9.1765 - 		if(!status) {
  9.1766 -diff -pruN ../orig-linux-2.6.16.29/net/core/pktgen.c ./net/core/pktgen.c
  9.1767 ---- ../orig-linux-2.6.16.29/net/core/pktgen.c	2006-09-12 19:02:10.000000000 +0100
  9.1768 -+++ ./net/core/pktgen.c	2006-09-19 13:59:20.000000000 +0100
  9.1769 -@@ -2586,7 +2586,7 @@ static __inline__ void pktgen_xmit(struc
  9.1770 - 		}
  9.1771 - 	}
  9.1772 - 	
  9.1773 --	spin_lock_bh(&odev->xmit_lock);
  9.1774 -+	netif_tx_lock_bh(odev);
  9.1775 - 	if (!netif_queue_stopped(odev)) {
  9.1776 - 
  9.1777 - 		atomic_inc(&(pkt_dev->skb->users));
  9.1778 -@@ -2631,7 +2631,7 @@ retry_now:
  9.1779 - 		pkt_dev->next_tx_ns = 0;
  9.1780 -         }
  9.1781 - 
  9.1782 --	spin_unlock_bh(&odev->xmit_lock);
  9.1783 -+	netif_tx_unlock_bh(odev);
  9.1784 - 	
  9.1785 - 	/* If pkt_dev->count is zero, then run forever */
  9.1786 - 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
  9.1787 -diff -pruN ../orig-linux-2.6.16.29/net/core/skbuff.c ./net/core/skbuff.c
  9.1788 ---- ../orig-linux-2.6.16.29/net/core/skbuff.c	2006-09-12 19:02:10.000000000 +0100
  9.1789 -+++ ./net/core/skbuff.c	2006-09-19 13:59:20.000000000 +0100
  9.1790 -@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int
  9.1791 - 	shinfo = skb_shinfo(skb);
  9.1792 - 	atomic_set(&shinfo->dataref, 1);
  9.1793 - 	shinfo->nr_frags  = 0;
  9.1794 --	shinfo->tso_size = 0;
  9.1795 --	shinfo->tso_segs = 0;
  9.1796 --	shinfo->ufo_size = 0;
  9.1797 -+	shinfo->gso_size = 0;
  9.1798 -+	shinfo->gso_segs = 0;
  9.1799 -+	shinfo->gso_type = 0;
  9.1800 - 	shinfo->ip6_frag_id = 0;
  9.1801 - 	shinfo->frag_list = NULL;
  9.1802 - 
  9.1803 -@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme
  9.1804 - 
  9.1805 - 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
  9.1806 - 	skb_shinfo(skb)->nr_frags  = 0;
  9.1807 --	skb_shinfo(skb)->tso_size = 0;
  9.1808 --	skb_shinfo(skb)->tso_segs = 0;
  9.1809 -+	skb_shinfo(skb)->gso_size = 0;
  9.1810 -+	skb_shinfo(skb)->gso_segs = 0;
  9.1811 -+	skb_shinfo(skb)->gso_type = 0;
  9.1812 - 	skb_shinfo(skb)->frag_list = NULL;
  9.1813 - out:
  9.1814 - 	return skb;
  9.1815 -@@ -501,8 +502,9 @@ static void copy_skb_header(struct sk_bu
  9.1816 - 	new->tc_index	= old->tc_index;
  9.1817 - #endif
  9.1818 - 	atomic_set(&new->users, 1);
  9.1819 --	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
  9.1820 --	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
  9.1821 -+	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
  9.1822 -+	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
  9.1823 -+	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
  9.1824 - }
  9.1825 - 
  9.1826 - /**
  9.1827 -@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 
  9.1828 - 	return 0;
  9.1829 - }
  9.1830 - 
  9.1831 -+/**
  9.1832 -+ *	skb_segment - Perform protocol segmentation on skb.
  9.1833 -+ *	@skb: buffer to segment
  9.1834 -+ *	@features: features for the output path (see dev->features)
  9.1835 -+ *
  9.1836 -+ *	This function performs segmentation on the given skb.  It returns
  9.1837 -+ *	the segment at the given position.  It returns NULL if there are
  9.1838 -+ *	no more segments to generate, or when an error is encountered.
  9.1839 -+ */
  9.1840 -+struct sk_buff *skb_segment(struct sk_buff *skb, int features)
  9.1841 -+{
  9.1842 -+	struct sk_buff *segs = NULL;
  9.1843 -+	struct sk_buff *tail = NULL;
  9.1844 -+	unsigned int mss = skb_shinfo(skb)->gso_size;
  9.1845 -+	unsigned int doffset = skb->data - skb->mac.raw;
  9.1846 -+	unsigned int offset = doffset;
  9.1847 -+	unsigned int headroom;
  9.1848 -+	unsigned int len;
  9.1849 -+	int sg = features & NETIF_F_SG;
  9.1850 -+	int nfrags = skb_shinfo(skb)->nr_frags;
  9.1851 -+	int err = -ENOMEM;
  9.1852 -+	int i = 0;
  9.1853 -+	int pos;
  9.1854 -+
  9.1855 -+	__skb_push(skb, doffset);
  9.1856 -+	headroom = skb_headroom(skb);
  9.1857 -+	pos = skb_headlen(skb);
  9.1858 -+
  9.1859 -+	do {
  9.1860 -+		struct sk_buff *nskb;
  9.1861 -+		skb_frag_t *frag;
  9.1862 -+		int hsize, nsize;
  9.1863 -+		int k;
  9.1864 -+		int size;
  9.1865 -+
  9.1866 -+		len = skb->len - offset;
  9.1867 -+		if (len > mss)
  9.1868 -+			len = mss;
  9.1869 -+
  9.1870 -+		hsize = skb_headlen(skb) - offset;
  9.1871 -+		if (hsize < 0)
  9.1872 -+			hsize = 0;
  9.1873 -+		nsize = hsize + doffset;
  9.1874 -+		if (nsize > len + doffset || !sg)
  9.1875 -+			nsize = len + doffset;
  9.1876 -+
  9.1877 -+		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
  9.1878 -+		if (unlikely(!nskb))
  9.1879 -+			goto err;
  9.1880 -+
  9.1881 -+		if (segs)
  9.1882 -+			tail->next = nskb;
  9.1883 -+		else
  9.1884 -+			segs = nskb;
  9.1885 -+		tail = nskb;
  9.1886 -+
  9.1887 -+		nskb->dev = skb->dev;
  9.1888 -+		nskb->priority = skb->priority;
  9.1889 -+		nskb->protocol = skb->protocol;
  9.1890 -+		nskb->dst = dst_clone(skb->dst);
  9.1891 -+		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
  9.1892 -+		nskb->pkt_type = skb->pkt_type;
  9.1893 -+		nskb->mac_len = skb->mac_len;
  9.1894 -+
  9.1895 -+		skb_reserve(nskb, headroom);
  9.1896 -+		nskb->mac.raw = nskb->data;
  9.1897 -+		nskb->nh.raw = nskb->data + skb->mac_len;
  9.1898 -+		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
  9.1899 -+		memcpy(skb_put(nskb, doffset), skb->data, doffset);
  9.1900 -+
  9.1901 -+		if (!sg) {
  9.1902 -+			nskb->csum = skb_copy_and_csum_bits(skb, offset,
  9.1903 -+							    skb_put(nskb, len),
  9.1904 -+							    len, 0);
  9.1905 -+			continue;
  9.1906 -+		}
  9.1907 -+
  9.1908 -+		frag = skb_shinfo(nskb)->frags;
  9.1909 -+		k = 0;
  9.1910 -+
  9.1911 -+		nskb->ip_summed = CHECKSUM_HW;
  9.1912 -+		nskb->csum = skb->csum;
  9.1913 -+		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
  9.1914 -+
  9.1915 -+		while (pos < offset + len) {
  9.1916 -+			BUG_ON(i >= nfrags);
  9.1917 -+
  9.1918 -+			*frag = skb_shinfo(skb)->frags[i];
  9.1919 -+			get_page(frag->page);
  9.1920 -+			size = frag->size;
  9.1921 -+
  9.1922 -+			if (pos < offset) {
  9.1923 -+				frag->page_offset += offset - pos;
  9.1924 -+				frag->size -= offset - pos;
  9.1925 -+			}
  9.1926 -+
  9.1927 -+			k++;
  9.1928 -+
  9.1929 -+			if (pos + size <= offset + len) {
  9.1930 -+				i++;
  9.1931 -+				pos += size;
  9.1932 -+			} else {
  9.1933 -+				frag->size -= pos + size - (offset + len);
  9.1934 -+				break;
  9.1935 -+			}
  9.1936 -+
  9.1937 -+			frag++;
  9.1938 -+		}
  9.1939 -+
  9.1940 -+		skb_shinfo(nskb)->nr_frags = k;
  9.1941 -+		nskb->data_len = len - hsize;
  9.1942 -+		nskb->len += nskb->data_len;
  9.1943 -+		nskb->truesize += nskb->data_len;
  9.1944 -+	} while ((offset += len) < skb->len);
  9.1945 -+
  9.1946 -+	return segs;
  9.1947 -+
  9.1948 -+err:
  9.1949 -+	while ((skb = segs)) {
  9.1950 -+		segs = skb->next;
  9.1951 -+		kfree(skb);
  9.1952 -+	}
  9.1953 -+	return ERR_PTR(err);
  9.1954 -+}
  9.1955 -+
  9.1956 -+EXPORT_SYMBOL_GPL(skb_segment);
  9.1957 -+
  9.1958 - void __init skb_init(void)
  9.1959 - {
  9.1960 - 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
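
(Illustrative note, not part of the patch: the length arithmetic inside skb_segment() above amounts to "copy the headers onto every segment, then cut the remaining payload into gso_size-sized pieces, shorter tail allowed". A standalone sketch of just that arithmetic; show_segments() and the example numbers are arbitrary illustrations.)

#include <stdio.h>

static void show_segments(unsigned int skb_len, unsigned int doffset,
			  unsigned int mss)
{
	unsigned int offset = doffset;

	while (offset < skb_len) {
		unsigned int len = skb_len - offset;

		if (len > mss)
			len = mss;
		printf("segment: %u header + %u payload bytes\n", doffset, len);
		offset += len;
	}
}

int main(void)
{
	/* e.g. a 3054-byte skb with 54 bytes of headers and MSS 1448
	 * yields payload pieces of 1448, 1448 and 104 bytes. */
	show_segments(3054, 54, 1448);
	return 0;
}
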
  9.1961 -diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c ./net/decnet/dn_nsp_in.c
  9.1962 ---- ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c	2006-09-12 19:02:10.000000000 +0100
  9.1963 -+++ ./net/decnet/dn_nsp_in.c	2006-09-19 13:59:20.000000000 +0100
  9.1964 -@@ -801,8 +801,7 @@ got_it:
  9.1965 - 		 * We linearize everything except data segments here.
  9.1966 - 		 */
  9.1967 - 		if (cb->nsp_flags & ~0x60) {
  9.1968 --			if (unlikely(skb_is_nonlinear(skb)) &&
  9.1969 --			    skb_linearize(skb, GFP_ATOMIC) != 0)
  9.1970 -+			if (unlikely(skb_linearize(skb)))
  9.1971 - 				goto free_out;
  9.1972 - 		}
  9.1973 - 
  9.1974 -diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_route.c ./net/decnet/dn_route.c
  9.1975 ---- ../orig-linux-2.6.16.29/net/decnet/dn_route.c	2006-09-12 19:02:10.000000000 +0100
  9.1976 -+++ ./net/decnet/dn_route.c	2006-09-19 13:59:20.000000000 +0100
  9.1977 -@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st
  9.1978 - 			padlen);
  9.1979 - 
  9.1980 -         if (flags & DN_RT_PKT_CNTL) {
  9.1981 --		if (unlikely(skb_is_nonlinear(skb)) &&
  9.1982 --		    skb_linearize(skb, GFP_ATOMIC) != 0)
  9.1983 -+		if (unlikely(skb_linearize(skb)))
  9.1984 - 			goto dump_it;
  9.1985 - 
  9.1986 -                 switch(flags & DN_RT_CNTL_MSK) {
  9.1987 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/af_inet.c ./net/ipv4/af_inet.c
  9.1988 ---- ../orig-linux-2.6.16.29/net/ipv4/af_inet.c	2006-09-12 19:02:10.000000000 +0100
  9.1989 -+++ ./net/ipv4/af_inet.c	2006-09-19 13:59:20.000000000 +0100
  9.1990 -@@ -68,6 +68,7 @@
  9.1991 -  */
  9.1992 - 
  9.1993 - #include <linux/config.h>
  9.1994 -+#include <linux/err.h>
  9.1995 - #include <linux/errno.h>
  9.1996 - #include <linux/types.h>
  9.1997 - #include <linux/socket.h>
  9.1998 -@@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock *
  9.1999 - 
  9.2000 - EXPORT_SYMBOL(inet_sk_rebuild_header);
  9.2001 - 
  9.2002 -+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  9.2003 -+{
  9.2004 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
  9.2005 -+	struct iphdr *iph;
  9.2006 -+	struct net_protocol *ops;
  9.2007 -+	int proto;
  9.2008 -+	int ihl;
  9.2009 -+	int id;
  9.2010 -+
  9.2011 -+	if (!pskb_may_pull(skb, sizeof(*iph)))
  9.2012 -+		goto out;
  9.2013 -+
  9.2014 -+	iph = skb->nh.iph;
  9.2015 -+	ihl = iph->ihl * 4;
  9.2016 -+	if (ihl < sizeof(*iph))
  9.2017 -+		goto out;
  9.2018 -+
  9.2019 -+	if (!pskb_may_pull(skb, ihl))
  9.2020 -+		goto out;
  9.2021 -+
  9.2022 -+	skb->h.raw = __skb_pull(skb, ihl);
  9.2023 -+	iph = skb->nh.iph;
  9.2024 -+	id = ntohs(iph->id);
  9.2025 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  9.2026 -+	segs = ERR_PTR(-EPROTONOSUPPORT);
  9.2027 -+
  9.2028 -+	rcu_read_lock();
  9.2029 -+	ops = rcu_dereference(inet_protos[proto]);
  9.2030 -+	if (ops && ops->gso_segment)
  9.2031 -+		segs = ops->gso_segment(skb, features);
  9.2032 -+	rcu_read_unlock();
  9.2033 -+
  9.2034 -+	if (!segs || unlikely(IS_ERR(segs)))
  9.2035 -+		goto out;
  9.2036 -+
  9.2037 -+	skb = segs;
  9.2038 -+	do {
  9.2039 -+		iph = skb->nh.iph;
  9.2040 -+		iph->id = htons(id++);
  9.2041 -+		iph->tot_len = htons(skb->len - skb->mac_len);
  9.2042 -+		iph->check = 0;
  9.2043 -+		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
  9.2044 -+	} while ((skb = skb->next));
  9.2045 -+
  9.2046 -+out:
  9.2047 -+	return segs;
  9.2048 -+}
  9.2049 -+
  9.2050 - #ifdef CONFIG_IP_MULTICAST
  9.2051 - static struct net_protocol igmp_protocol = {
  9.2052 - 	.handler =	igmp_rcv,
  9.2053 -@@ -1093,6 +1142,7 @@ static struct net_protocol igmp_protocol
  9.2054 - static struct net_protocol tcp_protocol = {
  9.2055 - 	.handler =	tcp_v4_rcv,
  9.2056 - 	.err_handler =	tcp_v4_err,
  9.2057 -+	.gso_segment =	tcp_tso_segment,
  9.2058 - 	.no_policy =	1,
  9.2059 - };
  9.2060 - 
  9.2061 -@@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void);
  9.2062 - static struct packet_type ip_packet_type = {
  9.2063 - 	.type = __constant_htons(ETH_P_IP),
  9.2064 - 	.func = ip_rcv,
  9.2065 -+	.gso_segment = inet_gso_segment,
  9.2066 - };
  9.2067 - 
  9.2068 - static int __init inet_init(void)
  9.2069 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ip_output.c ./net/ipv4/ip_output.c
  9.2070 ---- ../orig-linux-2.6.16.29/net/ipv4/ip_output.c	2006-09-12 19:02:10.000000000 +0100
  9.2071 -+++ ./net/ipv4/ip_output.c	2006-09-19 13:59:20.000000000 +0100
  9.2072 -@@ -210,8 +210,7 @@ static inline int ip_finish_output(struc
  9.2073 - 		return dst_output(skb);
  9.2074 - 	}
  9.2075 - #endif
  9.2076 --	if (skb->len > dst_mtu(skb->dst) &&
  9.2077 --	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  9.2078 -+	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  9.2079 - 		return ip_fragment(skb, ip_finish_output2);
  9.2080 - 	else
  9.2081 - 		return ip_finish_output2(skb);
  9.2082 -@@ -362,7 +361,7 @@ packet_routed:
  9.2083 - 	}
  9.2084 - 
  9.2085 - 	ip_select_ident_more(iph, &rt->u.dst, sk,
  9.2086 --			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);
  9.2087 -+			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
  9.2088 - 
  9.2089 - 	/* Add an IP checksum. */
  9.2090 - 	ip_send_check(iph);
  9.2091 -@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str
  9.2092 - 			       (length - transhdrlen));
  9.2093 - 	if (!err) {
  9.2094 - 		/* specify the length of each IP datagram fragment*/
  9.2095 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  9.2096 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  9.2097 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  9.2098 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
  9.2099 - 
  9.2100 - 		return 0;
  9.2101 -@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk,
  9.2102 - 	 */
  9.2103 - 	if (transhdrlen &&
  9.2104 - 	    length + fragheaderlen <= mtu &&
  9.2105 --	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
  9.2106 -+	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
  9.2107 - 	    !exthdrlen)
  9.2108 - 		csummode = CHECKSUM_HW;
  9.2109 - 
  9.2110 -@@ -1086,14 +1086,16 @@ ssize_t	ip_append_page(struct sock *sk, 
  9.2111 - 
  9.2112 - 	inet->cork.length += size;
  9.2113 - 	if ((sk->sk_protocol == IPPROTO_UDP) &&
  9.2114 --	    (rt->u.dst.dev->features & NETIF_F_UFO))
  9.2115 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  9.2116 -+	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
  9.2117 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  9.2118 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  9.2119 -+	}
  9.2120 - 
  9.2121 - 
  9.2122 - 	while (size > 0) {
  9.2123 - 		int i;
  9.2124 - 
  9.2125 --		if (skb_shinfo(skb)->ufo_size)
  9.2126 -+		if (skb_shinfo(skb)->gso_size)
  9.2127 - 			len = size;
  9.2128 - 		else {
  9.2129 - 
  9.2130 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c ./net/ipv4/ipcomp.c
  9.2131 ---- ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c	2006-09-12 19:02:10.000000000 +0100
  9.2132 -+++ ./net/ipv4/ipcomp.c	2006-09-19 13:59:20.000000000 +0100
  9.2133 -@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat
  9.2134 -                         struct xfrm_decap_state *decap, struct sk_buff *skb)
  9.2135 - {
  9.2136 - 	u8 nexthdr;
  9.2137 --	int err = 0;
  9.2138 -+	int err = -ENOMEM;
  9.2139 - 	struct iphdr *iph;
  9.2140 - 	union {
  9.2141 - 		struct iphdr	iph;
  9.2142 -@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat
  9.2143 - 	} tmp_iph;
  9.2144 - 
  9.2145 - 
  9.2146 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  9.2147 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  9.2148 --	    	err = -ENOMEM;
  9.2149 -+	if (skb_linearize_cow(skb))
  9.2150 - 	    	goto out;
  9.2151 --	}
  9.2152 - 
  9.2153 - 	skb->ip_summed = CHECKSUM_NONE;
  9.2154 - 
  9.2155 -@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta
  9.2156 - 		goto out_ok;
  9.2157 - 	}
  9.2158 - 
  9.2159 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  9.2160 --	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  9.2161 -+	if (skb_linearize_cow(skb))
  9.2162 - 		goto out_ok;
  9.2163 --	}
  9.2164 - 	
  9.2165 - 	err = ipcomp_compress(x, skb);
  9.2166 - 	iph = skb->nh.iph;
  9.2167 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp.c ./net/ipv4/tcp.c
  9.2168 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp.c	2006-09-12 19:02:10.000000000 +0100
  9.2169 -+++ ./net/ipv4/tcp.c	2006-09-19 13:59:20.000000000 +0100
  9.2170 -@@ -257,6 +257,7 @@
  9.2171 - #include <linux/fs.h>
  9.2172 - #include <linux/random.h>
  9.2173 - #include <linux/bootmem.h>
  9.2174 -+#include <linux/err.h>
  9.2175 - 
  9.2176 - #include <net/icmp.h>
  9.2177 - #include <net/tcp.h>
  9.2178 -@@ -570,7 +571,7 @@ new_segment:
  9.2179 - 		skb->ip_summed = CHECKSUM_HW;
  9.2180 - 		tp->write_seq += copy;
  9.2181 - 		TCP_SKB_CB(skb)->end_seq += copy;
  9.2182 --		skb_shinfo(skb)->tso_segs = 0;
  9.2183 -+		skb_shinfo(skb)->gso_segs = 0;
  9.2184 - 
  9.2185 - 		if (!copied)
  9.2186 - 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
  9.2187 -@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock
  9.2188 - 	ssize_t res;
  9.2189 - 	struct sock *sk = sock->sk;
  9.2190 - 
  9.2191 --#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  9.2192 --
  9.2193 - 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
  9.2194 --	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
  9.2195 -+	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
  9.2196 - 		return sock_no_sendpage(sock, page, offset, size, flags);
  9.2197 - 
  9.2198 --#undef TCP_ZC_CSUM_FLAGS
  9.2199 --
  9.2200 - 	lock_sock(sk);
  9.2201 - 	TCP_CHECK_TIMER(sk);
  9.2202 - 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
  9.2203 -@@ -725,9 +722,7 @@ new_segment:
  9.2204 - 				/*
  9.2205 - 				 * Check whether we can use HW checksum.
  9.2206 - 				 */
  9.2207 --				if (sk->sk_route_caps &
  9.2208 --				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
  9.2209 --				     NETIF_F_HW_CSUM))
  9.2210 -+				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
  9.2211 - 					skb->ip_summed = CHECKSUM_HW;
  9.2212 - 
  9.2213 - 				skb_entail(sk, tp, skb);
  9.2214 -@@ -823,7 +818,7 @@ new_segment:
  9.2215 - 
  9.2216 - 			tp->write_seq += copy;
  9.2217 - 			TCP_SKB_CB(skb)->end_seq += copy;
  9.2218 --			skb_shinfo(skb)->tso_segs = 0;
  9.2219 -+			skb_shinfo(skb)->gso_segs = 0;
  9.2220 - 
  9.2221 - 			from += copy;
  9.2222 - 			copied += copy;
  9.2223 -@@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int 
  9.2224 - }
  9.2225 - 
  9.2226 - 
  9.2227 -+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
  9.2228 -+{
  9.2229 -+	struct sk_buff *segs = ERR_PTR(-EINVAL);
  9.2230 -+	struct tcphdr *th;
  9.2231 -+	unsigned thlen;
  9.2232 -+	unsigned int seq;
  9.2233 -+	unsigned int delta;
  9.2234 -+	unsigned int oldlen;
  9.2235 -+	unsigned int len;
  9.2236 -+
  9.2237 -+	if (!pskb_may_pull(skb, sizeof(*th)))
  9.2238 -+		goto out;
  9.2239 -+
  9.2240 -+	th = skb->h.th;
  9.2241 -+	thlen = th->doff * 4;
  9.2242 -+	if (thlen < sizeof(*th))
  9.2243 -+		goto out;
  9.2244 -+
  9.2245 -+	if (!pskb_may_pull(skb, thlen))
  9.2246 -+		goto out;
  9.2247 -+
  9.2248 -+	segs = NULL;
  9.2249 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
  9.2250 -+		goto out;
  9.2251 -+
  9.2252 -+	oldlen = (u16)~skb->len;
  9.2253 -+	__skb_pull(skb, thlen);
  9.2254 -+
  9.2255 -+	segs = skb_segment(skb, features);
  9.2256 -+	if (IS_ERR(segs))
  9.2257 -+		goto out;
  9.2258 -+
  9.2259 -+	len = skb_shinfo(skb)->gso_size;
  9.2260 -+	delta = htonl(oldlen + (thlen + len));
  9.2261 -+
  9.2262 -+	skb = segs;
  9.2263 -+	th = skb->h.th;
  9.2264 -+	seq = ntohl(th->seq);
  9.2265 -+
  9.2266 -+	do {
  9.2267 -+		th->fin = th->psh = 0;
  9.2268 -+
  9.2269 -+		th->check = ~csum_fold(th->check + delta);
  9.2270 -+		if (skb->ip_summed != CHECKSUM_HW)
  9.2271 -+			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  9.2272 -+							   skb->csum));
  9.2273 -+
  9.2274 -+		seq += len;
  9.2275 -+		skb = skb->next;
  9.2276 -+		th = skb->h.th;
  9.2277 -+
  9.2278 -+		th->seq = htonl(seq);
  9.2279 -+		th->cwr = 0;
  9.2280 -+	} while (skb->next);
  9.2281 -+
  9.2282 -+	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
  9.2283 -+	th->check = ~csum_fold(th->check + delta);
  9.2284 -+	if (skb->ip_summed != CHECKSUM_HW)
  9.2285 -+		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  9.2286 -+						   skb->csum));
  9.2287 -+
  9.2288 -+out:
  9.2289 -+	return segs;
  9.2290 -+}
  9.2291 -+
  9.2292 - extern void __skb_cb_too_small_for_tcp(int, int);
  9.2293 - extern struct tcp_congestion_ops tcp_reno;
  9.2294 - 
  9.2295 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c ./net/ipv4/tcp_input.c
  9.2296 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c	2006-09-12 19:02:10.000000000 +0100
  9.2297 -+++ ./net/ipv4/tcp_input.c	2006-09-19 13:59:20.000000000 +0100
  9.2298 -@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk,
  9.2299 - 				else
  9.2300 - 					pkt_len = (end_seq -
  9.2301 - 						   TCP_SKB_CB(skb)->seq);
  9.2302 --				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
  9.2303 -+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
  9.2304 - 					break;
  9.2305 - 				pcount = tcp_skb_pcount(skb);
  9.2306 - 			}
  9.2307 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c ./net/ipv4/tcp_output.c
  9.2308 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c	2006-09-12 19:02:10.000000000 +0100
  9.2309 -+++ ./net/ipv4/tcp_output.c	2006-09-19 13:59:20.000000000 +0100
  9.2310 -@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 
  9.2311 - 		/* Avoid the costly divide in the normal
  9.2312 - 		 * non-TSO case.
  9.2313 - 		 */
  9.2314 --		skb_shinfo(skb)->tso_segs = 1;
  9.2315 --		skb_shinfo(skb)->tso_size = 0;
  9.2316 -+		skb_shinfo(skb)->gso_segs = 1;
  9.2317 -+		skb_shinfo(skb)->gso_size = 0;
  9.2318 -+		skb_shinfo(skb)->gso_type = 0;
  9.2319 - 	} else {
  9.2320 - 		unsigned int factor;
  9.2321 - 
  9.2322 - 		factor = skb->len + (mss_now - 1);
  9.2323 - 		factor /= mss_now;
  9.2324 --		skb_shinfo(skb)->tso_segs = factor;
  9.2325 --		skb_shinfo(skb)->tso_size = mss_now;
  9.2326 -+		skb_shinfo(skb)->gso_segs = factor;
  9.2327 -+		skb_shinfo(skb)->gso_size = mss_now;
  9.2328 -+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  9.2329 - 	}
  9.2330 - }
  9.2331 - 
  9.2332 -@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock
  9.2333 - 
  9.2334 - 	if (!tso_segs ||
  9.2335 - 	    (tso_segs > 1 &&
  9.2336 --	     skb_shinfo(skb)->tso_size != mss_now)) {
  9.2337 -+	     tcp_skb_mss(skb) != mss_now)) {
  9.2338 - 		tcp_set_skb_tso_segs(sk, skb, mss_now);
  9.2339 - 		tso_segs = tcp_skb_pcount(skb);
  9.2340 - 	}
  9.2341 -@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 
  9.2342 - 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
  9.2343 - 		if (!pskb_trim(skb, 0)) {
  9.2344 - 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
  9.2345 --			skb_shinfo(skb)->tso_segs = 1;
  9.2346 --			skb_shinfo(skb)->tso_size = 0;
  9.2347 -+			skb_shinfo(skb)->gso_segs = 1;
  9.2348 -+			skb_shinfo(skb)->gso_size = 0;
  9.2349 -+			skb_shinfo(skb)->gso_type = 0;
  9.2350 - 			skb->ip_summed = CHECKSUM_NONE;
  9.2351 - 			skb->csum = 0;
  9.2352 - 		}
  9.2353 -@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk)
  9.2354 - 		skb->csum = 0;
  9.2355 - 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
  9.2356 - 		TCP_SKB_CB(skb)->sacked = 0;
  9.2357 --		skb_shinfo(skb)->tso_segs = 1;
  9.2358 --		skb_shinfo(skb)->tso_size = 0;
  9.2359 -+		skb_shinfo(skb)->gso_segs = 1;
  9.2360 -+		skb_shinfo(skb)->gso_size = 0;
  9.2361 -+		skb_shinfo(skb)->gso_type = 0;
  9.2362 - 
  9.2363 - 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
  9.2364 - 		TCP_SKB_CB(skb)->seq = tp->write_seq;
  9.2365 -@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock *
  9.2366 - 	skb->csum = 0;
  9.2367 - 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
  9.2368 - 	TCP_SKB_CB(skb)->sacked = 0;
  9.2369 --	skb_shinfo(skb)->tso_segs = 1;
  9.2370 --	skb_shinfo(skb)->tso_size = 0;
  9.2371 -+	skb_shinfo(skb)->gso_segs = 1;
  9.2372 -+	skb_shinfo(skb)->gso_size = 0;
  9.2373 -+	skb_shinfo(skb)->gso_type = 0;
  9.2374 - 
  9.2375 - 	/* Send it off. */
  9.2376 - 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
  9.2377 -@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 
  9.2378 - 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
  9.2379 - 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
  9.2380 - 	TCP_SKB_CB(skb)->sacked = 0;
  9.2381 --	skb_shinfo(skb)->tso_segs = 1;
  9.2382 --	skb_shinfo(skb)->tso_size = 0;
  9.2383 -+	skb_shinfo(skb)->gso_segs = 1;
  9.2384 -+	skb_shinfo(skb)->gso_size = 0;
  9.2385 -+	skb_shinfo(skb)->gso_type = 0;
  9.2386 - 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
  9.2387 - 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
  9.2388 - 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
  9.2389 -@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk)
  9.2390 - 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
  9.2391 - 	TCP_ECN_send_syn(sk, tp, buff);
  9.2392 - 	TCP_SKB_CB(buff)->sacked = 0;
  9.2393 --	skb_shinfo(buff)->tso_segs = 1;
  9.2394 --	skb_shinfo(buff)->tso_size = 0;
  9.2395 -+	skb_shinfo(buff)->gso_segs = 1;
  9.2396 -+	skb_shinfo(buff)->gso_size = 0;
  9.2397 -+	skb_shinfo(buff)->gso_type = 0;
  9.2398 - 	buff->csum = 0;
  9.2399 - 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
  9.2400 - 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
  9.2401 -@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk)
  9.2402 - 		buff->csum = 0;
  9.2403 - 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
  9.2404 - 		TCP_SKB_CB(buff)->sacked = 0;
  9.2405 --		skb_shinfo(buff)->tso_segs = 1;
  9.2406 --		skb_shinfo(buff)->tso_size = 0;
  9.2407 -+		skb_shinfo(buff)->gso_segs = 1;
  9.2408 -+		skb_shinfo(buff)->gso_size = 0;
  9.2409 -+		skb_shinfo(buff)->gso_type = 0;
  9.2410 - 
  9.2411 - 		/* Send it off, this clears delayed acks for us. */
  9.2412 - 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
  9.2413 -@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc
  9.2414 - 	skb->csum = 0;
  9.2415 - 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
  9.2416 - 	TCP_SKB_CB(skb)->sacked = urgent;
  9.2417 --	skb_shinfo(skb)->tso_segs = 1;
  9.2418 --	skb_shinfo(skb)->tso_size = 0;
  9.2419 -+	skb_shinfo(skb)->gso_segs = 1;
  9.2420 -+	skb_shinfo(skb)->gso_size = 0;
  9.2421 -+	skb_shinfo(skb)->gso_type = 0;
  9.2422 - 
  9.2423 - 	/* Use a previous sequence.  This should cause the other
  9.2424 - 	 * end to send an ack.  Don't queue or clone SKB, just
  9.2425 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
  9.2426 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
  9.2427 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:20.000000000 +0100
  9.2428 -@@ -9,6 +9,8 @@
  9.2429 -  */
  9.2430 - 
  9.2431 - #include <linux/compiler.h>
  9.2432 -+#include <linux/if_ether.h>
  9.2433 -+#include <linux/kernel.h>
  9.2434 - #include <linux/skbuff.h>
  9.2435 - #include <linux/spinlock.h>
  9.2436 - #include <linux/netfilter_ipv4.h>
  9.2437 -@@ -158,16 +160,10 @@ error_nolock:
  9.2438 - 	goto out_exit;
  9.2439 - }
  9.2440 - 
  9.2441 --static int xfrm4_output_finish(struct sk_buff *skb)
  9.2442 -+static int xfrm4_output_finish2(struct sk_buff *skb)
  9.2443 - {
  9.2444 - 	int err;
  9.2445 - 
  9.2446 --#ifdef CONFIG_NETFILTER
  9.2447 --	if (!skb->dst->xfrm) {
  9.2448 --		IPCB(skb)->flags |= IPSKB_REROUTED;
  9.2449 --		return dst_output(skb);
  9.2450 --	}
  9.2451 --#endif
  9.2452 - 	while (likely((err = xfrm4_output_one(skb)) == 0)) {
  9.2453 - 		nf_reset(skb);
  9.2454 - 
  9.2455 -@@ -180,7 +176,7 @@ static int xfrm4_output_finish(struct sk
  9.2456 - 			return dst_output(skb);
  9.2457 - 
  9.2458 - 		err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
  9.2459 --			      skb->dst->dev, xfrm4_output_finish);
  9.2460 -+			      skb->dst->dev, xfrm4_output_finish2);
  9.2461 - 		if (unlikely(err != 1))
  9.2462 - 			break;
  9.2463 - 	}
  9.2464 -@@ -188,6 +184,48 @@ static int xfrm4_output_finish(struct sk
  9.2465 - 	return err;
  9.2466 - }
  9.2467 - 
  9.2468 -+static int xfrm4_output_finish(struct sk_buff *skb)
  9.2469 -+{
  9.2470 -+	struct sk_buff *segs;
  9.2471 -+
  9.2472 -+#ifdef CONFIG_NETFILTER
  9.2473 -+	if (!skb->dst->xfrm) {
  9.2474 -+		IPCB(skb)->flags |= IPSKB_REROUTED;
  9.2475 -+		return dst_output(skb);
  9.2476 -+	}
  9.2477 -+#endif
  9.2478 -+
  9.2479 -+	if (!skb_shinfo(skb)->gso_size)
  9.2480 -+		return xfrm4_output_finish2(skb);
  9.2481 -+
  9.2482 -+	skb->protocol = htons(ETH_P_IP);
  9.2483 -+	segs = skb_gso_segment(skb, 0);
  9.2484 -+	kfree_skb(skb);
  9.2485 -+	if (unlikely(IS_ERR(segs)))
  9.2486 -+		return PTR_ERR(segs);
  9.2487 -+
  9.2488 -+	do {
  9.2489 -+		struct sk_buff *nskb = segs->next;
  9.2490 -+		int err;
  9.2491 -+
  9.2492 -+		segs->next = NULL;
  9.2493 -+		err = xfrm4_output_finish2(segs);
  9.2494 -+
  9.2495 -+		if (unlikely(err)) {
  9.2496 -+			while ((segs = nskb)) {
  9.2497 -+				nskb = segs->next;
  9.2498 -+				segs->next = NULL;
  9.2499 -+				kfree_skb(segs);
  9.2500 -+			}
  9.2501 -+			return err;
  9.2502 -+		}
  9.2503 -+
  9.2504 -+		segs = nskb;
  9.2505 -+	} while (segs);
  9.2506 -+
  9.2507 -+	return 0;
  9.2508 -+}
  9.2509 -+
  9.2510 - int xfrm4_output(struct sk_buff *skb)
  9.2511 - {
  9.2512 - 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
  9.2513 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c ./net/ipv6/ip6_output.c
  9.2514 ---- ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c	2006-09-12 19:02:10.000000000 +0100
  9.2515 -+++ ./net/ipv6/ip6_output.c	2006-09-19 13:59:20.000000000 +0100
  9.2516 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  9.2517 - 
  9.2518 - int ip6_output(struct sk_buff *skb)
  9.2519 - {
  9.2520 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
  9.2521 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  9.2522 - 				dst_allfrag(skb->dst))
  9.2523 - 		return ip6_fragment(skb, ip6_output2);
  9.2524 - 	else
  9.2525 -@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st
  9.2526 - 		struct frag_hdr fhdr;
  9.2527 - 
  9.2528 - 		/* specify the length of each IP datagram fragment*/
  9.2529 --		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 
  9.2530 --						sizeof(struct frag_hdr);
  9.2531 -+		skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 
  9.2532 -+					    sizeof(struct frag_hdr);
  9.2533 -+		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  9.2534 - 		ipv6_select_ident(skb, &fhdr);
  9.2535 - 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
  9.2536 - 		__skb_queue_tail(&sk->sk_write_queue, skb);
  9.2537 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c ./net/ipv6/ipcomp6.c
  9.2538 ---- ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c	2006-09-12 19:02:10.000000000 +0100
  9.2539 -+++ ./net/ipv6/ipcomp6.c	2006-09-19 13:59:20.000000000 +0100
  9.2540 -@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
  9.2541 - 
  9.2542 - static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
  9.2543 - {
  9.2544 --	int err = 0;
  9.2545 -+	int err = -ENOMEM;
  9.2546 - 	u8 nexthdr = 0;
  9.2547 - 	int hdr_len = skb->h.raw - skb->nh.raw;
  9.2548 - 	unsigned char *tmp_hdr = NULL;
  9.2549 -@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta
  9.2550 - 	struct crypto_tfm *tfm;
  9.2551 - 	int cpu;
  9.2552 - 
  9.2553 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  9.2554 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
  9.2555 --		err = -ENOMEM;
  9.2556 -+	if (skb_linearize_cow(skb))
  9.2557 - 		goto out;
  9.2558 --	}
  9.2559 - 
  9.2560 - 	skb->ip_summed = CHECKSUM_NONE;
  9.2561 - 
  9.2562 -@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st
  9.2563 - 		goto out_ok;
  9.2564 - 	}
  9.2565 - 
  9.2566 --	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  9.2567 --		skb_linearize(skb, GFP_ATOMIC) != 0) {
  9.2568 -+	if (skb_linearize_cow(skb))
  9.2569 - 		goto out_ok;
  9.2570 --	}
  9.2571 - 
  9.2572 - 	/* compression */
  9.2573 - 	plen = skb->len - hdr_len;
  9.2574 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c ./net/ipv6/xfrm6_output.c
  9.2575 ---- ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c	2006-09-12 19:02:10.000000000 +0100
  9.2576 -+++ ./net/ipv6/xfrm6_output.c	2006-09-19 13:59:20.000000000 +0100
  9.2577 -@@ -151,7 +151,7 @@ error_nolock:
  9.2578 - 	goto out_exit;
  9.2579 - }
  9.2580 - 
  9.2581 --static int xfrm6_output_finish(struct sk_buff *skb)
  9.2582 -+static int xfrm6_output_finish2(struct sk_buff *skb)
  9.2583 - {
  9.2584 - 	int err;
  9.2585 - 
  9.2586 -@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk
  9.2587 - 			return dst_output(skb);
  9.2588 - 
  9.2589 - 		err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
  9.2590 --			      skb->dst->dev, xfrm6_output_finish);
  9.2591 -+			      skb->dst->dev, xfrm6_output_finish2);
  9.2592 - 		if (unlikely(err != 1))
  9.2593 - 			break;
  9.2594 - 	}
  9.2595 -@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk
  9.2596 - 	return err;
  9.2597 - }
  9.2598 - 
  9.2599 -+static int xfrm6_output_finish(struct sk_buff *skb)
  9.2600 -+{
  9.2601 -+	struct sk_buff *segs;
  9.2602 -+
  9.2603 -+	if (!skb_shinfo(skb)->gso_size)
  9.2604 -+		return xfrm6_output_finish2(skb);
  9.2605 -+
  9.2606 -+	skb->protocol = htons(ETH_P_IP);
  9.2607 -+	segs = skb_gso_segment(skb, 0);
  9.2608 -+	kfree_skb(skb);
  9.2609 -+	if (unlikely(IS_ERR(segs)))
  9.2610 -+		return PTR_ERR(segs);
  9.2611 -+
  9.2612 -+	do {
  9.2613 -+		struct sk_buff *nskb = segs->next;
  9.2614 -+		int err;
  9.2615 -+
  9.2616 -+		segs->next = NULL;
  9.2617 -+		err = xfrm6_output_finish2(segs);
  9.2618 -+
  9.2619 -+		if (unlikely(err)) {
  9.2620 -+			while ((segs = nskb)) {
  9.2621 -+				nskb = segs->next;
  9.2622 -+				segs->next = NULL;
  9.2623 -+				kfree_skb(segs);
  9.2624 -+			}
  9.2625 -+			return err;
  9.2626 -+		}
  9.2627 -+
  9.2628 -+		segs = nskb;
  9.2629 -+	} while (segs);
  9.2630 -+
  9.2631 -+	return 0;
  9.2632 -+}
  9.2633 -+
  9.2634 - int xfrm6_output(struct sk_buff *skb)
  9.2635 - {
  9.2636 - 	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
  9.2637 -diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_generic.c ./net/sched/sch_generic.c
  9.2638 ---- ../orig-linux-2.6.16.29/net/sched/sch_generic.c	2006-09-12 19:02:10.000000000 +0100
  9.2639 -+++ ./net/sched/sch_generic.c	2006-09-19 13:59:20.000000000 +0100
  9.2640 -@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device
  9.2641 -    dev->queue_lock serializes queue accesses for this device
  9.2642 -    AND dev->qdisc pointer itself.
  9.2643 - 
  9.2644 --   dev->xmit_lock serializes accesses to device driver.
  9.2645 -+   netif_tx_lock serializes accesses to device driver.
  9.2646 - 
  9.2647 --   dev->queue_lock and dev->xmit_lock are mutually exclusive,
  9.2648 -+   dev->queue_lock and netif_tx_lock are mutually exclusive,
  9.2649 -    if one is grabbed, another must be free.
  9.2650 -  */
  9.2651 - 
  9.2652 -@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device
  9.2653 -    NOTE: Called under dev->queue_lock with locally disabled BH.
  9.2654 - */
  9.2655 - 
  9.2656 --int qdisc_restart(struct net_device *dev)
  9.2657 -+static inline int qdisc_restart(struct net_device *dev)
  9.2658 - {
  9.2659 - 	struct Qdisc *q = dev->qdisc;
  9.2660 - 	struct sk_buff *skb;
  9.2661 - 
  9.2662 - 	/* Dequeue packet */
  9.2663 --	if ((skb = q->dequeue(q)) != NULL) {
  9.2664 -+	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
  9.2665 - 		unsigned nolock = (dev->features & NETIF_F_LLTX);
  9.2666 -+
  9.2667 -+		dev->gso_skb = NULL;
  9.2668 -+
  9.2669 - 		/*
  9.2670 - 		 * When the driver has LLTX set it does its own locking
  9.2671 - 		 * in start_xmit. No need to add additional overhead by
  9.2672 -@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev
  9.2673 - 		 * will be requeued.
  9.2674 - 		 */
  9.2675 - 		if (!nolock) {
  9.2676 --			if (!spin_trylock(&dev->xmit_lock)) {
  9.2677 -+			if (!netif_tx_trylock(dev)) {
  9.2678 - 			collision:
  9.2679 - 				/* So, someone grabbed the driver. */
  9.2680 - 				
  9.2681 -@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev
  9.2682 - 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
  9.2683 - 				goto requeue;
  9.2684 - 			}
  9.2685 --			/* Remember that the driver is grabbed by us. */
  9.2686 --			dev->xmit_lock_owner = smp_processor_id();
  9.2687 - 		}
  9.2688 - 		
  9.2689 - 		{
  9.2690 -@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev
  9.2691 - 
  9.2692 - 			if (!netif_queue_stopped(dev)) {
  9.2693 - 				int ret;
  9.2694 --				if (netdev_nit)
  9.2695 --					dev_queue_xmit_nit(skb, dev);
  9.2696 - 
  9.2697 --				ret = dev->hard_start_xmit(skb, dev);
  9.2698 -+				ret = dev_hard_start_xmit(skb, dev);
  9.2699 - 				if (ret == NETDEV_TX_OK) { 
  9.2700 - 					if (!nolock) {
  9.2701 --						dev->xmit_lock_owner = -1;
  9.2702 --						spin_unlock(&dev->xmit_lock);
  9.2703 -+						netif_tx_unlock(dev);
  9.2704 - 					}
  9.2705 - 					spin_lock(&dev->queue_lock);
  9.2706 - 					return -1;
  9.2707 -@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev
  9.2708 - 			/* NETDEV_TX_BUSY - we need to requeue */
  9.2709 - 			/* Release the driver */
  9.2710 - 			if (!nolock) { 
  9.2711 --				dev->xmit_lock_owner = -1;
  9.2712 --				spin_unlock(&dev->xmit_lock);
  9.2713 -+				netif_tx_unlock(dev);
  9.2714 - 			} 
  9.2715 - 			spin_lock(&dev->queue_lock);
  9.2716 - 			q = dev->qdisc;
  9.2717 -@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev
  9.2718 - 		 */
  9.2719 - 
  9.2720 - requeue:
  9.2721 --		q->ops->requeue(skb, q);
  9.2722 -+		if (skb->next)
  9.2723 -+			dev->gso_skb = skb;
  9.2724 -+		else
  9.2725 -+			q->ops->requeue(skb, q);
  9.2726 - 		netif_schedule(dev);
  9.2727 - 		return 1;
  9.2728 - 	}
  9.2729 -@@ -183,11 +183,23 @@ requeue:
  9.2730 - 	return q->q.qlen;
  9.2731 - }
  9.2732 - 
  9.2733 -+void __qdisc_run(struct net_device *dev)
  9.2734 -+{
  9.2735 -+	if (unlikely(dev->qdisc == &noop_qdisc))
  9.2736 -+		goto out;
  9.2737 -+
  9.2738 -+	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
  9.2739 -+		/* NOTHING */;
  9.2740 -+
  9.2741 -+out:
  9.2742 -+	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
  9.2743 -+}
  9.2744 -+
  9.2745 - static void dev_watchdog(unsigned long arg)
  9.2746 - {
  9.2747 - 	struct net_device *dev = (struct net_device *)arg;
  9.2748 - 
  9.2749 --	spin_lock(&dev->xmit_lock);
  9.2750 -+	netif_tx_lock(dev);
  9.2751 - 	if (dev->qdisc != &noop_qdisc) {
  9.2752 - 		if (netif_device_present(dev) &&
  9.2753 - 		    netif_running(dev) &&
  9.2754 -@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a
  9.2755 - 				dev_hold(dev);
  9.2756 - 		}
  9.2757 - 	}
  9.2758 --	spin_unlock(&dev->xmit_lock);
  9.2759 -+	netif_tx_unlock(dev);
  9.2760 - 
  9.2761 - 	dev_put(dev);
  9.2762 - }
  9.2763 -@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev
  9.2764 - 
  9.2765 - static void dev_watchdog_up(struct net_device *dev)
  9.2766 - {
  9.2767 --	spin_lock_bh(&dev->xmit_lock);
  9.2768 -+	netif_tx_lock_bh(dev);
  9.2769 - 	__netdev_watchdog_up(dev);
  9.2770 --	spin_unlock_bh(&dev->xmit_lock);
  9.2771 -+	netif_tx_unlock_bh(dev);
  9.2772 - }
  9.2773 - 
  9.2774 - static void dev_watchdog_down(struct net_device *dev)
  9.2775 - {
  9.2776 --	spin_lock_bh(&dev->xmit_lock);
  9.2777 -+	netif_tx_lock_bh(dev);
  9.2778 - 	if (del_timer(&dev->watchdog_timer))
  9.2779 - 		__dev_put(dev);
  9.2780 --	spin_unlock_bh(&dev->xmit_lock);
  9.2781 -+	netif_tx_unlock_bh(dev);
  9.2782 - }
  9.2783 - 
  9.2784 - void netif_carrier_on(struct net_device *dev)
  9.2785 -@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d
  9.2786 - 
  9.2787 - 	dev_watchdog_down(dev);
  9.2788 - 
  9.2789 --	while (test_bit(__LINK_STATE_SCHED, &dev->state))
  9.2790 -+	/* Wait for outstanding dev_queue_xmit calls. */
  9.2791 -+	synchronize_rcu();
  9.2792 -+
  9.2793 -+	/* Wait for outstanding qdisc_run calls. */
  9.2794 -+	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  9.2795 - 		yield();
  9.2796 - 
  9.2797 --	spin_unlock_wait(&dev->xmit_lock);
  9.2798 -+	if (dev->gso_skb) {
  9.2799 -+		kfree_skb(dev->gso_skb);
  9.2800 -+		dev->gso_skb = NULL;
  9.2801 -+	}
  9.2802 - }
  9.2803 - 
  9.2804 - void dev_init_scheduler(struct net_device *dev)
  9.2805 -@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
  9.2806 - EXPORT_SYMBOL(qdisc_alloc);
  9.2807 - EXPORT_SYMBOL(qdisc_destroy);
  9.2808 - EXPORT_SYMBOL(qdisc_reset);
  9.2809 --EXPORT_SYMBOL(qdisc_restart);
  9.2810 - EXPORT_SYMBOL(qdisc_lock_tree);
  9.2811 - EXPORT_SYMBOL(qdisc_unlock_tree);
  9.2812 -diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_teql.c ./net/sched/sch_teql.c
  9.2813 ---- ../orig-linux-2.6.16.29/net/sched/sch_teql.c	2006-09-12 19:02:10.000000000 +0100
  9.2814 -+++ ./net/sched/sch_teql.c	2006-09-19 13:59:20.000000000 +0100
  9.2815 -@@ -302,20 +302,17 @@ restart:
  9.2816 - 
  9.2817 - 		switch (teql_resolve(skb, skb_res, slave)) {
  9.2818 - 		case 0:
  9.2819 --			if (spin_trylock(&slave->xmit_lock)) {
  9.2820 --				slave->xmit_lock_owner = smp_processor_id();
  9.2821 -+			if (netif_tx_trylock(slave)) {
  9.2822 - 				if (!netif_queue_stopped(slave) &&
  9.2823 - 				    slave->hard_start_xmit(skb, slave) == 0) {
  9.2824 --					slave->xmit_lock_owner = -1;
  9.2825 --					spin_unlock(&slave->xmit_lock);
  9.2826 -+					netif_tx_unlock(slave);
  9.2827 - 					master->slaves = NEXT_SLAVE(q);
  9.2828 - 					netif_wake_queue(dev);
  9.2829 - 					master->stats.tx_packets++;
  9.2830 - 					master->stats.tx_bytes += len;
  9.2831 - 					return 0;
  9.2832 - 				}
  9.2833 --				slave->xmit_lock_owner = -1;
  9.2834 --				spin_unlock(&slave->xmit_lock);
  9.2835 -+				netif_tx_unlock(slave);
  9.2836 - 			}
  9.2837 - 			if (netif_queue_stopped(dev))
  9.2838 - 				busy = 1;
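
The net-gso-0-base hunks removed above add a software fallback that chops an oversized GSO packet into a singly-linked list of segments and transmits them one at a time (see the xfrm4_output_finish() and qdisc_restart()/gso_skb changes). Roughly, the pattern is the unlink-and-send loop sketched below, including the cleanup of not-yet-transmitted segments when one transmit fails. This is a user-space sketch only; struct seg, send_one() and send_all() are invented stand-ins for sk_buff and the real output path, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct seg { struct seg *next; int id; };

/* Stand-in for the per-segment transmit path; takes ownership of s.  0 = sent. */
static int send_one(struct seg *s)
{
    printf("tx segment %d\n", s->id);
    free(s);
    return 0;
}

/* Walk the list the way the removed output-finish loop does: unlink each entry
 * before handing it off, and free whatever has not been sent if one transmit
 * fails. */
static int send_all(struct seg *segs)
{
    while (segs) {
        struct seg *next = segs->next;

        segs->next = NULL;
        if (send_one(segs)) {
            while ((segs = next)) {
                next = segs->next;
                free(segs);
            }
            return -1;
        }
        segs = next;
    }
    return 0;
}

int main(void)
{
    struct seg *head = NULL;

    for (int i = 3; i > 0; i--) {    /* build segments 1, 2, 3 */
        struct seg *s = malloc(sizeof(*s));
        if (!s)
            return 1;
        s->id = i;
        s->next = head;
        head = s;
    }
    return send_all(head);
}
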
    10.1 --- a/patches/linux-2.6.16.29/net-gso-1-check-dodgy.patch	Mon Nov 27 13:22:21 2006 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,27 +0,0 @@
    10.4 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp.c ./net/ipv4/tcp.c
    10.5 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp.c	2006-09-19 13:59:20.000000000 +0100
    10.6 -+++ ./net/ipv4/tcp.c	2006-09-19 13:59:42.000000000 +0100
    10.7 -@@ -2042,13 +2042,19 @@ struct sk_buff *tcp_tso_segment(struct s
    10.8 - 	if (!pskb_may_pull(skb, thlen))
    10.9 - 		goto out;
   10.10 - 
   10.11 --	segs = NULL;
   10.12 --	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
   10.13 --		goto out;
   10.14 --
   10.15 - 	oldlen = (u16)~skb->len;
   10.16 - 	__skb_pull(skb, thlen);
   10.17 - 
   10.18 -+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
   10.19 -+		/* Packet is from an untrusted source, reset gso_segs. */
   10.20 -+		int mss = skb_shinfo(skb)->gso_size;
   10.21 -+
   10.22 -+		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
   10.23 -+
   10.24 -+		segs = NULL;
   10.25 -+		goto out;
   10.26 -+	}
   10.27 -+
   10.28 - 	segs = skb_segment(skb, features);
   10.29 - 	if (IS_ERR(segs))
   10.30 - 		goto out;
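
The check-dodgy hunk above stops trusting gso_segs from untrusted (NETIF_F_GSO_ROBUST) sources and recomputes it as a ceiling division of the remaining length by the advertised MSS (the TCP header has already been pulled at that point). The arithmetic is just the sketch below; gso_segs_for() is an illustrative name, not a kernel function.

#include <assert.h>

/* ceil(payload_len / mss) without floating point */
static unsigned int gso_segs_for(unsigned int payload_len, unsigned int mss)
{
    return (payload_len + mss - 1) / mss;
}

int main(void)
{
    assert(gso_segs_for(4000, 1448) == 3);  /* 2 * 1448 < 4000 <= 3 * 1448 */
    assert(gso_segs_for(1448, 1448) == 1);
    assert(gso_segs_for(1449, 1448) == 2);
    return 0;
}
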
    11.1 --- a/patches/linux-2.6.16.29/net-gso-2-checksum-fix.patch	Mon Nov 27 13:22:21 2006 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,451 +0,0 @@
    11.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
    11.5 ---- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
    11.6 -+++ ./drivers/net/bnx2.c	2006-09-19 13:59:46.000000000 +0100
    11.7 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
    11.8 - 		skb = tx_buf->skb;
    11.9 - #ifdef BCM_TSO 
   11.10 - 		/* partial BD completions possible with TSO packets */
   11.11 --		if (skb_shinfo(skb)->gso_size) {
   11.12 -+		if (skb_is_gso(skb)) {
   11.13 - 			u16 last_idx, last_ring_idx;
   11.14 - 
   11.15 - 			last_idx = sw_cons +
   11.16 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
   11.17 ---- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
   11.18 -+++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:46.000000000 +0100
   11.19 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   11.20 - 	struct cpl_tx_pkt *cpl;
   11.21 - 
   11.22 - #ifdef NETIF_F_TSO
   11.23 --	if (skb_shinfo(skb)->gso_size) {
   11.24 -+	if (skb_is_gso(skb)) {
   11.25 - 		int eth_type;
   11.26 - 		struct cpl_tx_pkt_lso *hdr;
   11.27 - 
   11.28 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
   11.29 ---- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
   11.30 -+++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:46.000000000 +0100
   11.31 -@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
   11.32 - 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   11.33 - 	int err;
   11.34 - 
   11.35 --	if (skb_shinfo(skb)->gso_size) {
   11.36 -+	if (skb_is_gso(skb)) {
   11.37 - 		if (skb_header_cloned(skb)) {
   11.38 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   11.39 - 			if (err)
   11.40 -@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
   11.41 - 		 * tso gets written back prematurely before the data is fully
   11.42 - 		 * DMAd to the controller */
   11.43 - 		if (!skb->data_len && tx_ring->last_tx_tso &&
   11.44 --				!skb_shinfo(skb)->gso_size) {
   11.45 -+		    !skb_is_gso(skb)) {
   11.46 - 			tx_ring->last_tx_tso = 0;
   11.47 - 			size -= 4;
   11.48 - 		}
   11.49 -@@ -2934,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
   11.50 - 
   11.51 - #ifdef NETIF_F_TSO
   11.52 - 	/* Controller Erratum workaround */
   11.53 --	if (!skb->data_len && tx_ring->last_tx_tso &&
   11.54 --		!skb_shinfo(skb)->gso_size)
   11.55 -+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
   11.56 - 		count++;
   11.57 - #endif
   11.58 - 
   11.59 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
   11.60 ---- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
   11.61 -+++ ./drivers/net/forcedeth.c	2006-09-19 13:59:46.000000000 +0100
   11.62 -@@ -1105,7 +1105,7 @@ static int nv_start_xmit(struct sk_buff 
   11.63 - 	np->tx_skbuff[nr] = skb;
   11.64 - 
   11.65 - #ifdef NETIF_F_TSO
   11.66 --	if (skb_shinfo(skb)->gso_size)
   11.67 -+	if (skb_is_gso(skb))
   11.68 - 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   11.69 - 	else
   11.70 - #endif
   11.71 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
   11.72 ---- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
   11.73 -+++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:46.000000000 +0100
   11.74 -@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
   11.75 - 	uint16_t ipcse, tucse, mss;
   11.76 - 	int err;
   11.77 - 
   11.78 --	if(likely(skb_shinfo(skb)->gso_size)) {
   11.79 -+	if (likely(skb_is_gso(skb))) {
   11.80 - 		if (skb_header_cloned(skb)) {
   11.81 - 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   11.82 - 			if (err)
   11.83 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
   11.84 ---- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
   11.85 -+++ ./drivers/net/loopback.c	2006-09-19 13:59:46.000000000 +0100
   11.86 -@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
   11.87 - #endif
   11.88 - 
   11.89 - #ifdef LOOPBACK_TSO
   11.90 --	if (skb_shinfo(skb)->gso_size) {
   11.91 -+	if (skb_is_gso(skb)) {
   11.92 - 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   11.93 - 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   11.94 - 
   11.95 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
   11.96 ---- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
   11.97 -+++ ./drivers/net/sky2.c	2006-09-19 13:59:46.000000000 +0100
   11.98 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   11.99 - 	count = sizeof(dma_addr_t) / sizeof(u32);
  11.100 - 	count += skb_shinfo(skb)->nr_frags * count;
  11.101 - 
  11.102 --	if (skb_shinfo(skb)->gso_size)
  11.103 -+	if (skb_is_gso(skb))
  11.104 - 		++count;
  11.105 - 
  11.106 - 	if (skb->ip_summed == CHECKSUM_HW)
  11.107 -diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  11.108 ---- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  11.109 -+++ ./drivers/net/typhoon.c	2006-09-19 13:59:46.000000000 +0100
  11.110 -@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  11.111 - 	 * If problems develop with TSO, check this first.
  11.112 - 	 */
  11.113 - 	numDesc = skb_shinfo(skb)->nr_frags + 1;
  11.114 --	if(skb_tso_size(skb))
  11.115 -+	if (skb_is_gso(skb))
  11.116 - 		numDesc++;
  11.117 - 
  11.118 - 	/* When checking for free space in the ring, we need to also
  11.119 -@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st
  11.120 - 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
  11.121 - 	}
  11.122 - 
  11.123 --	if(skb_tso_size(skb)) {
  11.124 -+	if (skb_is_gso(skb)) {
  11.125 - 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
  11.126 - 		first_txd->numDesc++;
  11.127 - 
  11.128 -diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  11.129 ---- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  11.130 -+++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:46.000000000 +0100
  11.131 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  11.132 - 	queue = card->qdio.out_qs
  11.133 - 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  11.134 - 
  11.135 --	if (skb_shinfo(skb)->gso_size)
  11.136 -+	if (skb_is_gso(skb))
  11.137 - 		large_send = card->options.large_send;
  11.138 - 
  11.139 - 	/*are we able to do TSO ? If so ,prepare and send it from here */
  11.140 -@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  11.141 - 		card->stats.tx_packets++;
  11.142 - 		card->stats.tx_bytes += skb->len;
  11.143 - #ifdef CONFIG_QETH_PERF_STATS
  11.144 --		if (skb_shinfo(skb)->gso_size &&
  11.145 --		   !(large_send == QETH_LARGE_SEND_NO)) {
  11.146 -+		if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) {
  11.147 - 			card->perf_stats.large_send_bytes += skb->len;
  11.148 - 			card->perf_stats.large_send_cnt++;
  11.149 - 		}
  11.150 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  11.151 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  11.152 -+++ ./include/linux/netdevice.h	2006-09-19 13:59:46.000000000 +0100
  11.153 -@@ -541,6 +541,7 @@ struct packet_type {
  11.154 - 					 struct net_device *);
  11.155 - 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  11.156 - 						int features);
  11.157 -+	int			(*gso_send_check)(struct sk_buff *skb);
  11.158 - 	void			*af_packet_priv;
  11.159 - 	struct list_head	list;
  11.160 - };
  11.161 -@@ -1001,14 +1002,15 @@ extern void linkwatch_run_queue(void);
  11.162 - 
  11.163 - static inline int skb_gso_ok(struct sk_buff *skb, int features)
  11.164 - {
  11.165 --	int feature = skb_shinfo(skb)->gso_size ?
  11.166 --		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  11.167 -+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
  11.168 - 	return (features & feature) == feature;
  11.169 - }
  11.170 - 
  11.171 - static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  11.172 - {
  11.173 --	return !skb_gso_ok(skb, dev->features);
  11.174 -+	return skb_is_gso(skb) &&
  11.175 -+	       (!skb_gso_ok(skb, dev->features) ||
  11.176 -+		unlikely(skb->ip_summed != CHECKSUM_HW));
  11.177 - }
  11.178 - 
  11.179 - #endif /* __KERNEL__ */
  11.180 -diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  11.181 ---- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  11.182 -+++ ./include/linux/skbuff.h	2006-09-19 13:59:46.000000000 +0100
  11.183 -@@ -1403,5 +1403,10 @@ static inline void nf_bridge_get(struct 
  11.184 - static inline void nf_reset(struct sk_buff *skb) {}
  11.185 - #endif /* CONFIG_NETFILTER */
  11.186 - 
  11.187 -+static inline int skb_is_gso(const struct sk_buff *skb)
  11.188 -+{
  11.189 -+	return skb_shinfo(skb)->gso_size;
  11.190 -+}
  11.191 -+
  11.192 - #endif	/* __KERNEL__ */
  11.193 - #endif	/* _LINUX_SKBUFF_H */
  11.194 -diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
  11.195 ---- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
  11.196 -+++ ./include/net/protocol.h	2006-09-19 13:59:46.000000000 +0100
  11.197 -@@ -37,6 +37,7 @@
  11.198 - struct net_protocol {
  11.199 - 	int			(*handler)(struct sk_buff *skb);
  11.200 - 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  11.201 -+	int			(*gso_send_check)(struct sk_buff *skb);
  11.202 - 	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  11.203 - 					       int features);
  11.204 - 	int			no_policy;
  11.205 -diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
  11.206 ---- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
  11.207 -+++ ./include/net/tcp.h	2006-09-19 13:59:46.000000000 +0100
  11.208 -@@ -1063,6 +1063,7 @@ extern struct request_sock_ops tcp_reque
  11.209 - 
  11.210 - extern int tcp_v4_destroy_sock(struct sock *sk);
  11.211 - 
  11.212 -+extern int tcp_v4_gso_send_check(struct sk_buff *skb);
  11.213 - extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  11.214 - 
  11.215 - #ifdef CONFIG_PROC_FS
  11.216 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
  11.217 ---- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
  11.218 -+++ ./net/bridge/br_forward.c	2006-09-19 13:59:46.000000000 +0100
  11.219 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  11.220 - int br_dev_queue_push_xmit(struct sk_buff *skb)
  11.221 - {
  11.222 - 	/* drop mtu oversized packets except tso */
  11.223 --	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  11.224 -+	if (skb->len > skb->dev->mtu && !skb_is_gso(skb))
  11.225 - 		kfree_skb(skb);
  11.226 - 	else {
  11.227 - #ifdef CONFIG_BRIDGE_NETFILTER
  11.228 -diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
  11.229 ---- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
  11.230 -+++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:46.000000000 +0100
  11.231 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  11.232 - {
  11.233 - 	if (skb->protocol == htons(ETH_P_IP) &&
  11.234 - 	    skb->len > skb->dev->mtu &&
  11.235 --	    !skb_shinfo(skb)->gso_size)
  11.236 -+	    !skb_is_gso(skb))
  11.237 - 		return ip_fragment(skb, br_dev_queue_push_xmit);
  11.238 - 	else
  11.239 - 		return br_dev_queue_push_xmit(skb);
  11.240 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
  11.241 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
  11.242 -+++ ./net/core/dev.c	2006-09-19 13:59:46.000000000 +0100
  11.243 -@@ -1083,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk
  11.244 - 	unsigned int csum;
  11.245 - 	int ret = 0, offset = skb->h.raw - skb->data;
  11.246 - 
  11.247 --	if (inward) {
  11.248 --		skb->ip_summed = CHECKSUM_NONE;
  11.249 --		goto out;
  11.250 -+	if (inward)
  11.251 -+		goto out_set_summed;
  11.252 -+
  11.253 -+	if (unlikely(skb_shinfo(skb)->gso_size)) {
  11.254 -+		static int warned;
  11.255 -+
  11.256 -+		WARN_ON(!warned);
  11.257 -+		warned = 1;
  11.258 -+
  11.259 -+		/* Let GSO fix up the checksum. */
  11.260 -+		goto out_set_summed;
  11.261 - 	}
  11.262 - 
  11.263 - 	if (skb_cloned(skb)) {
  11.264 -@@ -1102,6 +1110,8 @@ int skb_checksum_help(struct sk_buff *sk
  11.265 - 	BUG_ON(skb->csum + 2 > offset);
  11.266 - 
  11.267 - 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
  11.268 -+
  11.269 -+out_set_summed:
  11.270 - 	skb->ip_summed = CHECKSUM_NONE;
  11.271 - out:	
  11.272 - 	return ret;
  11.273 -@@ -1122,17 +1132,35 @@ struct sk_buff *skb_gso_segment(struct s
  11.274 - 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  11.275 - 	struct packet_type *ptype;
  11.276 - 	int type = skb->protocol;
  11.277 -+	int err;
  11.278 - 
  11.279 - 	BUG_ON(skb_shinfo(skb)->frag_list);
  11.280 --	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  11.281 - 
  11.282 - 	skb->mac.raw = skb->data;
  11.283 - 	skb->mac_len = skb->nh.raw - skb->data;
  11.284 - 	__skb_pull(skb, skb->mac_len);
  11.285 - 
  11.286 -+	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  11.287 -+		static int warned;
  11.288 -+
  11.289 -+		WARN_ON(!warned);
  11.290 -+		warned = 1;
  11.291 -+
  11.292 -+		if (skb_header_cloned(skb) &&
  11.293 -+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
  11.294 -+			return ERR_PTR(err);
  11.295 -+	}
  11.296 -+
  11.297 - 	rcu_read_lock();
  11.298 - 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  11.299 - 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  11.300 -+			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
  11.301 -+				err = ptype->gso_send_check(skb);
  11.302 -+				segs = ERR_PTR(err);
  11.303 -+				if (err || skb_gso_ok(skb, features))
  11.304 -+					break;
  11.305 -+				__skb_push(skb, skb->data - skb->nh.raw);
  11.306 -+			}
  11.307 - 			segs = ptype->gso_segment(skb, features);
  11.308 - 			break;
  11.309 - 		}
  11.310 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/af_inet.c ./net/ipv4/af_inet.c
  11.311 ---- ../orig-linux-2.6.16.29/net/ipv4/af_inet.c	2006-09-19 13:59:20.000000000 +0100
  11.312 -+++ ./net/ipv4/af_inet.c	2006-09-19 13:59:46.000000000 +0100
  11.313 -@@ -1085,6 +1085,40 @@ int inet_sk_rebuild_header(struct sock *
  11.314 - 
  11.315 - EXPORT_SYMBOL(inet_sk_rebuild_header);
  11.316 - 
  11.317 -+static int inet_gso_send_check(struct sk_buff *skb)
  11.318 -+{
  11.319 -+	struct iphdr *iph;
  11.320 -+	struct net_protocol *ops;
  11.321 -+	int proto;
  11.322 -+	int ihl;
  11.323 -+	int err = -EINVAL;
  11.324 -+
  11.325 -+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
  11.326 -+		goto out;
  11.327 -+
  11.328 -+	iph = skb->nh.iph;
  11.329 -+	ihl = iph->ihl * 4;
  11.330 -+	if (ihl < sizeof(*iph))
  11.331 -+		goto out;
  11.332 -+
  11.333 -+	if (unlikely(!pskb_may_pull(skb, ihl)))
  11.334 -+		goto out;
  11.335 -+
  11.336 -+	skb->h.raw = __skb_pull(skb, ihl);
  11.337 -+	iph = skb->nh.iph;
  11.338 -+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  11.339 -+	err = -EPROTONOSUPPORT;
  11.340 -+
  11.341 -+	rcu_read_lock();
  11.342 -+	ops = rcu_dereference(inet_protos[proto]);
  11.343 -+	if (likely(ops && ops->gso_send_check))
  11.344 -+		err = ops->gso_send_check(skb);
  11.345 -+	rcu_read_unlock();
  11.346 -+
  11.347 -+out:
  11.348 -+	return err;
  11.349 -+}
  11.350 -+
  11.351 - static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  11.352 - {
  11.353 - 	struct sk_buff *segs = ERR_PTR(-EINVAL);
  11.354 -@@ -1142,6 +1176,7 @@ static struct net_protocol igmp_protocol
  11.355 - static struct net_protocol tcp_protocol = {
  11.356 - 	.handler =	tcp_v4_rcv,
  11.357 - 	.err_handler =	tcp_v4_err,
  11.358 -+	.gso_send_check = tcp_v4_gso_send_check,
  11.359 - 	.gso_segment =	tcp_tso_segment,
  11.360 - 	.no_policy =	1,
  11.361 - };
  11.362 -@@ -1188,6 +1223,7 @@ static int ipv4_proc_init(void);
  11.363 - static struct packet_type ip_packet_type = {
  11.364 - 	.type = __constant_htons(ETH_P_IP),
  11.365 - 	.func = ip_rcv,
  11.366 -+	.gso_send_check = inet_gso_send_check,
  11.367 - 	.gso_segment = inet_gso_segment,
  11.368 - };
  11.369 - 
  11.370 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ip_output.c ./net/ipv4/ip_output.c
  11.371 ---- ../orig-linux-2.6.16.29/net/ipv4/ip_output.c	2006-09-19 13:59:20.000000000 +0100
  11.372 -+++ ./net/ipv4/ip_output.c	2006-09-19 13:59:46.000000000 +0100
  11.373 -@@ -210,7 +210,7 @@ static inline int ip_finish_output(struc
  11.374 - 		return dst_output(skb);
  11.375 - 	}
  11.376 - #endif
  11.377 --	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  11.378 -+	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
  11.379 - 		return ip_fragment(skb, ip_finish_output2);
  11.380 - 	else
  11.381 - 		return ip_finish_output2(skb);
  11.382 -@@ -1095,7 +1095,7 @@ ssize_t	ip_append_page(struct sock *sk, 
  11.383 - 	while (size > 0) {
  11.384 - 		int i;
  11.385 - 
  11.386 --		if (skb_shinfo(skb)->gso_size)
  11.387 -+		if (skb_is_gso(skb))
  11.388 - 			len = size;
  11.389 - 		else {
  11.390 - 
  11.391 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_ipv4.c ./net/ipv4/tcp_ipv4.c
  11.392 ---- ../orig-linux-2.6.16.29/net/ipv4/tcp_ipv4.c	2006-09-12 19:02:10.000000000 +0100
  11.393 -+++ ./net/ipv4/tcp_ipv4.c	2006-09-19 13:59:46.000000000 +0100
  11.394 -@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 
  11.395 - 	}
  11.396 - }
  11.397 - 
  11.398 -+int tcp_v4_gso_send_check(struct sk_buff *skb)
  11.399 -+{
  11.400 -+	struct iphdr *iph;
  11.401 -+	struct tcphdr *th;
  11.402 -+
  11.403 -+	if (!pskb_may_pull(skb, sizeof(*th)))
  11.404 -+		return -EINVAL;
  11.405 -+
  11.406 -+	iph = skb->nh.iph;
  11.407 -+	th = skb->h.th;
  11.408 -+
  11.409 -+	th->check = 0;
  11.410 -+	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
  11.411 -+	skb->csum = offsetof(struct tcphdr, check);
  11.412 -+	skb->ip_summed = CHECKSUM_HW;
  11.413 -+	return 0;
  11.414 -+}
  11.415 -+
  11.416 - /*
  11.417 -  *	This routine will send an RST to the other tcp.
  11.418 -  *
  11.419 -diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
  11.420 ---- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-19 13:59:20.000000000 +0100
  11.421 -+++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:46.000000000 +0100
  11.422 -@@ -195,7 +195,7 @@ static int xfrm4_output_finish(struct sk
  11.423 - 	}
  11.424 - #endif
  11.425 - 
  11.426 --	if (!skb_shinfo(skb)->gso_size)
  11.427 -+	if (!skb_is_gso(skb))
  11.428 - 		return xfrm4_output_finish2(skb);
  11.429 - 
  11.430 - 	skb->protocol = htons(ETH_P_IP);
  11.431 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c ./net/ipv6/ip6_output.c
  11.432 ---- ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c	2006-09-19 13:59:20.000000000 +0100
  11.433 -+++ ./net/ipv6/ip6_output.c	2006-09-19 13:59:46.000000000 +0100
  11.434 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  11.435 - 
  11.436 - int ip6_output(struct sk_buff *skb)
  11.437 - {
  11.438 --	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  11.439 -+	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
  11.440 - 				dst_allfrag(skb->dst))
  11.441 - 		return ip6_fragment(skb, ip6_output2);
  11.442 - 	else
  11.443 -diff -pruN ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c ./net/ipv6/xfrm6_output.c
  11.444 ---- ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c	2006-09-19 13:59:20.000000000 +0100
  11.445 -+++ ./net/ipv6/xfrm6_output.c	2006-09-19 13:59:46.000000000 +0100
  11.446 -@@ -179,7 +179,7 @@ static int xfrm6_output_finish(struct sk
  11.447 - {
  11.448 - 	struct sk_buff *segs;
  11.449 - 
  11.450 --	if (!skb_shinfo(skb)->gso_size)
  11.451 -+	if (!skb_is_gso(skb))
  11.452 - 		return xfrm6_output_finish2(skb);
  11.453 - 
  11.454 - 	skb->protocol = htons(ETH_P_IP);
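
The checksum-fix patch above introduces per-protocol gso_send_check() hooks; tcp_v4_gso_send_check() seeds th->check with the pseudo-header sum and records the offset of the final 16-bit fold, so the GSO path or the NIC only has to add the TCP header and payload sum on top. That scheme relies on the Internet checksum being additive over byte ranges and finished by a one's-complement fold, which the user-space sketch below demonstrates; csum_add() and csum_fold32() are illustrative helpers, not the kernel's csum_partial()/csum_fold().

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Sum 16-bit big-endian words into a 32-bit accumulator (odd trailing byte padded). */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)data[i] << 8 | data[i + 1];
    if (len & 1)
        sum += (uint32_t)data[len - 1] << 8;
    return sum;
}

/* Fold carries back in twice and complement -- the final checksum value. */
static uint16_t csum_fold32(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t pkt[20];

    for (size_t i = 0; i < sizeof(pkt); i++)
        pkt[i] = (uint8_t)(i * 7 + 1);

    /* Summing a prefix (think "pseudo-header") and the remainder separately,
     * then folding once at the end, matches folding a single pass over it all. */
    uint32_t partial = csum_add(0, pkt, 8);                                   /* seeded partial sum */
    uint16_t split   = csum_fold32(csum_add(partial, pkt + 8, sizeof(pkt) - 8));
    uint16_t whole   = csum_fold32(csum_add(0, pkt, sizeof(pkt)));
    assert(split == whole);
    return 0;
}
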
    12.1 --- a/patches/linux-2.6.16.29/net-gso-3-fix-errorcheck.patch	Mon Nov 27 13:22:21 2006 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,17 +0,0 @@
    12.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
    12.5 ---- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-19 13:59:46.000000000 +0100
    12.6 -+++ ./include/linux/netdevice.h	2006-09-19 14:05:28.000000000 +0100
    12.7 -@@ -930,10 +930,10 @@ static inline void netif_tx_lock_bh(stru
    12.8 - 
    12.9 - static inline int netif_tx_trylock(struct net_device *dev)
   12.10 - {
   12.11 --	int err = spin_trylock(&dev->_xmit_lock);
   12.12 --	if (!err)
   12.13 -+	int ok = spin_trylock(&dev->_xmit_lock);
   12.14 -+	if (likely(ok))
   12.15 - 		dev->xmit_lock_owner = smp_processor_id();
   12.16 --	return err;
   12.17 -+	return ok;
   12.18 - }
   12.19 - 
   12.20 - static inline void netif_tx_unlock(struct net_device *dev)
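
The fix-errorcheck patch above corrects a return-value mix-up in the netif_tx_trylock() helper added earlier in this series: spin_trylock() returns non-zero when the lock was taken, but the helper treated that value like an error code and recorded the lock owner only when the lock had not actually been acquired. The user-space sketch below shows the intended convention, with a pthread mutex standing in for the device xmit lock; tx_trylock() and tx_unlock() are invented names, not kernel helpers.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int xmit_lock_owner = -1;

/* Returns non-zero when the lock was acquired, mirroring spin_trylock(). */
static int tx_trylock(int cpu)
{
    int ok = (pthread_mutex_trylock(&xmit_lock) == 0);
    if (ok)
        xmit_lock_owner = cpu;    /* record the owner only on success */
    return ok;
}

static void tx_unlock(void)
{
    xmit_lock_owner = -1;
    pthread_mutex_unlock(&xmit_lock);
}

int main(void)
{
    if (tx_trylock(0)) {
        printf("acquired by cpu %d\n", xmit_lock_owner);
        tx_unlock();
    } else {
        printf("driver busy, requeue\n");
    }
    return 0;
}
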
    13.1 --- a/patches/linux-2.6.16.29/net-gso-4-kill-warnon.patch	Mon Nov 27 13:22:21 2006 +0000
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,27 +0,0 @@
    13.4 -diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
    13.5 ---- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-19 13:59:46.000000000 +0100
    13.6 -+++ ./net/core/dev.c	2006-09-19 14:05:32.000000000 +0100
    13.7 -@@ -1087,11 +1087,6 @@ int skb_checksum_help(struct sk_buff *sk
    13.8 - 		goto out_set_summed;
    13.9 - 
   13.10 - 	if (unlikely(skb_shinfo(skb)->gso_size)) {
   13.11 --		static int warned;
   13.12 --
   13.13 --		WARN_ON(!warned);
   13.14 --		warned = 1;
   13.15 --
   13.16 - 		/* Let GSO fix up the checksum. */
   13.17 - 		goto out_set_summed;
   13.18 - 	}
   13.19 -@@ -1141,11 +1136,6 @@ struct sk_buff *skb_gso_segment(struct s
   13.20 - 	__skb_pull(skb, skb->mac_len);
   13.21 - 
   13.22 - 	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
   13.23 --		static int warned;
   13.24 --
   13.25 --		WARN_ON(!warned);
   13.26 --		warned = 1;
   13.27 --
   13.28 - 		if (skb_header_cloned(skb) &&
   13.29 - 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
   13.30 - 			return ERR_PTR(err);
    14.1 --- a/patches/linux-2.6.16.29/net-gso-5-rcv-mss.patch	Mon Nov 27 13:22:21 2006 +0000
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,13 +0,0 @@
    14.4 -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
    14.5 -index 104af5d..1fa1536 100644
    14.6 ---- a/net/ipv4/tcp_input.c
    14.7 -+++ b/net/ipv4/tcp_input.c
    14.8 -@@ -127,7 +127,7 @@ static void tcp_measure_rcv_mss(struct s
    14.9 - 	/* skb->len may jitter because of SACKs, even if peer
   14.10 - 	 * sends good full-sized frames.
   14.11 - 	 */
   14.12 --	len = skb->len;
   14.13 -+	len = skb_shinfo(skb)->gso_size ?: skb->len;
   14.14 - 	if (len >= icsk->icsk_ack.rcv_mss) {
   14.15 - 		icsk->icsk_ack.rcv_mss = len;
   14.16 - 	} else {
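
The rcv-mss hunk above measures the peer's MSS from gso_size when the skb is a GSO aggregate and falls back to skb->len otherwise, written with GCC's binary a ?: b conditional (the middle operand omitted, so the first operand is evaluated once and used unless it is zero). The snippet below only illustrates that extension; it needs gcc or clang, and the variable names are made up.

#include <stdio.h>

int main(void)
{
    unsigned int gso_size = 0, skb_len = 1500;

    unsigned int len = gso_size ?: skb_len;   /* 1500: gso_size is zero */
    gso_size = 1448;
    unsigned int len2 = gso_size ?: skb_len;  /* 1448: per-segment size wins */

    printf("%u %u\n", len, len2);
    return 0;
}
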
    15.1 --- a/patches/linux-2.6.16.29/pci-mmconfig-fix-from-2.6.17.patch	Mon Nov 27 13:22:21 2006 +0000
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,292 +0,0 @@
    15.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/pci/mmconfig.c ./arch/i386/pci/mmconfig.c
    15.5 ---- ../orig-linux-2.6.16.29/arch/i386/pci/mmconfig.c	2006-09-12 19:02:10.000000000 +0100
    15.6 -+++ ./arch/i386/pci/mmconfig.c	2006-09-21 09:35:27.000000000 +0100
    15.7 -@@ -12,14 +12,22 @@
    15.8 - #include <linux/pci.h>
    15.9 - #include <linux/init.h>
   15.10 - #include <linux/acpi.h>
   15.11 -+#include <asm/e820.h>
   15.12 - #include "pci.h"
   15.13 - 
   15.14 -+/* aperture is up to 256MB but BIOS may reserve less */
   15.15 -+#define MMCONFIG_APER_MIN	(2 * 1024*1024)
   15.16 -+#define MMCONFIG_APER_MAX	(256 * 1024*1024)
   15.17 -+
   15.18 -+/* Assume systems with more busses have correct MCFG */
   15.19 -+#define MAX_CHECK_BUS 16
   15.20 -+
   15.21 - #define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))
   15.22 - 
   15.23 - /* The base address of the last MMCONFIG device accessed */
   15.24 - static u32 mmcfg_last_accessed_device;
   15.25 - 
   15.26 --static DECLARE_BITMAP(fallback_slots, 32);
   15.27 -+static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
   15.28 - 
   15.29 - /*
   15.30 -  * Functions for accessing PCI configuration space with MMCONFIG accesses
   15.31 -@@ -29,8 +37,8 @@ static u32 get_base_addr(unsigned int se
   15.32 - 	int cfg_num = -1;
   15.33 - 	struct acpi_table_mcfg_config *cfg;
   15.34 - 
   15.35 --	if (seg == 0 && bus == 0 &&
   15.36 --	    test_bit(PCI_SLOT(devfn), fallback_slots))
   15.37 -+	if (seg == 0 && bus < MAX_CHECK_BUS &&
   15.38 -+	    test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
   15.39 - 		return 0;
   15.40 - 
   15.41 - 	while (1) {
   15.42 -@@ -74,8 +82,10 @@ static int pci_mmcfg_read(unsigned int s
   15.43 - 	unsigned long flags;
   15.44 - 	u32 base;
   15.45 - 
   15.46 --	if (!value || (bus > 255) || (devfn > 255) || (reg > 4095))
   15.47 -+	if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
   15.48 -+		*value = -1;
   15.49 - 		return -EINVAL;
   15.50 -+	}
   15.51 - 
   15.52 - 	base = get_base_addr(seg, bus, devfn);
   15.53 - 	if (!base)
   15.54 -@@ -146,30 +156,66 @@ static struct pci_raw_ops pci_mmcfg = {
   15.55 -    Normally this can be expressed in the MCFG by not listing them
   15.56 -    and assigning suitable _SEGs, but this isn't implemented in some BIOS.
   15.57 -    Instead try to discover all devices on bus 0 that are unreachable using MM
   15.58 --   and fallback for them.
   15.59 --   We only do this for bus 0/seg 0 */
   15.60 -+   and fallback for them. */
   15.61 - static __init void unreachable_devices(void)
   15.62 - {
   15.63 --	int i;
   15.64 -+	int i, k;
   15.65 - 	unsigned long flags;
   15.66 - 
   15.67 --	for (i = 0; i < 32; i++) {
   15.68 --		u32 val1;
   15.69 --		u32 addr;
   15.70 -+	for (k = 0; k < MAX_CHECK_BUS; k++) {
   15.71 -+		for (i = 0; i < 32; i++) {
   15.72 -+			u32 val1;
   15.73 -+			u32 addr;
   15.74 -+
   15.75 -+			pci_conf1_read(0, k, PCI_DEVFN(i, 0), 0, 4, &val1);
   15.76 -+			if (val1 == 0xffffffff)
   15.77 -+				continue;
   15.78 -+
   15.79 -+			/* Locking probably not needed, but safer */
   15.80 -+			spin_lock_irqsave(&pci_config_lock, flags);
   15.81 -+			addr = get_base_addr(0, k, PCI_DEVFN(i, 0));
   15.82 -+			if (addr != 0)
   15.83 -+				pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0));
   15.84 -+			if (addr == 0 ||
   15.85 -+			    readl((u32 __iomem *)mmcfg_virt_addr) != val1) {
   15.86 -+				set_bit(i, fallback_slots);
   15.87 -+				printk(KERN_NOTICE
   15.88 -+			"PCI: No mmconfig possible on %x:%x\n", k, i);
   15.89 -+			}
   15.90 -+			spin_unlock_irqrestore(&pci_config_lock, flags);
   15.91 -+		}
   15.92 -+	}
   15.93 -+}
   15.94 - 
   15.95 --		pci_conf1_read(0, 0, PCI_DEVFN(i, 0), 0, 4, &val1);
   15.96 --		if (val1 == 0xffffffff)
   15.97 -+/* NB. Ripped from arch/i386/kernel/setup.c for this Xen bugfix patch. */
   15.98 -+#ifdef CONFIG_XEN
   15.99 -+extern struct e820map machine_e820;
  15.100 -+#define e820 machine_e820
  15.101 -+#endif
  15.102 -+static int __init
  15.103 -+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
  15.104 -+{
  15.105 -+	u64 start = s;
  15.106 -+	u64 end = e;
  15.107 -+	int i;
  15.108 -+	for (i = 0; i < e820.nr_map; i++) {
  15.109 -+		struct e820entry *ei = &e820.map[i];
  15.110 -+		if (type && ei->type != type)
  15.111 - 			continue;
  15.112 --
  15.113 --		/* Locking probably not needed, but safer */
  15.114 --		spin_lock_irqsave(&pci_config_lock, flags);
  15.115 --		addr = get_base_addr(0, 0, PCI_DEVFN(i, 0));
  15.116 --		if (addr != 0)
  15.117 --			pci_exp_set_dev_base(addr, 0, PCI_DEVFN(i, 0));
  15.118 --		if (addr == 0 || readl((u32 __iomem *)mmcfg_virt_addr) != val1)
  15.119 --			set_bit(i, fallback_slots);
  15.120 --		spin_unlock_irqrestore(&pci_config_lock, flags);
  15.121 -+		/* is the region (part) in overlap with the current region ?*/
  15.122 -+		if (ei->addr >= end || ei->addr + ei->size <= start)
  15.123 -+			continue;
  15.124 -+		/* if the region is at the beginning of <start,end> we move
  15.125 -+		 * start to the end of the region since it's ok until there
  15.126 -+		 */
  15.127 -+		if (ei->addr <= start)
  15.128 -+			start = ei->addr + ei->size;
  15.129 -+		/* if start is now at or beyond end, we're done, full
  15.130 -+		 * coverage */
  15.131 -+		if (start >= end)
  15.132 -+			return 1; /* we're done */
  15.133 - 	}
  15.134 -+	return 0;
  15.135 - }
  15.136 - 
  15.137 - static int __init pci_mmcfg_init(void)
  15.138 -@@ -183,6 +229,15 @@ static int __init pci_mmcfg_init(void)
  15.139 - 	    (pci_mmcfg_config[0].base_address == 0))
  15.140 - 		goto out;
  15.141 - 
  15.142 -+	if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
  15.143 -+			pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
  15.144 -+			E820_RESERVED)) {
  15.145 -+		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
  15.146 -+				pci_mmcfg_config[0].base_address);
  15.147 -+		printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
  15.148 -+		goto out;
  15.149 -+	}
  15.150 -+
  15.151 - 	printk(KERN_INFO "PCI: Using MMCONFIG\n");
  15.152 - 	raw_pci_ops = &pci_mmcfg;
  15.153 - 	pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
  15.154 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/pci/mmconfig.c ./arch/x86_64/pci/mmconfig.c
  15.155 ---- ../orig-linux-2.6.16.29/arch/x86_64/pci/mmconfig.c	2006-09-12 19:02:10.000000000 +0100
  15.156 -+++ ./arch/x86_64/pci/mmconfig.c	2006-09-21 09:35:40.000000000 +0100
  15.157 -@@ -9,11 +9,19 @@
  15.158 - #include <linux/init.h>
  15.159 - #include <linux/acpi.h>
  15.160 - #include <linux/bitmap.h>
  15.161 -+#include <asm/e820.h>
  15.162 -+
  15.163 - #include "pci.h"
  15.164 - 
  15.165 --#define MMCONFIG_APER_SIZE (256*1024*1024)
  15.166 -+/* aperture is up to 256MB but BIOS may reserve less */
  15.167 -+#define MMCONFIG_APER_MIN	(2 * 1024*1024)
  15.168 -+#define MMCONFIG_APER_MAX	(256 * 1024*1024)
  15.169 -+
  15.170 -+/* Verify the first 16 busses. We assume that systems with more busses
  15.171 -+   get MCFG right. */
  15.172 -+#define MAX_CHECK_BUS 16
  15.173 - 
  15.174 --static DECLARE_BITMAP(fallback_slots, 32);
  15.175 -+static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
  15.176 - 
  15.177 - /* Static virtual mapping of the MMCONFIG aperture */
  15.178 - struct mmcfg_virt {
  15.179 -@@ -55,7 +63,8 @@ static char __iomem *get_virt(unsigned i
  15.180 - static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
  15.181 - {
  15.182 - 	char __iomem *addr;
  15.183 --	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
  15.184 -+	if (seg == 0 && bus < MAX_CHECK_BUS &&
  15.185 -+		test_bit(32*bus + PCI_SLOT(devfn), fallback_slots))
  15.186 - 		return NULL;
  15.187 - 	addr = get_virt(seg, bus);
  15.188 - 	if (!addr)
  15.189 -@@ -69,8 +78,10 @@ static int pci_mmcfg_read(unsigned int s
  15.190 - 	char __iomem *addr;
  15.191 - 
  15.192 - 	/* Why do we have this when nobody checks it. How about a BUG()!? -AK */
  15.193 --	if (unlikely(!value || (bus > 255) || (devfn > 255) || (reg > 4095)))
  15.194 -+	if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
  15.195 -+		*value = -1;
  15.196 - 		return -EINVAL;
  15.197 -+	}
  15.198 - 
  15.199 - 	addr = pci_dev_base(seg, bus, devfn);
  15.200 - 	if (!addr)
  15.201 -@@ -129,23 +140,56 @@ static struct pci_raw_ops pci_mmcfg = {
  15.202 -    Normally this can be expressed in the MCFG by not listing them
  15.203 -    and assigning suitable _SEGs, but this isn't implemented in some BIOS.
  15.204 -    Instead try to discover all devices on bus 0 that are unreachable using MM
  15.205 --   and fallback for them.
  15.206 --   We only do this for bus 0/seg 0 */
  15.207 -+   and fallback for them. */
  15.208 - static __init void unreachable_devices(void)
  15.209 - {
  15.210 --	int i;
  15.211 --	for (i = 0; i < 32; i++) {
  15.212 --		u32 val1;
  15.213 --		char __iomem *addr;
  15.214 -+	int i, k;
  15.215 -+	/* Use the max bus number from ACPI here? */
  15.216 -+	for (k = 0; k < MAX_CHECK_BUS; k++) {
  15.217 -+		for (i = 0; i < 32; i++) {
  15.218 -+			u32 val1;
  15.219 -+			char __iomem *addr;
  15.220 -+
  15.221 -+			pci_conf1_read(0, k, PCI_DEVFN(i,0), 0, 4, &val1);
  15.222 -+			if (val1 == 0xffffffff)
  15.223 -+				continue;
  15.224 -+			addr = pci_dev_base(0, k, PCI_DEVFN(i, 0));
  15.225 -+			if (addr == NULL|| readl(addr) != val1) {
  15.226 -+				set_bit(i + 32*k, fallback_slots);
  15.227 -+				printk(KERN_NOTICE
  15.228 -+				"PCI: No mmconfig possible on device %x:%x\n",
  15.229 -+					k, i);
  15.230 -+			}
  15.231 -+		}
  15.232 -+	}
  15.233 -+}
  15.234 - 
  15.235 --		pci_conf1_read(0, 0, PCI_DEVFN(i,0), 0, 4, &val1);
  15.236 --		if (val1 == 0xffffffff)
  15.237 -+/* NB. Ripped from arch/x86_64/kernel/e820.c for this Xen bugfix patch. */
  15.238 -+#ifdef CONFIG_XEN
  15.239 -+extern struct e820map machine_e820;
  15.240 -+#define e820 machine_e820
  15.241 -+#endif
  15.242 -+static int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
  15.243 -+{
  15.244 -+	int i;
  15.245 -+	for (i = 0; i < e820.nr_map; i++) {
  15.246 -+		struct e820entry *ei = &e820.map[i];
  15.247 -+		if (type && ei->type != type)
  15.248 - 			continue;
  15.249 --		addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
  15.250 --		if (addr == NULL|| readl(addr) != val1) {
  15.251 --			set_bit(i, &fallback_slots);
  15.252 --		}
  15.253 -+		/* is the region (part) in overlap with the current region ?*/
  15.254 -+		if (ei->addr >= end || ei->addr + ei->size <= start)
  15.255 -+			continue;
  15.256 -+
  15.257 -+		/* if the region is at the beginning of <start,end> we move
  15.258 -+		 * start to the end of the region since it's ok until there
  15.259 -+		 */
  15.260 -+		if (ei->addr <= start)
  15.261 -+			start = ei->addr + ei->size;
  15.262 -+		/* if start is now at or beyond end, we're done, full coverage */
  15.263 -+		if (start >= end)
  15.264 -+			return 1; /* we're done */
  15.265 - 	}
  15.266 -+	return 0;
  15.267 - }
  15.268 - 
  15.269 - static int __init pci_mmcfg_init(void)
  15.270 -@@ -161,6 +205,15 @@ static int __init pci_mmcfg_init(void)
  15.271 - 	    (pci_mmcfg_config[0].base_address == 0))
  15.272 - 		return 0;
  15.273 - 
  15.274 -+	if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
  15.275 -+			pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
  15.276 -+			E820_RESERVED)) {
  15.277 -+		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
  15.278 -+				pci_mmcfg_config[0].base_address);
  15.279 -+		printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
  15.280 -+		return 0;
  15.281 -+	}
  15.282 -+
  15.283 - 	/* RED-PEN i386 doesn't do _nocache right now */
  15.284 - 	pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
  15.285 - 	if (pci_mmcfg_virt == NULL) {
  15.286 -@@ -169,7 +222,8 @@ static int __init pci_mmcfg_init(void)
  15.287 - 	}
  15.288 - 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
  15.289 - 		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
  15.290 --		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE);
  15.291 -+		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
  15.292 -+							 MMCONFIG_APER_MAX);
  15.293 - 		if (!pci_mmcfg_virt[i].virt) {
  15.294 - 			printk("PCI: Cannot map mmconfig aperture for segment %d\n",
  15.295 - 			       pci_mmcfg_config[i].pci_segment_group_number);
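Two things carry the weight of the mmconfig fix above: fallback slots are tracked per bus (bit index PCI_SLOT(devfn) + 32*bus for the first MAX_CHECK_BUS busses), and MMCONFIG is enabled only when the first 2MB of the MCFG aperture are E820-reserved. The added e820_all_mapped() walks the map and advances start across each overlapping entry of the requested type until the range is covered. A standalone sketch of that coverage check (illustrative; the single map entry and addresses are made up, and entries are assumed sorted by address):

    #include <stdio.h>
    #include <stdint.h>

    struct entry { uint64_t addr, size; int type; };

    /* 1 if [start, end) is fully covered by map entries of 'type' */
    static int all_mapped(const struct entry *map, int n,
                          uint64_t start, uint64_t end, int type)
    {
            for (int i = 0; i < n; i++) {
                    const struct entry *e = &map[i];
                    if (type && e->type != type)
                            continue;
                    if (e->addr >= end || e->addr + e->size <= start)
                            continue;                       /* no overlap */
                    if (e->addr <= start)
                            start = e->addr + e->size;      /* covered up to here */
                    if (start >= end)
                            return 1;                       /* full coverage */
            }
            return 0;
    }

    int main(void)
    {
            struct entry map[] = { { 0xe0000000u, 0x10000000u, 2 /* reserved */ } };
            uint64_t base = 0xe0000000u;
            printf("%d\n", all_mapped(map, 1, base, base + 2*1024*1024, 2));
            return 0;
    }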
    16.1 --- a/patches/linux-2.6.16.29/pmd-shared.patch	Mon Nov 27 13:22:21 2006 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,111 +0,0 @@
    16.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/mm/pageattr.c ./arch/i386/mm/pageattr.c
    16.5 ---- ../orig-linux-2.6.16.29/arch/i386/mm/pageattr.c	2006-09-12 19:02:10.000000000 +0100
    16.6 -+++ ./arch/i386/mm/pageattr.c	2006-09-19 14:05:35.000000000 +0100
    16.7 -@@ -78,7 +78,7 @@ static void set_pmd_pte(pte_t *kpte, uns
    16.8 - 	unsigned long flags;
    16.9 - 
   16.10 - 	set_pte_atomic(kpte, pte); 	/* change init_mm */
   16.11 --	if (PTRS_PER_PMD > 1)
   16.12 -+	if (HAVE_SHARED_KERNEL_PMD)
   16.13 - 		return;
   16.14 - 
   16.15 - 	spin_lock_irqsave(&pgd_lock, flags);
   16.16 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/mm/pgtable.c ./arch/i386/mm/pgtable.c
   16.17 ---- ../orig-linux-2.6.16.29/arch/i386/mm/pgtable.c	2006-09-12 19:02:10.000000000 +0100
   16.18 -+++ ./arch/i386/mm/pgtable.c	2006-09-19 14:05:35.000000000 +0100
   16.19 -@@ -215,9 +215,10 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
   16.20 - 		spin_lock_irqsave(&pgd_lock, flags);
   16.21 - 	}
   16.22 - 
   16.23 --	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   16.24 --			swapper_pg_dir + USER_PTRS_PER_PGD,
   16.25 --			KERNEL_PGD_PTRS);
   16.26 -+	if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
   16.27 -+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
   16.28 -+				swapper_pg_dir + USER_PTRS_PER_PGD,
   16.29 -+				KERNEL_PGD_PTRS);
   16.30 - 	if (PTRS_PER_PMD > 1)
   16.31 - 		return;
   16.32 - 
   16.33 -@@ -249,6 +250,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
   16.34 - 			goto out_oom;
   16.35 - 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
   16.36 - 	}
   16.37 -+
   16.38 -+	if (!HAVE_SHARED_KERNEL_PMD) {
   16.39 -+		unsigned long flags;
   16.40 -+
   16.41 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   16.42 -+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
   16.43 -+			if (!pmd)
   16.44 -+				goto out_oom;
   16.45 -+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
   16.46 -+		}
   16.47 -+
   16.48 -+		spin_lock_irqsave(&pgd_lock, flags);
   16.49 -+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   16.50 -+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
   16.51 -+			pgd_t *kpgd = pgd_offset_k(v);
   16.52 -+			pud_t *kpud = pud_offset(kpgd, v);
   16.53 -+			pmd_t *kpmd = pmd_offset(kpud, v);
   16.54 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   16.55 -+			memcpy(pmd, kpmd, PAGE_SIZE);
   16.56 -+		}
   16.57 -+		pgd_list_add(pgd);
   16.58 -+		spin_unlock_irqrestore(&pgd_lock, flags);
   16.59 -+	}
   16.60 -+
   16.61 - 	return pgd;
   16.62 - 
   16.63 - out_oom:
   16.64 -@@ -263,9 +288,23 @@ void pgd_free(pgd_t *pgd)
   16.65 - 	int i;
   16.66 - 
   16.67 - 	/* in the PAE case user pgd entries are overwritten before usage */
   16.68 --	if (PTRS_PER_PMD > 1)
   16.69 --		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
   16.70 --			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
   16.71 -+	if (PTRS_PER_PMD > 1) {
   16.72 -+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
   16.73 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   16.74 -+			kmem_cache_free(pmd_cache, pmd);
   16.75 -+		}
   16.76 -+		if (!HAVE_SHARED_KERNEL_PMD) {
   16.77 -+			unsigned long flags;
   16.78 -+			spin_lock_irqsave(&pgd_lock, flags);
   16.79 -+			pgd_list_del(pgd);
   16.80 -+			spin_unlock_irqrestore(&pgd_lock, flags);
   16.81 -+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   16.82 -+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   16.83 -+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   16.84 -+				kmem_cache_free(pmd_cache, pmd);
   16.85 -+			}
   16.86 -+		}
   16.87 -+	}
   16.88 - 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
   16.89 - 	kmem_cache_free(pgd_cache, pgd);
   16.90 - }
   16.91 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/pgtable-2level-defs.h ./include/asm-i386/pgtable-2level-defs.h
   16.92 ---- ../orig-linux-2.6.16.29/include/asm-i386/pgtable-2level-defs.h	2006-09-12 19:02:10.000000000 +0100
   16.93 -+++ ./include/asm-i386/pgtable-2level-defs.h	2006-09-19 14:05:35.000000000 +0100
   16.94 -@@ -1,6 +1,8 @@
   16.95 - #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
   16.96 - #define _I386_PGTABLE_2LEVEL_DEFS_H
   16.97 - 
   16.98 -+#define HAVE_SHARED_KERNEL_PMD 0
   16.99 -+
  16.100 - /*
  16.101 -  * traditional i386 two-level paging structure:
  16.102 -  */
  16.103 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/pgtable-3level-defs.h ./include/asm-i386/pgtable-3level-defs.h
  16.104 ---- ../orig-linux-2.6.16.29/include/asm-i386/pgtable-3level-defs.h	2006-09-12 19:02:10.000000000 +0100
  16.105 -+++ ./include/asm-i386/pgtable-3level-defs.h	2006-09-19 14:05:35.000000000 +0100
  16.106 -@@ -1,6 +1,8 @@
  16.107 - #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
  16.108 - #define _I386_PGTABLE_3LEVEL_DEFS_H
  16.109 - 
  16.110 -+#define HAVE_SHARED_KERNEL_PMD 1
  16.111 -+
  16.112 - /*
  16.113 -  * PGDIR_SHIFT determines what a top-level page table entry can map
  16.114 -  */
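With the change above, the pgd constructor clones the kernel half of the master page directory only when the kernel pmd is shared (or on non-PAE); otherwise pgd_alloc() builds private kernel pmds and copies the kernel mappings into them. The clone step it guards is, at bottom, a copy of the kernel slice of the master table; roughly, in userspace terms (sizes assume a 3G/1G split and are illustrative only):

    #include <stdio.h>
    #include <string.h>

    #define PTRS_PER_PGD      1024
    #define USER_PTRS_PER_PGD  768     /* assumed 3G/1G split */

    /* copy only the kernel slice of the master table into a new directory */
    static void clone_kernel_half(unsigned long *pgd, const unsigned long *master)
    {
            memcpy(pgd + USER_PTRS_PER_PGD,
                   master + USER_PTRS_PER_PGD,
                   (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(*pgd));
    }

    int main(void)
    {
            static unsigned long master[PTRS_PER_PGD], pgd[PTRS_PER_PGD];
            master[PTRS_PER_PGD - 1] = 0xdeadbeefUL;   /* pretend kernel mapping */
            clone_kernel_half(pgd, master);
            printf("%lx\n", pgd[PTRS_PER_PGD - 1]);
            return 0;
    }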
    17.1 --- a/patches/linux-2.6.16.29/rcu_needs_cpu.patch	Mon Nov 27 13:22:21 2006 +0000
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,35 +0,0 @@
    17.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/rcupdate.h ./include/linux/rcupdate.h
    17.5 ---- ../orig-linux-2.6.16.29/include/linux/rcupdate.h	2006-09-12 19:02:10.000000000 +0100
    17.6 -+++ ./include/linux/rcupdate.h	2006-09-19 14:05:39.000000000 +0100
    17.7 -@@ -134,6 +134,7 @@ static inline void rcu_bh_qsctr_inc(int 
    17.8 - }
    17.9 - 
   17.10 - extern int rcu_pending(int cpu);
   17.11 -+extern int rcu_needs_cpu(int cpu);
   17.12 - 
   17.13 - /**
   17.14 -  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
   17.15 -diff -pruN ../orig-linux-2.6.16.29/kernel/rcupdate.c ./kernel/rcupdate.c
   17.16 ---- ../orig-linux-2.6.16.29/kernel/rcupdate.c	2006-09-12 19:02:10.000000000 +0100
   17.17 -+++ ./kernel/rcupdate.c	2006-09-19 14:05:39.000000000 +0100
   17.18 -@@ -485,6 +485,20 @@ int rcu_pending(int cpu)
   17.19 - 		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
   17.20 - }
   17.21 - 
   17.22 -+/*
   17.23 -+ * Check to see if any future RCU-related work will need to be done
   17.24 -+ * by the current CPU, even if none need be done immediately, returning
   17.25 -+ * 1 if so.  This function is part of the RCU implementation; it is -not-
   17.26 -+ * an exported member of the RCU API.
   17.27 -+ */
   17.28 -+int rcu_needs_cpu(int cpu)
   17.29 -+{
   17.30 -+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
   17.31 -+	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
   17.32 -+
   17.33 -+	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
   17.34 -+}
   17.35 -+
   17.36 - void rcu_check_callbacks(int cpu, int user)
   17.37 - {
   17.38 - 	if (user || 
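rcu_needs_cpu() above reports whether a CPU still has queued RCU callbacks or other pending RCU work, which a caller such as a dynamic-tick or suspend path can use to decide whether the CPU may go quiet. A stand-in userspace sketch of the predicate (stub data structures, not the kernel's):

    #include <stdio.h>

    struct rcu_data { int curlist; };   /* stand-in: non-zero = callbacks queued */

    static int rcu_pending_stub(int cpu) { (void)cpu; return 0; }

    static int rcu_needs_cpu_stub(const struct rcu_data *rdp,
                                  const struct rcu_data *rdp_bh, int cpu)
    {
            return rdp->curlist || rdp_bh->curlist || rcu_pending_stub(cpu);
    }

    int main(void)
    {
            struct rcu_data d = { .curlist = 1 }, d_bh = { .curlist = 0 };
            /* a dynamic-tick path would keep the periodic tick while this is non-zero */
            printf("RCU still needs cpu0: %d\n", rcu_needs_cpu_stub(&d, &d_bh, 0));
            return 0;
    }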
    18.1 --- a/patches/linux-2.6.16.29/rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch	Mon Nov 27 13:22:21 2006 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,30 +0,0 @@
    18.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    18.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S	2006-09-12 19:02:10.000000000 +0100
    18.6 -+++ ./arch/i386/kernel/entry.S	2006-09-19 14:05:44.000000000 +0100
    18.7 -@@ -177,7 +177,7 @@ need_resched:
    18.8 - 
    18.9 - 	# sysenter call handler stub
   18.10 - ENTRY(sysenter_entry)
   18.11 --	movl TSS_sysenter_esp0(%esp),%esp
   18.12 -+	movl SYSENTER_stack_esp0(%esp),%esp
   18.13 - sysenter_past_esp:
   18.14 - 	sti
   18.15 - 	pushl $(__USER_DS)
   18.16 -@@ -492,7 +492,7 @@ device_not_available_emulate:
   18.17 -  * that sets up the real kernel stack. Check here, since we can't
   18.18 -  * allow the wrong stack to be used.
   18.19 -  *
   18.20 -- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
   18.21 -+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
   18.22 -  * already pushed 3 words if it hits on the sysenter instruction:
   18.23 -  * eflags, cs and eip.
   18.24 -  *
   18.25 -@@ -504,7 +504,7 @@ device_not_available_emulate:
   18.26 - 	cmpw $__KERNEL_CS,4(%esp);		\
   18.27 - 	jne ok;					\
   18.28 - label:						\
   18.29 --	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
   18.30 -+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
   18.31 - 	pushfl;					\
   18.32 - 	pushl $__KERNEL_CS;			\
   18.33 - 	pushl $sysenter_past_esp
    19.1 --- a/patches/linux-2.6.16.29/series	Mon Nov 27 13:22:21 2006 +0000
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,25 +0,0 @@
    19.4 -blktap-aio-16_03_06.patch
    19.5 -device_bind.patch
    19.6 -fix-hz-suspend.patch
    19.7 -fix-ide-cd-pio-mode.patch
    19.8 -i386-mach-io-check-nmi.patch
    19.9 -ipv6-no-autoconf.patch
   19.10 -net-csum.patch
   19.11 -net-gso-0-base.patch
   19.12 -net-gso-1-check-dodgy.patch
   19.13 -net-gso-2-checksum-fix.patch
   19.14 -net-gso-3-fix-errorcheck.patch
   19.15 -net-gso-4-kill-warnon.patch
   19.16 -net-gso-5-rcv-mss.patch
   19.17 -pci-mmconfig-fix-from-2.6.17.patch
   19.18 -pmd-shared.patch
   19.19 -rcu_needs_cpu.patch
   19.20 -rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
   19.21 -smp-alts.patch
   19.22 -tpm_plugin_2.6.17.patch
   19.23 -x86-increase-interrupt-vector-range.patch
   19.24 -xen-hotplug.patch
   19.25 -xenoprof-generic.patch
   19.26 -x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   19.27 -x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
   19.28 -x86-elfnote-as-preprocessor-macro.patch
    20.1 --- a/patches/linux-2.6.16.29/smp-alts.patch	Mon Nov 27 13:22:21 2006 +0000
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,591 +0,0 @@
    20.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/Kconfig ./arch/i386/Kconfig
    20.5 ---- ../orig-linux-2.6.16.29/arch/i386/Kconfig	2006-09-12 19:02:10.000000000 +0100
    20.6 -+++ ./arch/i386/Kconfig	2006-09-19 14:05:48.000000000 +0100
    20.7 -@@ -202,6 +202,19 @@ config SMP
    20.8 - 
    20.9 - 	  If you don't know what to do here, say N.
   20.10 - 
   20.11 -+config SMP_ALTERNATIVES
   20.12 -+	bool "SMP alternatives support (EXPERIMENTAL)"
   20.13 -+	depends on SMP && EXPERIMENTAL
   20.14 -+	help
   20.15 -+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
   20.16 -+	  host slightly by replacing certain key instruction sequences
   20.17 -+	  according to whether we currently have more than one CPU available.
   20.18 -+	  This should provide a noticeable boost to performance when
   20.19 -+	  running SMP kernels on UP machines, and have negligible impact
   20.20 -+	  when running on an true SMP host.
   20.21 -+
   20.22 -+          If unsure, say N.
   20.23 -+	  
   20.24 - config NR_CPUS
   20.25 - 	int "Maximum number of CPUs (2-255)"
   20.26 - 	range 2 255
   20.27 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile ./arch/i386/kernel/Makefile
   20.28 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/Makefile	2006-09-12 19:02:10.000000000 +0100
   20.29 -+++ ./arch/i386/kernel/Makefile	2006-09-19 14:05:48.000000000 +0100
   20.30 -@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
   20.31 - obj-$(CONFIG_DOUBLEFAULT) 	+= doublefault.o
   20.32 - obj-$(CONFIG_VM86)		+= vm86.o
   20.33 - obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
   20.34 -+obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
   20.35 - 
   20.36 - EXTRA_AFLAGS   := -traditional
   20.37 - 
   20.38 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c ./arch/i386/kernel/smpalts.c
   20.39 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
   20.40 -+++ ./arch/i386/kernel/smpalts.c	2006-09-19 14:05:48.000000000 +0100
   20.41 -@@ -0,0 +1,85 @@
   20.42 -+#include <linux/kernel.h>
   20.43 -+#include <asm/system.h>
   20.44 -+#include <asm/smp_alt.h>
   20.45 -+#include <asm/processor.h>
   20.46 -+#include <asm/string.h>
   20.47 -+
   20.48 -+struct smp_replacement_record {
   20.49 -+	unsigned char targ_size;
   20.50 -+	unsigned char smp1_size;
   20.51 -+	unsigned char smp2_size;
   20.52 -+	unsigned char up_size;
   20.53 -+	unsigned char feature;
   20.54 -+	unsigned char data[0];
   20.55 -+};
   20.56 -+
   20.57 -+struct smp_alternative_record {
   20.58 -+	void *targ_start;
   20.59 -+	struct smp_replacement_record *repl;
   20.60 -+};
   20.61 -+
   20.62 -+extern struct smp_alternative_record __start_smp_alternatives_table,
   20.63 -+  __stop_smp_alternatives_table;
   20.64 -+extern unsigned long __init_begin, __init_end;
   20.65 -+
   20.66 -+void prepare_for_smp(void)
   20.67 -+{
   20.68 -+	struct smp_alternative_record *r;
   20.69 -+	printk(KERN_INFO "Enabling SMP...\n");
   20.70 -+	for (r = &__start_smp_alternatives_table;
   20.71 -+	     r != &__stop_smp_alternatives_table;
   20.72 -+	     r++) {
   20.73 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
   20.74 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
   20.75 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
   20.76 -+               if (system_state == SYSTEM_RUNNING &&
   20.77 -+                   r->targ_start >= (void *)&__init_begin &&
   20.78 -+                   r->targ_start < (void *)&__init_end)
   20.79 -+                       continue;
   20.80 -+		if (r->repl->feature != (unsigned char)-1 &&
   20.81 -+		    boot_cpu_has(r->repl->feature)) {
   20.82 -+			memcpy(r->targ_start,
   20.83 -+			       r->repl->data + r->repl->smp1_size,
   20.84 -+			       r->repl->smp2_size);
   20.85 -+			memset(r->targ_start + r->repl->smp2_size,
   20.86 -+			       0x90,
   20.87 -+			       r->repl->targ_size - r->repl->smp2_size);
   20.88 -+		} else {
   20.89 -+			memcpy(r->targ_start,
   20.90 -+			       r->repl->data,
   20.91 -+			       r->repl->smp1_size);
   20.92 -+			memset(r->targ_start + r->repl->smp1_size,
   20.93 -+			       0x90,
   20.94 -+			       r->repl->targ_size - r->repl->smp1_size);
   20.95 -+		}
   20.96 -+	}
   20.97 -+	/* Paranoia */
   20.98 -+	asm volatile ("jmp 1f\n1:");
   20.99 -+	mb();
  20.100 -+}
  20.101 -+
  20.102 -+void unprepare_for_smp(void)
  20.103 -+{
  20.104 -+	struct smp_alternative_record *r;
  20.105 -+	printk(KERN_INFO "Disabling SMP...\n");
  20.106 -+	for (r = &__start_smp_alternatives_table;
  20.107 -+	     r != &__stop_smp_alternatives_table;
  20.108 -+	     r++) {
  20.109 -+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
  20.110 -+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
  20.111 -+		BUG_ON(r->repl->targ_size < r->repl->up_size);
  20.112 -+               if (system_state == SYSTEM_RUNNING &&
  20.113 -+                   r->targ_start >= (void *)&__init_begin &&
  20.114 -+                   r->targ_start < (void *)&__init_end)
  20.115 -+                       continue;
  20.116 -+		memcpy(r->targ_start,
  20.117 -+		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
  20.118 -+		       r->repl->up_size);
  20.119 -+		memset(r->targ_start + r->repl->up_size,
  20.120 -+		       0x90,
  20.121 -+		       r->repl->targ_size - r->repl->up_size);
  20.122 -+	}
  20.123 -+	/* Paranoia */
  20.124 -+	asm volatile ("jmp 1f\n1:");
  20.125 -+	mb();
  20.126 -+}
  20.127 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c ./arch/i386/kernel/smpboot.c
  20.128 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/smpboot.c	2006-09-12 19:02:10.000000000 +0100
  20.129 -+++ ./arch/i386/kernel/smpboot.c	2006-09-19 14:05:48.000000000 +0100
  20.130 -@@ -1218,6 +1218,11 @@ static void __init smp_boot_cpus(unsigne
  20.131 - 		if (max_cpus <= cpucount+1)
  20.132 - 			continue;
  20.133 - 
  20.134 -+#ifdef CONFIG_SMP_ALTERNATIVES
  20.135 -+		if (kicked == 1)
  20.136 -+			prepare_for_smp();
  20.137 -+#endif
  20.138 -+
  20.139 - 		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
  20.140 - 			printk("CPU #%d not responding - cannot use it.\n",
  20.141 - 								apicid);
  20.142 -@@ -1396,6 +1401,11 @@ int __devinit __cpu_up(unsigned int cpu)
  20.143 - 		return -EIO;
  20.144 - 	}
  20.145 - 
  20.146 -+#ifdef CONFIG_SMP_ALTERNATIVES
  20.147 -+	if (num_online_cpus() == 1)
  20.148 -+		prepare_for_smp();
  20.149 -+#endif
  20.150 -+
  20.151 - 	local_irq_enable();
  20.152 - 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
  20.153 - 	/* Unleash the CPU! */
  20.154 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
  20.155 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S	2006-09-12 19:02:10.000000000 +0100
  20.156 -+++ ./arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:05:48.000000000 +0100
  20.157 -@@ -34,6 +34,13 @@ SECTIONS
  20.158 -   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  20.159 -   __stop___ex_table = .;
  20.160 - 
  20.161 -+  . = ALIGN(16);
  20.162 -+  __start_smp_alternatives_table = .;
  20.163 -+  __smp_alternatives : { *(__smp_alternatives) }
  20.164 -+  __stop_smp_alternatives_table = .;
  20.165 -+
  20.166 -+  __smp_replacements : { *(__smp_replacements) }
  20.167 -+
  20.168 -   RODATA
  20.169 - 
  20.170 -   /* writeable */
  20.171 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/atomic.h ./include/asm-i386/atomic.h
  20.172 ---- ../orig-linux-2.6.16.29/include/asm-i386/atomic.h	2006-09-12 19:02:10.000000000 +0100
  20.173 -+++ ./include/asm-i386/atomic.h	2006-09-19 14:05:48.000000000 +0100
  20.174 -@@ -4,18 +4,13 @@
  20.175 - #include <linux/config.h>
  20.176 - #include <linux/compiler.h>
  20.177 - #include <asm/processor.h>
  20.178 -+#include <asm/smp_alt.h>
  20.179 - 
  20.180 - /*
  20.181 -  * Atomic operations that C can't guarantee us.  Useful for
  20.182 -  * resource counting etc..
  20.183 -  */
  20.184 - 
  20.185 --#ifdef CONFIG_SMP
  20.186 --#define LOCK "lock ; "
  20.187 --#else
  20.188 --#define LOCK ""
  20.189 --#endif
  20.190 --
  20.191 - /*
  20.192 -  * Make sure gcc doesn't try to be clever and move things around
  20.193 -  * on us. We need to use _exactly_ the address the user gave us,
  20.194 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/bitops.h ./include/asm-i386/bitops.h
  20.195 ---- ../orig-linux-2.6.16.29/include/asm-i386/bitops.h	2006-09-12 19:02:10.000000000 +0100
  20.196 -+++ ./include/asm-i386/bitops.h	2006-09-19 14:05:48.000000000 +0100
  20.197 -@@ -7,6 +7,7 @@
  20.198 - 
  20.199 - #include <linux/config.h>
  20.200 - #include <linux/compiler.h>
  20.201 -+#include <asm/smp_alt.h>
  20.202 - 
  20.203 - /*
  20.204 -  * These have to be done with inline assembly: that way the bit-setting
  20.205 -@@ -16,12 +17,6 @@
  20.206 -  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  20.207 -  */
  20.208 - 
  20.209 --#ifdef CONFIG_SMP
  20.210 --#define LOCK_PREFIX "lock ; "
  20.211 --#else
  20.212 --#define LOCK_PREFIX ""
  20.213 --#endif
  20.214 --
  20.215 - #define ADDR (*(volatile long *) addr)
  20.216 - 
  20.217 - /**
  20.218 -@@ -41,7 +36,7 @@
  20.219 -  */
  20.220 - static inline void set_bit(int nr, volatile unsigned long * addr)
  20.221 - {
  20.222 --	__asm__ __volatile__( LOCK_PREFIX
  20.223 -+	__asm__ __volatile__( LOCK
  20.224 - 		"btsl %1,%0"
  20.225 - 		:"+m" (ADDR)
  20.226 - 		:"Ir" (nr));
  20.227 -@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
  20.228 -  */
  20.229 - static inline void clear_bit(int nr, volatile unsigned long * addr)
  20.230 - {
  20.231 --	__asm__ __volatile__( LOCK_PREFIX
  20.232 -+	__asm__ __volatile__( LOCK
  20.233 - 		"btrl %1,%0"
  20.234 - 		:"+m" (ADDR)
  20.235 - 		:"Ir" (nr));
  20.236 -@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
  20.237 -  */
  20.238 - static inline void change_bit(int nr, volatile unsigned long * addr)
  20.239 - {
  20.240 --	__asm__ __volatile__( LOCK_PREFIX
  20.241 -+	__asm__ __volatile__( LOCK
  20.242 - 		"btcl %1,%0"
  20.243 - 		:"+m" (ADDR)
  20.244 - 		:"Ir" (nr));
  20.245 -@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
  20.246 - {
  20.247 - 	int oldbit;
  20.248 - 
  20.249 --	__asm__ __volatile__( LOCK_PREFIX
  20.250 -+	__asm__ __volatile__( LOCK
  20.251 - 		"btsl %2,%1\n\tsbbl %0,%0"
  20.252 - 		:"=r" (oldbit),"+m" (ADDR)
  20.253 - 		:"Ir" (nr) : "memory");
  20.254 -@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
  20.255 - {
  20.256 - 	int oldbit;
  20.257 - 
  20.258 --	__asm__ __volatile__( LOCK_PREFIX
  20.259 -+	__asm__ __volatile__( LOCK
  20.260 - 		"btrl %2,%1\n\tsbbl %0,%0"
  20.261 - 		:"=r" (oldbit),"+m" (ADDR)
  20.262 - 		:"Ir" (nr) : "memory");
  20.263 -@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
  20.264 - {
  20.265 - 	int oldbit;
  20.266 - 
  20.267 --	__asm__ __volatile__( LOCK_PREFIX
  20.268 -+	__asm__ __volatile__( LOCK
  20.269 - 		"btcl %2,%1\n\tsbbl %0,%0"
  20.270 - 		:"=r" (oldbit),"+m" (ADDR)
  20.271 - 		:"Ir" (nr) : "memory");
  20.272 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/futex.h ./include/asm-i386/futex.h
  20.273 ---- ../orig-linux-2.6.16.29/include/asm-i386/futex.h	2006-09-12 19:02:10.000000000 +0100
  20.274 -+++ ./include/asm-i386/futex.h	2006-09-19 14:05:48.000000000 +0100
  20.275 -@@ -28,7 +28,7 @@
  20.276 - "1:	movl	%2, %0\n\
  20.277 - 	movl	%0, %3\n"					\
  20.278 - 	insn "\n"						\
  20.279 --"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
  20.280 -+"2:	" LOCK "cmpxchgl %3, %2\n\
  20.281 - 	jnz	1b\n\
  20.282 - 3:	.section .fixup,\"ax\"\n\
  20.283 - 4:	mov	%5, %1\n\
  20.284 -@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, 
  20.285 - #endif
  20.286 - 		switch (op) {
  20.287 - 		case FUTEX_OP_ADD:
  20.288 --			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
  20.289 -+			__futex_atomic_op1(LOCK "xaddl %0, %2", ret,
  20.290 - 					   oldval, uaddr, oparg);
  20.291 - 			break;
  20.292 - 		case FUTEX_OP_OR:
  20.293 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h ./include/asm-i386/rwsem.h
  20.294 ---- ../orig-linux-2.6.16.29/include/asm-i386/rwsem.h	2006-09-12 19:02:10.000000000 +0100
  20.295 -+++ ./include/asm-i386/rwsem.h	2006-09-19 14:05:48.000000000 +0100
  20.296 -@@ -40,6 +40,7 @@
  20.297 - 
  20.298 - #include <linux/list.h>
  20.299 - #include <linux/spinlock.h>
  20.300 -+#include <asm/smp_alt.h>
  20.301 - 
  20.302 - struct rwsem_waiter;
  20.303 - 
  20.304 -@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
  20.305 - {
  20.306 - 	__asm__ __volatile__(
  20.307 - 		"# beginning down_read\n\t"
  20.308 --LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  20.309 -+LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
  20.310 - 		"  js        2f\n\t" /* jump if we weren't granted the lock */
  20.311 - 		"1:\n\t"
  20.312 - 		LOCK_SECTION_START("")
  20.313 -@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
  20.314 - 		"  movl	     %1,%2\n\t"
  20.315 - 		"  addl      %3,%2\n\t"
  20.316 - 		"  jle	     2f\n\t"
  20.317 --LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
  20.318 -+LOCK	        "  cmpxchgl  %2,%0\n\t"
  20.319 - 		"  jnz	     1b\n\t"
  20.320 - 		"2:\n\t"
  20.321 - 		"# ending __down_read_trylock\n\t"
  20.322 -@@ -150,7 +151,7 @@ static inline void __down_write(struct r
  20.323 - 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
  20.324 - 	__asm__ __volatile__(
  20.325 - 		"# beginning down_write\n\t"
  20.326 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  20.327 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
  20.328 - 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
  20.329 - 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
  20.330 - 		"1:\n\t"
  20.331 -@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
  20.332 - 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
  20.333 - 	__asm__ __volatile__(
  20.334 - 		"# beginning __up_read\n\t"
  20.335 --LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  20.336 -+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  20.337 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  20.338 - 		"1:\n\t"
  20.339 - 		LOCK_SECTION_START("")
  20.340 -@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
  20.341 - 	__asm__ __volatile__(
  20.342 - 		"# beginning __up_write\n\t"
  20.343 - 		"  movl      %2,%%edx\n\t"
  20.344 --LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  20.345 -+LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  20.346 - 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
  20.347 - 		"1:\n\t"
  20.348 - 		LOCK_SECTION_START("")
  20.349 -@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
  20.350 - {
  20.351 - 	__asm__ __volatile__(
  20.352 - 		"# beginning __downgrade_write\n\t"
  20.353 --LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  20.354 -+LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  20.355 - 		"  js        2f\n\t" /* jump if the lock is being waited upon */
  20.356 - 		"1:\n\t"
  20.357 - 		LOCK_SECTION_START("")
  20.358 -@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
  20.359 - static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  20.360 - {
  20.361 - 	__asm__ __volatile__(
  20.362 --LOCK_PREFIX	"addl %1,%0"
  20.363 -+LOCK	          "addl %1,%0"
  20.364 - 		: "=m"(sem->count)
  20.365 - 		: "ir"(delta), "m"(sem->count));
  20.366 - }
  20.367 -@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
  20.368 - 	int tmp = delta;
  20.369 - 
  20.370 - 	__asm__ __volatile__(
  20.371 --LOCK_PREFIX	"xadd %0,(%2)"
  20.372 -+LOCK  	          "xadd %0,(%2)"
  20.373 - 		: "+r"(tmp), "=m"(sem->count)
  20.374 - 		: "r"(sem), "m"(sem->count)
  20.375 - 		: "memory");
  20.376 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h ./include/asm-i386/smp_alt.h
  20.377 ---- ../orig-linux-2.6.16.29/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
  20.378 -+++ ./include/asm-i386/smp_alt.h	2006-09-19 14:05:48.000000000 +0100
  20.379 -@@ -0,0 +1,32 @@
  20.380 -+#ifndef __ASM_SMP_ALT_H__
  20.381 -+#define __ASM_SMP_ALT_H__
  20.382 -+
  20.383 -+#include <linux/config.h>
  20.384 -+
  20.385 -+#ifdef CONFIG_SMP
  20.386 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  20.387 -+#define LOCK \
  20.388 -+        "6677: nop\n" \
  20.389 -+	".section __smp_alternatives,\"a\"\n" \
  20.390 -+	".long 6677b\n" \
  20.391 -+	".long 6678f\n" \
  20.392 -+	".previous\n" \
  20.393 -+	".section __smp_replacements,\"a\"\n" \
  20.394 -+	"6678: .byte 1\n" \
  20.395 -+	".byte 1\n" \
  20.396 -+	".byte 0\n" \
  20.397 -+        ".byte 1\n" \
  20.398 -+	".byte -1\n" \
  20.399 -+	"lock\n" \
  20.400 -+	"nop\n" \
  20.401 -+	".previous\n"
  20.402 -+void prepare_for_smp(void);
  20.403 -+void unprepare_for_smp(void);
  20.404 -+#else
  20.405 -+#define LOCK "lock ; "
  20.406 -+#endif
  20.407 -+#else
  20.408 -+#define LOCK ""
  20.409 -+#endif
  20.410 -+
  20.411 -+#endif /* __ASM_SMP_ALT_H__ */
  20.412 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h ./include/asm-i386/spinlock.h
  20.413 ---- ../orig-linux-2.6.16.29/include/asm-i386/spinlock.h	2006-09-12 19:02:10.000000000 +0100
  20.414 -+++ ./include/asm-i386/spinlock.h	2006-09-19 14:05:48.000000000 +0100
  20.415 -@@ -6,6 +6,7 @@
  20.416 - #include <asm/page.h>
  20.417 - #include <linux/config.h>
  20.418 - #include <linux/compiler.h>
  20.419 -+#include <asm/smp_alt.h>
  20.420 - 
  20.421 - /*
  20.422 -  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  20.423 -@@ -23,7 +24,8 @@
  20.424 - 
  20.425 - #define __raw_spin_lock_string \
  20.426 - 	"\n1:\t" \
  20.427 --	"lock ; decb %0\n\t" \
  20.428 -+	LOCK \
  20.429 -+	"decb %0\n\t" \
  20.430 - 	"jns 3f\n" \
  20.431 - 	"2:\t" \
  20.432 - 	"rep;nop\n\t" \
  20.433 -@@ -34,7 +36,8 @@
  20.434 - 
  20.435 - #define __raw_spin_lock_string_flags \
  20.436 - 	"\n1:\t" \
  20.437 --	"lock ; decb %0\n\t" \
  20.438 -+	LOCK \
  20.439 -+	"decb %0\n\t" \
  20.440 - 	"jns 4f\n\t" \
  20.441 - 	"2:\t" \
  20.442 - 	"testl $0x200, %1\n\t" \
  20.443 -@@ -65,10 +68,34 @@ static inline void __raw_spin_lock_flags
  20.444 - static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  20.445 - {
  20.446 - 	char oldval;
  20.447 -+#ifdef CONFIG_SMP_ALTERNATIVES
  20.448 - 	__asm__ __volatile__(
  20.449 --		"xchgb %b0,%1"
  20.450 -+		"1:movb %1,%b0\n"
  20.451 -+		"movb $0,%1\n"
  20.452 -+		"2:"
  20.453 -+		".section __smp_alternatives,\"a\"\n"
  20.454 -+		".long 1b\n"
  20.455 -+		".long 3f\n"
  20.456 -+		".previous\n"
  20.457 -+		".section __smp_replacements,\"a\"\n"
  20.458 -+		"3: .byte 2b - 1b\n"
  20.459 -+		".byte 5f-4f\n"
  20.460 -+		".byte 0\n"
  20.461 -+		".byte 6f-5f\n"
  20.462 -+		".byte -1\n"
  20.463 -+		"4: xchgb %b0,%1\n"
  20.464 -+		"5: movb %1,%b0\n"
  20.465 -+		"movb $0,%1\n"
  20.466 -+		"6:\n"
  20.467 -+		".previous\n"
  20.468 - 		:"=q" (oldval), "=m" (lock->slock)
  20.469 - 		:"0" (0) : "memory");
  20.470 -+#else
  20.471 -+	__asm__ __volatile__(
  20.472 -+		"xchgb %b0,%1\n"
  20.473 -+		:"=q" (oldval), "=m" (lock->slock)
  20.474 -+		:"0" (0) : "memory");
  20.475 -+#endif
  20.476 - 	return oldval > 0;
  20.477 - }
  20.478 - 
  20.479 -@@ -178,12 +205,12 @@ static inline int __raw_write_trylock(ra
  20.480 - 
  20.481 - static inline void __raw_read_unlock(raw_rwlock_t *rw)
  20.482 - {
  20.483 --	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
  20.484 -+	asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory");
  20.485 - }
  20.486 - 
  20.487 - static inline void __raw_write_unlock(raw_rwlock_t *rw)
  20.488 - {
  20.489 --	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
  20.490 -+	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0"
  20.491 - 				 : "=m" (rw->lock) : : "memory");
  20.492 - }
  20.493 - 
  20.494 -diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/system.h ./include/asm-i386/system.h
  20.495 ---- ../orig-linux-2.6.16.29/include/asm-i386/system.h	2006-09-12 19:02:10.000000000 +0100
  20.496 -+++ ./include/asm-i386/system.h	2006-09-19 14:05:48.000000000 +0100
  20.497 -@@ -5,7 +5,7 @@
  20.498 - #include <linux/kernel.h>
  20.499 - #include <asm/segment.h>
  20.500 - #include <asm/cpufeature.h>
  20.501 --#include <linux/bitops.h> /* for LOCK_PREFIX */
  20.502 -+#include <asm/smp_alt.h>
  20.503 - 
  20.504 - #ifdef __KERNEL__
  20.505 - 
  20.506 -@@ -271,19 +271,19 @@ static inline unsigned long __cmpxchg(vo
  20.507 - 	unsigned long prev;
  20.508 - 	switch (size) {
  20.509 - 	case 1:
  20.510 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
  20.511 -+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
  20.512 - 				     : "=a"(prev)
  20.513 - 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
  20.514 - 				     : "memory");
  20.515 - 		return prev;
  20.516 - 	case 2:
  20.517 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
  20.518 -+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
  20.519 - 				     : "=a"(prev)
  20.520 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  20.521 - 				     : "memory");
  20.522 - 		return prev;
  20.523 - 	case 4:
  20.524 --		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
  20.525 -+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
  20.526 - 				     : "=a"(prev)
  20.527 - 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
  20.528 - 				     : "memory");
  20.529 -@@ -336,7 +336,7 @@ static inline unsigned long long __cmpxc
  20.530 - 				      unsigned long long new)
  20.531 - {
  20.532 - 	unsigned long long prev;
  20.533 --	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
  20.534 -+	__asm__ __volatile__(LOCK "cmpxchg8b %3"
  20.535 - 			     : "=A"(prev)
  20.536 - 			     : "b"((unsigned long)new),
  20.537 - 			       "c"((unsigned long)(new >> 32)),
  20.538 -@@ -503,11 +503,55 @@ struct alt_instr { 
  20.539 - #endif
  20.540 - 
  20.541 - #ifdef CONFIG_SMP
  20.542 -+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
  20.543 -+#define smp_alt_mb(instr)                                           \
  20.544 -+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
  20.545 -+		     ".section __smp_alternatives,\"a\"\n"          \
  20.546 -+		     ".long 6667b\n"                                \
  20.547 -+                     ".long 6673f\n"                                \
  20.548 -+		     ".previous\n"                                  \
  20.549 -+		     ".section __smp_replacements,\"a\"\n"          \
  20.550 -+		     "6673:.byte 6668b-6667b\n"                     \
  20.551 -+		     ".byte 6670f-6669f\n"                          \
  20.552 -+		     ".byte 6671f-6670f\n"                          \
  20.553 -+                     ".byte 0\n"                                    \
  20.554 -+		     ".byte %c0\n"                                  \
  20.555 -+		     "6669:lock;addl $0,0(%%esp)\n"                 \
  20.556 -+		     "6670:" instr "\n"                             \
  20.557 -+		     "6671:\n"                                      \
  20.558 -+		     ".previous\n"                                  \
  20.559 -+		     :                                              \
  20.560 -+		     : "i" (X86_FEATURE_XMM2)                       \
  20.561 -+		     : "memory")
  20.562 -+#define smp_rmb() smp_alt_mb("lfence")
  20.563 -+#define smp_mb()  smp_alt_mb("mfence")
  20.564 -+#define set_mb(var, value) do {                                     \
  20.565 -+unsigned long __set_mb_temp;                                        \
  20.566 -+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
  20.567 -+		     ".section __smp_alternatives,\"a\"\n"          \
  20.568 -+		     ".long 6667b\n"                                \
  20.569 -+		     ".long 6673f\n"                                \
  20.570 -+		     ".previous\n"                                  \
  20.571 -+		     ".section __smp_replacements,\"a\"\n"          \
  20.572 -+		     "6673: .byte 6668b-6667b\n"                    \
  20.573 -+		     ".byte 6670f-6669f\n"                          \
  20.574 -+		     ".byte 0\n"                                    \
  20.575 -+		     ".byte 6671f-6670f\n"                          \
  20.576 -+		     ".byte -1\n"                                   \
  20.577 -+		     "6669: xchg %1, %0\n"                          \
  20.578 -+		     "6670:movl %1, %0\n"                           \
  20.579 -+		     "6671:\n"                                      \
  20.580 -+		     ".previous\n"                                  \
  20.581 -+		     : "=m" (var), "=r" (__set_mb_temp)             \
  20.582 -+		     : "1" (value)                                  \
  20.583 -+		     : "memory"); } while (0)
  20.584 -+#else
  20.585 - #define smp_mb()	mb()
  20.586 - #define smp_rmb()	rmb()
  20.587 -+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  20.588 -+#endif
  20.589 - #define smp_wmb()	wmb()
  20.590 - #define smp_read_barrier_depends()	read_barrier_depends()
  20.591 --#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
  20.592 - #else
  20.593 - #define smp_mb()	barrier()
  20.594 - #define smp_rmb()	barrier()
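The smp-alts patch above records, for every LOCK site, a target address plus a replacement descriptor (target size, SMP and UP byte sequences, optional CPU feature), and prepare_for_smp()/unprepare_for_smp() rewrite each site with the chosen sequence and pad the remainder with 0x90 (NOP). The core copy-and-pad step, reduced to a userspace buffer (illustrative only; 0xf0 is the x86 LOCK prefix):

    #include <stdio.h>
    #include <string.h>

    struct repl {
            unsigned char targ_size;    /* bytes reserved at the patch site */
            unsigned char alt_size;     /* bytes in the chosen alternative  */
            const unsigned char *alt;   /* alternative byte sequence        */
    };

    static void apply(unsigned char *targ, const struct repl *r)
    {
            memcpy(targ, r->alt, r->alt_size);
            memset(targ + r->alt_size, 0x90, r->targ_size - r->alt_size);  /* NOP padding */
    }

    int main(void)
    {
            unsigned char site[2] = { 0x90, 0x90 };     /* UP build: NOPs at the site */
            const unsigned char lock_prefix[] = { 0xf0 };
            struct repl r = { .targ_size = 2, .alt_size = 1, .alt = lock_prefix };
            apply(site, &r);                            /* "enable SMP": f0 90 */
            printf("%02x %02x\n", site[0], site[1]);
            return 0;
    }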
    21.1 --- a/patches/linux-2.6.16.29/tpm_plugin_2.6.17.patch	Mon Nov 27 13:22:21 2006 +0000
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,1545 +0,0 @@
    21.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.c ./drivers/char/tpm/tpm_atmel.c
    21.5 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.c	2006-09-12 19:02:10.000000000 +0100
    21.6 -+++ ./drivers/char/tpm/tpm_atmel.c	2006-09-19 14:05:52.000000000 +0100
    21.7 -@@ -47,12 +47,12 @@ static int tpm_atml_recv(struct tpm_chip
    21.8 - 		return -EIO;
    21.9 - 
   21.10 - 	for (i = 0; i < 6; i++) {
   21.11 --		status = ioread8(chip->vendor->iobase + 1);
   21.12 -+		status = ioread8(chip->vendor.iobase + 1);
   21.13 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   21.14 - 			dev_err(chip->dev, "error reading header\n");
   21.15 - 			return -EIO;
   21.16 - 		}
   21.17 --		*buf++ = ioread8(chip->vendor->iobase);
   21.18 -+		*buf++ = ioread8(chip->vendor.iobase);
   21.19 - 	}
   21.20 - 
   21.21 - 	/* size of the data received */
   21.22 -@@ -63,7 +63,7 @@ static int tpm_atml_recv(struct tpm_chip
   21.23 - 		dev_err(chip->dev,
   21.24 - 			"Recv size(%d) less than available space\n", size);
   21.25 - 		for (; i < size; i++) {	/* clear the waiting data anyway */
   21.26 --			status = ioread8(chip->vendor->iobase + 1);
   21.27 -+			status = ioread8(chip->vendor.iobase + 1);
   21.28 - 			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   21.29 - 				dev_err(chip->dev, "error reading data\n");
   21.30 - 				return -EIO;
   21.31 -@@ -74,16 +74,16 @@ static int tpm_atml_recv(struct tpm_chip
   21.32 - 
   21.33 - 	/* read all the data available */
   21.34 - 	for (; i < size; i++) {
   21.35 --		status = ioread8(chip->vendor->iobase + 1);
   21.36 -+		status = ioread8(chip->vendor.iobase + 1);
   21.37 - 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   21.38 - 			dev_err(chip->dev, "error reading data\n");
   21.39 - 			return -EIO;
   21.40 - 		}
   21.41 --		*buf++ = ioread8(chip->vendor->iobase);
   21.42 -+		*buf++ = ioread8(chip->vendor.iobase);
   21.43 - 	}
   21.44 - 
   21.45 - 	/* make sure data available is gone */
   21.46 --	status = ioread8(chip->vendor->iobase + 1);
   21.47 -+	status = ioread8(chip->vendor.iobase + 1);
   21.48 - 
   21.49 - 	if (status & ATML_STATUS_DATA_AVAIL) {
   21.50 - 		dev_err(chip->dev, "data available is stuck\n");
   21.51 -@@ -100,7 +100,7 @@ static int tpm_atml_send(struct tpm_chip
   21.52 - 	dev_dbg(chip->dev, "tpm_atml_send:\n");
   21.53 - 	for (i = 0; i < count; i++) {
   21.54 - 		dev_dbg(chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
   21.55 -- 		iowrite8(buf[i], chip->vendor->iobase);
   21.56 -+ 		iowrite8(buf[i], chip->vendor.iobase);
   21.57 - 	}
   21.58 - 
   21.59 - 	return count;
   21.60 -@@ -108,12 +108,12 @@ static int tpm_atml_send(struct tpm_chip
   21.61 - 
   21.62 - static void tpm_atml_cancel(struct tpm_chip *chip)
   21.63 - {
   21.64 --	iowrite8(ATML_STATUS_ABORT, chip->vendor->iobase + 1);
   21.65 -+	iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1);
   21.66 - }
   21.67 - 
   21.68 - static u8 tpm_atml_status(struct tpm_chip *chip)
   21.69 - {
   21.70 --	return ioread8(chip->vendor->iobase + 1);
   21.71 -+	return ioread8(chip->vendor.iobase + 1);
   21.72 - }
   21.73 - 
   21.74 - static struct file_operations atmel_ops = {
   21.75 -@@ -140,7 +140,7 @@ static struct attribute* atmel_attrs[] =
   21.76 - 
   21.77 - static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
   21.78 - 
   21.79 --static struct tpm_vendor_specific tpm_atmel = {
   21.80 -+static const struct tpm_vendor_specific tpm_atmel = {
   21.81 - 	.recv = tpm_atml_recv,
   21.82 - 	.send = tpm_atml_send,
   21.83 - 	.cancel = tpm_atml_cancel,
   21.84 -@@ -159,10 +159,10 @@ static void atml_plat_remove(void)
   21.85 - 	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
   21.86 - 
   21.87 - 	if (chip) {
   21.88 --		if (chip->vendor->have_region)
   21.89 --			atmel_release_region(chip->vendor->base,
   21.90 --					     chip->vendor->region_size);
   21.91 --		atmel_put_base_addr(chip->vendor);
   21.92 -+		if (chip->vendor.have_region)
   21.93 -+			atmel_release_region(chip->vendor.base,
   21.94 -+					     chip->vendor.region_size);
   21.95 -+		atmel_put_base_addr(chip->vendor.iobase);
   21.96 - 		tpm_remove_hardware(chip->dev);
   21.97 - 		platform_device_unregister(pdev);
   21.98 - 	}
   21.99 -@@ -179,18 +179,22 @@ static struct device_driver atml_drv = {
  21.100 - static int __init init_atmel(void)
  21.101 - {
  21.102 - 	int rc = 0;
  21.103 -+	void __iomem *iobase = NULL;
  21.104 -+	int have_region, region_size;
  21.105 -+	unsigned long base;
  21.106 -+	struct  tpm_chip *chip;
  21.107 - 
  21.108 - 	driver_register(&atml_drv);
  21.109 - 
  21.110 --	if ((tpm_atmel.iobase = atmel_get_base_addr(&tpm_atmel)) == NULL) {
  21.111 -+	if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
  21.112 - 		rc = -ENODEV;
  21.113 - 		goto err_unreg_drv;
  21.114 - 	}
  21.115 - 
  21.116 --	tpm_atmel.have_region =
  21.117 -+	have_region =
  21.118 - 	    (atmel_request_region
  21.119 --	     (tpm_atmel.base, tpm_atmel.region_size,
  21.120 --	      "tpm_atmel0") == NULL) ? 0 : 1;
  21.121 -+	     (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
  21.122 -+
  21.123 - 
  21.124 - 	if (IS_ERR
  21.125 - 	    (pdev =
  21.126 -@@ -199,17 +203,25 @@ static int __init init_atmel(void)
  21.127 - 		goto err_rel_reg;
  21.128 - 	}
  21.129 - 
  21.130 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_atmel)) < 0)
  21.131 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) {
  21.132 -+		rc = -ENODEV;
  21.133 - 		goto err_unreg_dev;
  21.134 -+	}
  21.135 -+
  21.136 -+	chip->vendor.iobase = iobase;
  21.137 -+	chip->vendor.base = base;
  21.138 -+	chip->vendor.have_region = have_region;
  21.139 -+	chip->vendor.region_size = region_size;
  21.140 -+
  21.141 - 	return 0;
  21.142 - 
  21.143 - err_unreg_dev:
  21.144 - 	platform_device_unregister(pdev);
  21.145 - err_rel_reg:
  21.146 --	atmel_put_base_addr(&tpm_atmel);
  21.147 --	if (tpm_atmel.have_region)
  21.148 --		atmel_release_region(tpm_atmel.base,
  21.149 --				     tpm_atmel.region_size);
  21.150 -+	atmel_put_base_addr(iobase);
  21.151 -+	if (have_region)
  21.152 -+		atmel_release_region(base,
  21.153 -+				     region_size);
  21.154 - err_unreg_drv:
  21.155 - 	driver_unregister(&atml_drv);
  21.156 - 	return rc;
  21.157 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.h ./drivers/char/tpm/tpm_atmel.h
  21.158 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_atmel.h	2006-09-12 19:02:10.000000000 +0100
  21.159 -+++ ./drivers/char/tpm/tpm_atmel.h	2006-09-19 14:05:52.000000000 +0100
  21.160 -@@ -28,13 +28,12 @@
  21.161 - #define atmel_request_region request_mem_region
  21.162 - #define atmel_release_region release_mem_region
  21.163 - 
  21.164 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  21.165 --					 *vendor)
  21.166 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  21.167 - {
  21.168 --	iounmap(vendor->iobase);
  21.169 -+	iounmap(iobase);
  21.170 - }
  21.171 - 
  21.172 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific *vendor)
  21.173 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  21.174 - {
  21.175 - 	struct device_node *dn;
  21.176 - 	unsigned long address, size;
  21.177 -@@ -71,9 +70,9 @@ static void __iomem * atmel_get_base_add
  21.178 - 	else
  21.179 - 		size = reg[naddrc];
  21.180 - 
  21.181 --	vendor->base = address;
  21.182 --	vendor->region_size = size;
  21.183 --	return ioremap(vendor->base, vendor->region_size);
  21.184 -+	*base = address;
  21.185 -+	*region_size = size;
  21.186 -+	return ioremap(*base, *region_size);
  21.187 - }
  21.188 - #else
  21.189 - #define atmel_getb(chip, offset) inb(chip->vendor->base + offset)
  21.190 -@@ -106,14 +105,12 @@ static int atmel_verify_tpm11(void)
  21.191 - 	return 0;
  21.192 - }
  21.193 - 
  21.194 --static inline void atmel_put_base_addr(struct tpm_vendor_specific
  21.195 --					 *vendor)
  21.196 -+static inline void atmel_put_base_addr(void __iomem *iobase)
  21.197 - {
  21.198 - }
  21.199 - 
  21.200 - /* Determine where to talk to device */
  21.201 --static void __iomem * atmel_get_base_addr(struct tpm_vendor_specific
  21.202 --					 *vendor)
  21.203 -+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
  21.204 - {
  21.205 - 	int lo, hi;
  21.206 - 
  21.207 -@@ -123,9 +120,9 @@ static void __iomem * atmel_get_base_add
  21.208 - 	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
  21.209 - 	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
  21.210 - 
  21.211 --	vendor->base = (hi << 8) | lo;
  21.212 --	vendor->region_size = 2;
  21.213 -+	*base = (hi << 8) | lo;
  21.214 -+	*region_size = 2;
  21.215 - 
  21.216 --	return ioport_map(vendor->base, vendor->region_size);
  21.217 -+	return ioport_map(*base, *region_size);
  21.218 - }
  21.219 - #endif
  21.220 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_bios.c ./drivers/char/tpm/tpm_bios.c
  21.221 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_bios.c	2006-09-12 19:02:10.000000000 +0100
  21.222 -+++ ./drivers/char/tpm/tpm_bios.c	2006-09-19 14:05:52.000000000 +0100
  21.223 -@@ -29,6 +29,11 @@
  21.224 - #define MAX_TEXT_EVENT		1000	/* Max event string length */
  21.225 - #define ACPI_TCPA_SIG		"TCPA"	/* 0x41504354 /'TCPA' */
  21.226 - 
  21.227 -+enum bios_platform_class {
  21.228 -+	BIOS_CLIENT = 0x00,
  21.229 -+	BIOS_SERVER = 0x01,
  21.230 -+};
  21.231 -+
  21.232 - struct tpm_bios_log {
  21.233 - 	void *bios_event_log;
  21.234 - 	void *bios_event_log_end;
  21.235 -@@ -36,9 +41,18 @@ struct tpm_bios_log {
  21.236 - 
  21.237 - struct acpi_tcpa {
  21.238 - 	struct acpi_table_header hdr;
  21.239 --	u16 reserved;
  21.240 --	u32 log_max_len __attribute__ ((packed));
  21.241 --	u32 log_start_addr __attribute__ ((packed));
  21.242 -+	u16 platform_class;
  21.243 -+	union {
  21.244 -+		struct client_hdr {
  21.245 -+			u32 log_max_len __attribute__ ((packed));
  21.246 -+			u64 log_start_addr __attribute__ ((packed));
  21.247 -+		} client;
  21.248 -+		struct server_hdr {
  21.249 -+			u16 reserved;
  21.250 -+			u64 log_max_len __attribute__ ((packed));
  21.251 -+			u64 log_start_addr __attribute__ ((packed));
  21.252 -+		} server;
  21.253 -+	};
  21.254 - };
  21.255 - 
  21.256 - struct tcpa_event {
  21.257 -@@ -91,6 +105,12 @@ static const char* tcpa_event_type_strin
  21.258 - 	"Non-Host Info"
  21.259 - };
  21.260 - 
  21.261 -+struct tcpa_pc_event {
  21.262 -+	u32 event_id;
  21.263 -+	u32 event_size;
  21.264 -+	u8 event_data[0];
  21.265 -+};
  21.266 -+
  21.267 - enum tcpa_pc_event_ids {
  21.268 - 	SMBIOS = 1,
  21.269 - 	BIS_CERT,
  21.270 -@@ -100,14 +120,15 @@ enum tcpa_pc_event_ids {
  21.271 - 	NVRAM,
  21.272 - 	OPTION_ROM_EXEC,
  21.273 - 	OPTION_ROM_CONFIG,
  21.274 --	OPTION_ROM_MICROCODE,
  21.275 -+	OPTION_ROM_MICROCODE = 10,
  21.276 - 	S_CRTM_VERSION,
  21.277 - 	S_CRTM_CONTENTS,
  21.278 - 	POST_CONTENTS,
  21.279 -+	HOST_TABLE_OF_DEVICES,
  21.280 - };
  21.281 - 
  21.282 - static const char* tcpa_pc_event_id_strings[] = {
  21.283 --	""
  21.284 -+	"",
  21.285 - 	"SMBIOS",
  21.286 - 	"BIS Certificate",
  21.287 - 	"POST BIOS ",
  21.288 -@@ -116,10 +137,12 @@ static const char* tcpa_pc_event_id_stri
  21.289 - 	"NVRAM",
  21.290 - 	"Option ROM",
  21.291 - 	"Option ROM config",
  21.292 --	"Option ROM microcode",
  21.293 -+	"",
  21.294 -+	"Option ROM microcode ",
  21.295 - 	"S-CRTM Version",
  21.296 --	"S-CRTM Contents",
  21.297 --	"S-CRTM POST Contents",
  21.298 -+	"S-CRTM Contents ",
  21.299 -+	"POST Contents ",
  21.300 -+	"Table of Devices",
  21.301 - };
  21.302 - 
  21.303 - /* returns pointer to start of pos. entry of tcg log */
  21.304 -@@ -191,7 +214,7 @@ static int get_event_name(char *dest, st
  21.305 - 	const char *name = "";
  21.306 - 	char data[40] = "";
  21.307 - 	int i, n_len = 0, d_len = 0;
  21.308 --	u32 event_id;
  21.309 -+	struct tcpa_pc_event *pc_event;
  21.310 - 
  21.311 - 	switch(event->event_type) {
  21.312 - 	case PREBOOT:
  21.313 -@@ -220,31 +243,32 @@ static int get_event_name(char *dest, st
  21.314 - 		}
  21.315 - 		break;
  21.316 - 	case EVENT_TAG:
  21.317 --		event_id = be32_to_cpu(*((u32 *)event_entry));
  21.318 -+		pc_event = (struct tcpa_pc_event *)event_entry;
  21.319 - 
  21.320 - 		/* ToDo Row data -> Base64 */
  21.321 - 
  21.322 --		switch (event_id) {
  21.323 -+		switch (pc_event->event_id) {
  21.324 - 		case SMBIOS:
  21.325 - 		case BIS_CERT:
  21.326 - 		case CMOS:
  21.327 - 		case NVRAM:
  21.328 - 		case OPTION_ROM_EXEC:
  21.329 - 		case OPTION_ROM_CONFIG:
  21.330 --		case OPTION_ROM_MICROCODE:
  21.331 - 		case S_CRTM_VERSION:
  21.332 --		case S_CRTM_CONTENTS:
  21.333 --		case POST_CONTENTS:
  21.334 --			name = tcpa_pc_event_id_strings[event_id];
  21.335 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  21.336 - 			n_len = strlen(name);
  21.337 - 			break;
  21.338 -+		/* hash data */
  21.339 - 		case POST_BIOS_ROM:
  21.340 - 		case ESCD:
  21.341 --			name = tcpa_pc_event_id_strings[event_id];
  21.342 -+		case OPTION_ROM_MICROCODE:
  21.343 -+		case S_CRTM_CONTENTS:
  21.344 -+		case POST_CONTENTS:
  21.345 -+			name = tcpa_pc_event_id_strings[pc_event->event_id];
  21.346 - 			n_len = strlen(name);
  21.347 - 			for (i = 0; i < 20; i++)
  21.348 --				d_len += sprintf(data, "%02x",
  21.349 --						event_entry[8 + i]);
  21.350 -+				d_len += sprintf(&data[2*i], "%02x",
  21.351 -+						pc_event->event_data[i]);
  21.352 - 			break;
  21.353 - 		default:
  21.354 - 			break;
  21.355 -@@ -260,52 +284,13 @@ static int get_event_name(char *dest, st
  21.356 - 
  21.357 - static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
  21.358 - {
  21.359 -+	struct tcpa_event *event = v;
  21.360 -+	char *data = v;
  21.361 -+	int i;
  21.362 - 
  21.363 --	char *eventname;
  21.364 --	char data[4];
  21.365 --	u32 help;
  21.366 --	int i, len;
  21.367 --	struct tcpa_event *event = (struct tcpa_event *) v;
  21.368 --	unsigned char *event_entry =
  21.369 --	    (unsigned char *) (v + sizeof(struct tcpa_event));
  21.370 --
  21.371 --	eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
  21.372 --	if (!eventname) {
  21.373 --		printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
  21.374 --		       __func__);
  21.375 --		return -ENOMEM;
  21.376 --	}
  21.377 --
  21.378 --	/* 1st: PCR used is in little-endian format (4 bytes) */
  21.379 --	help = le32_to_cpu(event->pcr_index);
  21.380 --	memcpy(data, &help, 4);
  21.381 --	for (i = 0; i < 4; i++)
  21.382 --		seq_putc(m, data[i]);
  21.383 --
  21.384 --	/* 2nd: SHA1 (20 bytes) */
  21.385 --	for (i = 0; i < 20; i++)
  21.386 --		seq_putc(m, event->pcr_value[i]);
  21.387 --
  21.388 --	/* 3rd: event type identifier (4 bytes) */
  21.389 --	help = le32_to_cpu(event->event_type);
  21.390 --	memcpy(data, &help, 4);
  21.391 --	for (i = 0; i < 4; i++)
  21.392 -+	for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
  21.393 - 		seq_putc(m, data[i]);
  21.394 - 
  21.395 --	len = 0;
  21.396 --
  21.397 --	len += get_event_name(eventname, event, event_entry);
  21.398 --
  21.399 --	/* 4th:  filename <= 255 + \'0' delimiter */
  21.400 --	if (len > TCG_EVENT_NAME_LEN_MAX)
  21.401 --		len = TCG_EVENT_NAME_LEN_MAX;
  21.402 --
  21.403 --	for (i = 0; i < len; i++)
  21.404 --		seq_putc(m, eventname[i]);
  21.405 --
  21.406 --	/* 5th: delimiter */
  21.407 --	seq_putc(m, '\0');
  21.408 --
  21.409 - 	return 0;
  21.410 - }
  21.411 - 
  21.412 -@@ -353,6 +338,7 @@ static int tpm_ascii_bios_measurements_s
  21.413 - 	/* 4th: eventname <= max + \'0' delimiter */
  21.414 - 	seq_printf(m, " %s\n", eventname);
  21.415 - 
  21.416 -+	kfree(eventname);
  21.417 - 	return 0;
  21.418 - }
  21.419 - 
  21.420 -@@ -376,6 +362,7 @@ static int read_log(struct tpm_bios_log 
  21.421 - 	struct acpi_tcpa *buff;
  21.422 - 	acpi_status status;
  21.423 - 	struct acpi_table_header *virt;
  21.424 -+	u64 len, start;
  21.425 - 
  21.426 - 	if (log->bios_event_log != NULL) {
  21.427 - 		printk(KERN_ERR
  21.428 -@@ -396,27 +383,37 @@ static int read_log(struct tpm_bios_log 
  21.429 - 		return -EIO;
  21.430 - 	}
  21.431 - 
  21.432 --	if (buff->log_max_len == 0) {
  21.433 -+	switch(buff->platform_class) {
  21.434 -+	case BIOS_SERVER:
  21.435 -+		len = buff->server.log_max_len;
  21.436 -+		start = buff->server.log_start_addr;
  21.437 -+		break;
  21.438 -+	case BIOS_CLIENT:
  21.439 -+	default:
  21.440 -+		len = buff->client.log_max_len;
  21.441 -+		start = buff->client.log_start_addr;
  21.442 -+		break;
  21.443 -+	}
  21.444 -+	if (!len) {
  21.445 - 		printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
  21.446 - 		return -EIO;
  21.447 - 	}
  21.448 - 
  21.449 - 	/* malloc EventLog space */
  21.450 --	log->bios_event_log = kmalloc(buff->log_max_len, GFP_KERNEL);
  21.451 -+	log->bios_event_log = kmalloc(len, GFP_KERNEL);
  21.452 - 	if (!log->bios_event_log) {
  21.453 --		printk
  21.454 --		    ("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  21.455 --		     __func__);
  21.456 -+		printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
  21.457 -+			__func__);
  21.458 - 		return -ENOMEM;
  21.459 - 	}
  21.460 - 
  21.461 --	log->bios_event_log_end = log->bios_event_log + buff->log_max_len;
  21.462 -+	log->bios_event_log_end = log->bios_event_log + len;
  21.463 - 
  21.464 --	acpi_os_map_memory(buff->log_start_addr, buff->log_max_len, (void *) &virt);
  21.465 -+	acpi_os_map_memory(start, len, (void *) &virt);
  21.466 - 
  21.467 --	memcpy(log->bios_event_log, virt, buff->log_max_len);
  21.468 -+	memcpy(log->bios_event_log, virt, len);
  21.469 - 
  21.470 --	acpi_os_unmap_memory(virt, buff->log_max_len);
  21.471 -+	acpi_os_unmap_memory(virt, len);
  21.472 - 	return 0;
  21.473 - }
  21.474 - 
  21.475 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_infineon.c ./drivers/char/tpm/tpm_infineon.c
  21.476 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_infineon.c	2006-09-12 19:02:10.000000000 +0100
  21.477 -+++ ./drivers/char/tpm/tpm_infineon.c	2006-09-19 14:05:52.000000000 +0100
  21.478 -@@ -15,6 +15,7 @@
  21.479 -  * License.
  21.480 -  */
  21.481 - 
  21.482 -+#include <linux/init.h>
  21.483 - #include <linux/pnp.h>
  21.484 - #include "tpm.h"
  21.485 - 
  21.486 -@@ -104,7 +105,7 @@ static int empty_fifo(struct tpm_chip *c
  21.487 - 
  21.488 - 	if (clear_wrfifo) {
  21.489 - 		for (i = 0; i < 4096; i++) {
  21.490 --			status = inb(chip->vendor->base + WRFIFO);
  21.491 -+			status = inb(chip->vendor.base + WRFIFO);
  21.492 - 			if (status == 0xff) {
  21.493 - 				if (check == 5)
  21.494 - 					break;
  21.495 -@@ -124,8 +125,8 @@ static int empty_fifo(struct tpm_chip *c
  21.496 - 	 */
  21.497 - 	i = 0;
  21.498 - 	do {
  21.499 --		status = inb(chip->vendor->base + RDFIFO);
  21.500 --		status = inb(chip->vendor->base + STAT);
  21.501 -+		status = inb(chip->vendor.base + RDFIFO);
  21.502 -+		status = inb(chip->vendor.base + STAT);
  21.503 - 		i++;
  21.504 - 		if (i == TPM_MAX_TRIES)
  21.505 - 			return -EIO;
  21.506 -@@ -138,7 +139,7 @@ static int wait(struct tpm_chip *chip, i
  21.507 - 	int status;
  21.508 - 	int i;
  21.509 - 	for (i = 0; i < TPM_MAX_TRIES; i++) {
  21.510 --		status = inb(chip->vendor->base + STAT);
  21.511 -+		status = inb(chip->vendor.base + STAT);
  21.512 - 		/* check the status-register if wait_for_bit is set */
  21.513 - 		if (status & 1 << wait_for_bit)
  21.514 - 			break;
  21.515 -@@ -157,7 +158,7 @@ static int wait(struct tpm_chip *chip, i
  21.516 - static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
  21.517 - {
  21.518 - 	wait(chip, STAT_XFE);
  21.519 --	outb(sendbyte, chip->vendor->base + WRFIFO);
  21.520 -+	outb(sendbyte, chip->vendor.base + WRFIFO);
  21.521 - }
  21.522 - 
  21.523 -     /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
  21.524 -@@ -204,7 +205,7 @@ recv_begin:
  21.525 - 		ret = wait(chip, STAT_RDA);
  21.526 - 		if (ret)
  21.527 - 			return -EIO;
  21.528 --		buf[i] = inb(chip->vendor->base + RDFIFO);
  21.529 -+		buf[i] = inb(chip->vendor.base + RDFIFO);
  21.530 - 	}
  21.531 - 
  21.532 - 	if (buf[0] != TPM_VL_VER) {
  21.533 -@@ -219,7 +220,7 @@ recv_begin:
  21.534 - 
  21.535 - 		for (i = 0; i < size; i++) {
  21.536 - 			wait(chip, STAT_RDA);
  21.537 --			buf[i] = inb(chip->vendor->base + RDFIFO);
  21.538 -+			buf[i] = inb(chip->vendor.base + RDFIFO);
  21.539 - 		}
  21.540 - 
  21.541 - 		if ((size == 0x6D00) && (buf[1] == 0x80)) {
  21.542 -@@ -268,7 +269,7 @@ static int tpm_inf_send(struct tpm_chip 
  21.543 - 	u8 count_high, count_low, count_4, count_3, count_2, count_1;
  21.544 - 
  21.545 - 	/* Disabling Reset, LP and IRQC */
  21.546 --	outb(RESET_LP_IRQC_DISABLE, chip->vendor->base + CMD);
  21.547 -+	outb(RESET_LP_IRQC_DISABLE, chip->vendor.base + CMD);
  21.548 - 
  21.549 - 	ret = empty_fifo(chip, 1);
  21.550 - 	if (ret) {
  21.551 -@@ -319,7 +320,7 @@ static void tpm_inf_cancel(struct tpm_ch
  21.552 - 
  21.553 - static u8 tpm_inf_status(struct tpm_chip *chip)
  21.554 - {
  21.555 --	return inb(chip->vendor->base + STAT);
  21.556 -+	return inb(chip->vendor.base + STAT);
  21.557 - }
  21.558 - 
  21.559 - static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
  21.560 -@@ -346,7 +347,7 @@ static struct file_operations inf_ops = 
  21.561 - 	.release = tpm_release,
  21.562 - };
  21.563 - 
  21.564 --static struct tpm_vendor_specific tpm_inf = {
  21.565 -+static const struct tpm_vendor_specific tpm_inf = {
  21.566 - 	.recv = tpm_inf_recv,
  21.567 - 	.send = tpm_inf_send,
  21.568 - 	.cancel = tpm_inf_cancel,
  21.569 -@@ -375,6 +376,7 @@ static int __devinit tpm_inf_pnp_probe(s
  21.570 - 	int version[2];
  21.571 - 	int productid[2];
  21.572 - 	char chipname[20];
  21.573 -+	struct tpm_chip *chip;
  21.574 - 
  21.575 - 	/* read IO-ports through PnP */
  21.576 - 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
  21.577 -@@ -395,14 +397,13 @@ static int __devinit tpm_inf_pnp_probe(s
  21.578 - 			goto err_last;
  21.579 - 		}
  21.580 - 		/* publish my base address and request region */
  21.581 --		tpm_inf.base = TPM_INF_BASE;
  21.582 - 		if (request_region
  21.583 --		    (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  21.584 -+		    (TPM_INF_BASE, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
  21.585 - 			rc = -EINVAL;
  21.586 - 			goto err_last;
  21.587 - 		}
  21.588 --		if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN,
  21.589 --				"tpm_infineon0") == NULL) {
  21.590 -+		if (request_region
  21.591 -+		    (TPM_INF_ADDR, TPM_INF_ADDR_LEN, "tpm_infineon0") == NULL) {
  21.592 - 			rc = -EINVAL;
  21.593 - 			goto err_last;
  21.594 - 		}
  21.595 -@@ -442,9 +443,9 @@ static int __devinit tpm_inf_pnp_probe(s
  21.596 - 
  21.597 - 		/* configure TPM with IO-ports */
  21.598 - 		outb(IOLIMH, TPM_INF_ADDR);
  21.599 --		outb(((tpm_inf.base >> 8) & 0xff), TPM_INF_DATA);
  21.600 -+		outb(((TPM_INF_BASE >> 8) & 0xff), TPM_INF_DATA);
  21.601 - 		outb(IOLIML, TPM_INF_ADDR);
  21.602 --		outb((tpm_inf.base & 0xff), TPM_INF_DATA);
  21.603 -+		outb((TPM_INF_BASE & 0xff), TPM_INF_DATA);
  21.604 - 
  21.605 - 		/* control if IO-ports are set correctly */
  21.606 - 		outb(IOLIMH, TPM_INF_ADDR);
  21.607 -@@ -452,10 +453,10 @@ static int __devinit tpm_inf_pnp_probe(s
  21.608 - 		outb(IOLIML, TPM_INF_ADDR);
  21.609 - 		iol = inb(TPM_INF_DATA);
  21.610 - 
  21.611 --		if ((ioh << 8 | iol) != tpm_inf.base) {
  21.612 -+		if ((ioh << 8 | iol) != TPM_INF_BASE) {
  21.613 - 			dev_err(&dev->dev,
  21.614 --				"Could not set IO-ports to 0x%lx\n",
  21.615 --				tpm_inf.base);
  21.616 -+				"Could not set IO-ports to 0x%x\n",
  21.617 -+				TPM_INF_BASE);
  21.618 - 			rc = -EIO;
  21.619 - 			goto err_release_region;
  21.620 - 		}
  21.621 -@@ -466,15 +467,15 @@ static int __devinit tpm_inf_pnp_probe(s
  21.622 - 		outb(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
  21.623 - 
  21.624 - 		/* disable RESET, LP and IRQC */
  21.625 --		outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD);
  21.626 -+		outb(RESET_LP_IRQC_DISABLE, TPM_INF_BASE + CMD);
  21.627 - 
  21.628 - 		/* Finally, we're done, print some infos */
  21.629 - 		dev_info(&dev->dev, "TPM found: "
  21.630 - 			 "config base 0x%x, "
  21.631 - 			 "io base 0x%x, "
  21.632 --			 "chip version %02x%02x, "
  21.633 --			 "vendor id %x%x (Infineon), "
  21.634 --			 "product id %02x%02x"
  21.635 -+			 "chip version 0x%02x%02x, "
  21.636 -+			 "vendor id 0x%x%x (Infineon), "
  21.637 -+			 "product id 0x%02x%02x"
  21.638 - 			 "%s\n",
  21.639 - 			 TPM_INF_ADDR,
  21.640 - 			 TPM_INF_BASE,
  21.641 -@@ -482,11 +483,10 @@ static int __devinit tpm_inf_pnp_probe(s
  21.642 - 			 vendorid[0], vendorid[1],
  21.643 - 			 productid[0], productid[1], chipname);
  21.644 - 
  21.645 --		rc = tpm_register_hardware(&dev->dev, &tpm_inf);
  21.646 --		if (rc < 0) {
  21.647 --			rc = -ENODEV;
  21.648 -+		if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf))) {
  21.649 - 			goto err_release_region;
  21.650 - 		}
  21.651 -+		chip->vendor.base = TPM_INF_BASE;
  21.652 - 		return 0;
  21.653 - 	} else {
  21.654 - 		rc = -ENODEV;
  21.655 -@@ -494,7 +494,7 @@ static int __devinit tpm_inf_pnp_probe(s
  21.656 - 	}
  21.657 - 
  21.658 - err_release_region:
  21.659 --	release_region(tpm_inf.base, TPM_INF_PORT_LEN);
  21.660 -+	release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  21.661 - 	release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  21.662 - 
  21.663 - err_last:
  21.664 -@@ -506,7 +506,8 @@ static __devexit void tpm_inf_pnp_remove
  21.665 - 	struct tpm_chip *chip = pnp_get_drvdata(dev);
  21.666 - 
  21.667 - 	if (chip) {
  21.668 --		release_region(chip->vendor->base, TPM_INF_PORT_LEN);
  21.669 -+		release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
  21.670 -+		release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
  21.671 - 		tpm_remove_hardware(chip->dev);
  21.672 - 	}
  21.673 - }
  21.674 -@@ -520,7 +521,7 @@ static struct pnp_driver tpm_inf_pnp = {
  21.675 - 	},
  21.676 - 	.id_table = tpm_pnp_tbl,
  21.677 - 	.probe = tpm_inf_pnp_probe,
  21.678 --	.remove = tpm_inf_pnp_remove,
  21.679 -+	.remove = __devexit_p(tpm_inf_pnp_remove),
  21.680 - };
  21.681 - 
  21.682 - static int __init init_inf(void)
  21.683 -@@ -538,5 +539,5 @@ module_exit(cleanup_inf);
  21.684 - 
  21.685 - MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
  21.686 - MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
  21.687 --MODULE_VERSION("1.7");
  21.688 -+MODULE_VERSION("1.8");
  21.689 - MODULE_LICENSE("GPL");
  21.690 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_nsc.c ./drivers/char/tpm/tpm_nsc.c
  21.691 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_nsc.c	2006-09-12 19:02:10.000000000 +0100
  21.692 -+++ ./drivers/char/tpm/tpm_nsc.c	2006-09-19 14:05:52.000000000 +0100
  21.693 -@@ -71,7 +71,7 @@ static int wait_for_stat(struct tpm_chip
  21.694 - 	unsigned long stop;
  21.695 - 
  21.696 - 	/* status immediately available check */
  21.697 --	*data = inb(chip->vendor->base + NSC_STATUS);
  21.698 -+	*data = inb(chip->vendor.base + NSC_STATUS);
  21.699 - 	if ((*data & mask) == val)
  21.700 - 		return 0;
  21.701 - 
  21.702 -@@ -79,7 +79,7 @@ static int wait_for_stat(struct tpm_chip
  21.703 - 	stop = jiffies + 10 * HZ;
  21.704 - 	do {
  21.705 - 		msleep(TPM_TIMEOUT);
  21.706 --		*data = inb(chip->vendor->base + 1);
  21.707 -+		*data = inb(chip->vendor.base + 1);
  21.708 - 		if ((*data & mask) == val)
  21.709 - 			return 0;
  21.710 - 	}
  21.711 -@@ -94,9 +94,9 @@ static int nsc_wait_for_ready(struct tpm
  21.712 - 	unsigned long stop;
  21.713 - 
  21.714 - 	/* status immediately available check */
  21.715 --	status = inb(chip->vendor->base + NSC_STATUS);
  21.716 -+	status = inb(chip->vendor.base + NSC_STATUS);
  21.717 - 	if (status & NSC_STATUS_OBF)
  21.718 --		status = inb(chip->vendor->base + NSC_DATA);
  21.719 -+		status = inb(chip->vendor.base + NSC_DATA);
  21.720 - 	if (status & NSC_STATUS_RDY)
  21.721 - 		return 0;
  21.722 - 
  21.723 -@@ -104,9 +104,9 @@ static int nsc_wait_for_ready(struct tpm
  21.724 - 	stop = jiffies + 100;
  21.725 - 	do {
  21.726 - 		msleep(TPM_TIMEOUT);
  21.727 --		status = inb(chip->vendor->base + NSC_STATUS);
  21.728 -+		status = inb(chip->vendor.base + NSC_STATUS);
  21.729 - 		if (status & NSC_STATUS_OBF)
  21.730 --			status = inb(chip->vendor->base + NSC_DATA);
  21.731 -+			status = inb(chip->vendor.base + NSC_DATA);
  21.732 - 		if (status & NSC_STATUS_RDY)
  21.733 - 			return 0;
  21.734 - 	}
  21.735 -@@ -132,7 +132,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  21.736 - 		return -EIO;
  21.737 - 	}
  21.738 - 	if ((data =
  21.739 --	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  21.740 -+	     inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  21.741 - 		dev_err(chip->dev, "not in normal mode (0x%x)\n",
  21.742 - 			data);
  21.743 - 		return -EIO;
  21.744 -@@ -148,7 +148,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  21.745 - 		}
  21.746 - 		if (data & NSC_STATUS_F0)
  21.747 - 			break;
  21.748 --		*p = inb(chip->vendor->base + NSC_DATA);
  21.749 -+		*p = inb(chip->vendor.base + NSC_DATA);
  21.750 - 	}
  21.751 - 
  21.752 - 	if ((data & NSC_STATUS_F0) == 0 &&
  21.753 -@@ -156,7 +156,7 @@ static int tpm_nsc_recv(struct tpm_chip 
  21.754 - 		dev_err(chip->dev, "F0 not set\n");
  21.755 - 		return -EIO;
  21.756 - 	}
  21.757 --	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
  21.758 -+	if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
  21.759 - 		dev_err(chip->dev,
  21.760 - 			"expected end of command(0x%x)\n", data);
  21.761 - 		return -EIO;
  21.762 -@@ -182,7 +182,7 @@ static int tpm_nsc_send(struct tpm_chip 
  21.763 - 	 * fix it. Not sure why this is needed, we followed the flow
  21.764 - 	 * chart in the manual to the letter.
  21.765 - 	 */
  21.766 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  21.767 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  21.768 - 
  21.769 - 	if (nsc_wait_for_ready(chip) != 0)
  21.770 - 		return -EIO;
  21.771 -@@ -192,7 +192,7 @@ static int tpm_nsc_send(struct tpm_chip 
  21.772 - 		return -EIO;
  21.773 - 	}
  21.774 - 
  21.775 --	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
  21.776 -+	outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
  21.777 - 	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
  21.778 - 		dev_err(chip->dev, "IBR timeout\n");
  21.779 - 		return -EIO;
  21.780 -@@ -204,26 +204,26 @@ static int tpm_nsc_send(struct tpm_chip 
  21.781 - 				"IBF timeout (while writing data)\n");
  21.782 - 			return -EIO;
  21.783 - 		}
  21.784 --		outb(buf[i], chip->vendor->base + NSC_DATA);
  21.785 -+		outb(buf[i], chip->vendor.base + NSC_DATA);
  21.786 - 	}
  21.787 - 
  21.788 - 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  21.789 - 		dev_err(chip->dev, "IBF timeout\n");
  21.790 - 		return -EIO;
  21.791 - 	}
  21.792 --	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
  21.793 -+	outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
  21.794 - 
  21.795 - 	return count;
  21.796 - }
  21.797 - 
  21.798 - static void tpm_nsc_cancel(struct tpm_chip *chip)
  21.799 - {
  21.800 --	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  21.801 -+	outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
  21.802 - }
  21.803 - 
  21.804 - static u8 tpm_nsc_status(struct tpm_chip *chip)
  21.805 - {
  21.806 --	return inb(chip->vendor->base + NSC_STATUS);
  21.807 -+	return inb(chip->vendor.base + NSC_STATUS);
  21.808 - }
  21.809 - 
  21.810 - static struct file_operations nsc_ops = {
  21.811 -@@ -250,7 +250,7 @@ static struct attribute * nsc_attrs[] = 
  21.812 - 
  21.813 - static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
  21.814 - 
  21.815 --static struct tpm_vendor_specific tpm_nsc = {
  21.816 -+static const struct tpm_vendor_specific tpm_nsc = {
  21.817 - 	.recv = tpm_nsc_recv,
  21.818 - 	.send = tpm_nsc_send,
  21.819 - 	.cancel = tpm_nsc_cancel,
  21.820 -@@ -268,7 +268,7 @@ static void __devexit tpm_nsc_remove(str
  21.821 - {
  21.822 - 	struct tpm_chip *chip = dev_get_drvdata(dev);
  21.823 - 	if ( chip ) {
  21.824 --		release_region(chip->vendor->base, 2);
  21.825 -+		release_region(chip->vendor.base, 2);
  21.826 - 		tpm_remove_hardware(chip->dev);
  21.827 - 	}
  21.828 - }
  21.829 -@@ -286,7 +286,8 @@ static int __init init_nsc(void)
  21.830 - 	int rc = 0;
  21.831 - 	int lo, hi;
  21.832 - 	int nscAddrBase = TPM_ADDR;
  21.833 --
  21.834 -+	struct tpm_chip *chip;
  21.835 -+	unsigned long base;
  21.836 - 
  21.837 - 	/* verify that it is a National part (SID) */
  21.838 - 	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
  21.839 -@@ -300,7 +301,7 @@ static int __init init_nsc(void)
  21.840 - 
  21.841 - 	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
  21.842 - 	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
  21.843 --	tpm_nsc.base = (hi<<8) | lo;
  21.844 -+	base = (hi<<8) | lo;
  21.845 - 
  21.846 - 	/* enable the DPM module */
  21.847 - 	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
  21.848 -@@ -320,13 +321,15 @@ static int __init init_nsc(void)
  21.849 - 	if ((rc = platform_device_register(pdev)) < 0)
  21.850 - 		goto err_free_dev;
  21.851 - 
  21.852 --	if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) {
  21.853 -+	if (request_region(base, 2, "tpm_nsc0") == NULL ) {
  21.854 - 		rc = -EBUSY;
  21.855 - 		goto err_unreg_dev;
  21.856 - 	}
  21.857 - 
  21.858 --	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0)
  21.859 -+	if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
  21.860 -+		rc = -ENODEV;
  21.861 - 		goto err_rel_reg;
  21.862 -+	}
  21.863 - 
  21.864 - 	dev_dbg(&pdev->dev, "NSC TPM detected\n");
  21.865 - 	dev_dbg(&pdev->dev,
  21.866 -@@ -361,10 +364,12 @@ static int __init init_nsc(void)
  21.867 - 		 "NSC TPM revision %d\n",
  21.868 - 		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
  21.869 - 
  21.870 -+	chip->vendor.base = base;
  21.871 -+
  21.872 - 	return 0;
  21.873 - 
  21.874 - err_rel_reg:
  21.875 --	release_region(tpm_nsc.base, 2);
  21.876 -+	release_region(base, 2);
  21.877 - err_unreg_dev:
  21.878 - 	platform_device_unregister(pdev);
  21.879 - err_free_dev:
  21.880 -diff -pruN ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_tis.c ./drivers/char/tpm/tpm_tis.c
  21.881 ---- ../orig-linux-2.6.16.29/drivers/char/tpm/tpm_tis.c	1970-01-01 01:00:00.000000000 +0100
  21.882 -+++ ./drivers/char/tpm/tpm_tis.c	2006-09-19 14:05:52.000000000 +0100
  21.883 -@@ -0,0 +1,665 @@
  21.884 -+/*
  21.885 -+ * Copyright (C) 2005, 2006 IBM Corporation
  21.886 -+ *
  21.887 -+ * Authors:
  21.888 -+ * Leendert van Doorn <leendert@watson.ibm.com>
  21.889 -+ * Kylene Hall <kjhall@us.ibm.com>
  21.890 -+ *
  21.891 -+ * Device driver for TCG/TCPA TPM (trusted platform module).
  21.892 -+ * Specifications at www.trustedcomputinggroup.org
  21.893 -+ *
  21.894 -+ * This device driver implements the TPM interface as defined in
  21.895 -+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
  21.896 -+ *
  21.897 -+ * This program is free software; you can redistribute it and/or
  21.898 -+ * modify it under the terms of the GNU General Public License as
  21.899 -+ * published by the Free Software Foundation, version 2 of the
  21.900 -+ * License.
  21.901 -+ */
  21.902 -+#include <linux/init.h>
  21.903 -+#include <linux/module.h>
  21.904 -+#include <linux/moduleparam.h>
  21.905 -+#include <linux/pnp.h>
  21.906 -+#include <linux/interrupt.h>
  21.907 -+#include <linux/wait.h>
  21.908 -+#include "tpm.h"
  21.909 -+
  21.910 -+#define TPM_HEADER_SIZE 10
  21.911 -+
  21.912 -+enum tis_access {
  21.913 -+	TPM_ACCESS_VALID = 0x80,
  21.914 -+	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
  21.915 -+	TPM_ACCESS_REQUEST_PENDING = 0x04,
  21.916 -+	TPM_ACCESS_REQUEST_USE = 0x02,
  21.917 -+};
  21.918 -+
  21.919 -+enum tis_status {
  21.920 -+	TPM_STS_VALID = 0x80,
  21.921 -+	TPM_STS_COMMAND_READY = 0x40,
  21.922 -+	TPM_STS_GO = 0x20,
  21.923 -+	TPM_STS_DATA_AVAIL = 0x10,
  21.924 -+	TPM_STS_DATA_EXPECT = 0x08,
  21.925 -+};
  21.926 -+
  21.927 -+enum tis_int_flags {
  21.928 -+	TPM_GLOBAL_INT_ENABLE = 0x80000000,
  21.929 -+	TPM_INTF_BURST_COUNT_STATIC = 0x100,
  21.930 -+	TPM_INTF_CMD_READY_INT = 0x080,
  21.931 -+	TPM_INTF_INT_EDGE_FALLING = 0x040,
  21.932 -+	TPM_INTF_INT_EDGE_RISING = 0x020,
  21.933 -+	TPM_INTF_INT_LEVEL_LOW = 0x010,
  21.934 -+	TPM_INTF_INT_LEVEL_HIGH = 0x008,
  21.935 -+	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
  21.936 -+	TPM_INTF_STS_VALID_INT = 0x002,
  21.937 -+	TPM_INTF_DATA_AVAIL_INT = 0x001,
  21.938 -+};
  21.939 -+
  21.940 -+enum tis_defaults {
  21.941 -+	TIS_MEM_BASE = 0xFED40000,
  21.942 -+	TIS_MEM_LEN = 0x5000,
  21.943 -+	TIS_SHORT_TIMEOUT = 750,	/* ms */
  21.944 -+	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
  21.945 -+};
  21.946 -+
  21.947 -+#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
  21.948 -+#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
  21.949 -+#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
  21.950 -+#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
  21.951 -+#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
  21.952 -+#define	TPM_STS(l)			(0x0018 | ((l) << 12))
  21.953 -+#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))
  21.954 -+
  21.955 -+#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
  21.956 -+#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
  21.957 -+
  21.958 -+static LIST_HEAD(tis_chips);
  21.959 -+static DEFINE_SPINLOCK(tis_lock);
  21.960 -+
  21.961 -+static int check_locality(struct tpm_chip *chip, int l)
  21.962 -+{
  21.963 -+	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  21.964 -+	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
  21.965 -+	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
  21.966 -+		return chip->vendor.locality = l;
  21.967 -+
  21.968 -+	return -1;
  21.969 -+}
  21.970 -+
  21.971 -+static void release_locality(struct tpm_chip *chip, int l, int force)
  21.972 -+{
  21.973 -+	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
  21.974 -+		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
  21.975 -+	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
  21.976 -+		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
  21.977 -+			 chip->vendor.iobase + TPM_ACCESS(l));
  21.978 -+}
  21.979 -+
  21.980 -+static int request_locality(struct tpm_chip *chip, int l)
  21.981 -+{
  21.982 -+	unsigned long stop;
  21.983 -+	long rc;
  21.984 -+
  21.985 -+	if (check_locality(chip, l) >= 0)
  21.986 -+		return l;
  21.987 -+
  21.988 -+	iowrite8(TPM_ACCESS_REQUEST_USE,
  21.989 -+		 chip->vendor.iobase + TPM_ACCESS(l));
  21.990 -+
  21.991 -+	if (chip->vendor.irq) {
  21.992 -+		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
  21.993 -+						      (check_locality
  21.994 -+						       (chip, l) >= 0),
  21.995 -+						      chip->vendor.timeout_a);
  21.996 -+		if (rc > 0)
  21.997 -+			return l;
  21.998 -+
  21.999 -+	} else {
 21.1000 -+		/* wait for burstcount */
 21.1001 -+		stop = jiffies + chip->vendor.timeout_a;
 21.1002 -+		do {
 21.1003 -+			if (check_locality(chip, l) >= 0)
 21.1004 -+				return l;
 21.1005 -+			msleep(TPM_TIMEOUT);
 21.1006 -+		}
 21.1007 -+		while (time_before(jiffies, stop));
 21.1008 -+	}
 21.1009 -+	return -1;
 21.1010 -+}
 21.1011 -+
 21.1012 -+static u8 tpm_tis_status(struct tpm_chip *chip)
 21.1013 -+{
 21.1014 -+	return ioread8(chip->vendor.iobase +
 21.1015 -+		       TPM_STS(chip->vendor.locality));
 21.1016 -+}
 21.1017 -+
 21.1018 -+static void tpm_tis_ready(struct tpm_chip *chip)
 21.1019 -+{
 21.1020 -+	/* this causes the current command to be aborted */
 21.1021 -+	iowrite8(TPM_STS_COMMAND_READY,
 21.1022 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 21.1023 -+}
 21.1024 -+
 21.1025 -+static int get_burstcount(struct tpm_chip *chip)
 21.1026 -+{
 21.1027 -+	unsigned long stop;
 21.1028 -+	int burstcnt;
 21.1029 -+
 21.1030 -+	/* wait for burstcount */
 21.1031 -+	/* which timeout value, spec has 2 answers (c & d) */
 21.1032 -+	stop = jiffies + chip->vendor.timeout_d;
 21.1033 -+	do {
 21.1034 -+		burstcnt = ioread8(chip->vendor.iobase +
 21.1035 -+				   TPM_STS(chip->vendor.locality) + 1);
 21.1036 -+		burstcnt += ioread8(chip->vendor.iobase +
 21.1037 -+				    TPM_STS(chip->vendor.locality) +
 21.1038 -+				    2) << 8;
 21.1039 -+		if (burstcnt)
 21.1040 -+			return burstcnt;
 21.1041 -+		msleep(TPM_TIMEOUT);
 21.1042 -+	} while (time_before(jiffies, stop));
 21.1043 -+	return -EBUSY;
 21.1044 -+}
 21.1045 -+
 21.1046 -+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 21.1047 -+			 wait_queue_head_t *queue)
 21.1048 -+{
 21.1049 -+	unsigned long stop;
 21.1050 -+	long rc;
 21.1051 -+	u8 status;
 21.1052 -+
 21.1053 -+	/* check current status */
 21.1054 -+	status = tpm_tis_status(chip);
 21.1055 -+	if ((status & mask) == mask)
 21.1056 -+		return 0;
 21.1057 -+
 21.1058 -+	if (chip->vendor.irq) {
 21.1059 -+		rc = wait_event_interruptible_timeout(*queue,
 21.1060 -+						      ((tpm_tis_status
 21.1061 -+							(chip) & mask) ==
 21.1062 -+						       mask), timeout);
 21.1063 -+		if (rc > 0)
 21.1064 -+			return 0;
 21.1065 -+	} else {
 21.1066 -+		stop = jiffies + timeout;
 21.1067 -+		do {
 21.1068 -+			msleep(TPM_TIMEOUT);
 21.1069 -+			status = tpm_tis_status(chip);
 21.1070 -+			if ((status & mask) == mask)
 21.1071 -+				return 0;
 21.1072 -+		} while (time_before(jiffies, stop));
 21.1073 -+	}
 21.1074 -+	return -ETIME;
 21.1075 -+}
 21.1076 -+
 21.1077 -+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 21.1078 -+{
 21.1079 -+	int size = 0, burstcnt;
 21.1080 -+	while (size < count &&
 21.1081 -+	       wait_for_stat(chip,
 21.1082 -+			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 21.1083 -+			     chip->vendor.timeout_c,
 21.1084 -+			     &chip->vendor.read_queue)
 21.1085 -+	       == 0) {
 21.1086 -+		burstcnt = get_burstcount(chip);
 21.1087 -+		for (; burstcnt > 0 && size < count; burstcnt--)
 21.1088 -+			buf[size++] = ioread8(chip->vendor.iobase +
 21.1089 -+					      TPM_DATA_FIFO(chip->vendor.
 21.1090 -+							    locality));
 21.1091 -+	}
 21.1092 -+	return size;
 21.1093 -+}
 21.1094 -+
 21.1095 -+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 21.1096 -+{
 21.1097 -+	int size = 0;
 21.1098 -+	int expected, status;
 21.1099 -+
 21.1100 -+	if (count < TPM_HEADER_SIZE) {
 21.1101 -+		size = -EIO;
 21.1102 -+		goto out;
 21.1103 -+	}
 21.1104 -+
 21.1105 -+	/* read first 10 bytes, including tag, paramsize, and result */
 21.1106 -+	if ((size =
 21.1107 -+	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
 21.1108 -+		dev_err(chip->dev, "Unable to read header\n");
 21.1109 -+		goto out;
 21.1110 -+	}
 21.1111 -+
 21.1112 -+	expected = be32_to_cpu(*(__be32 *) (buf + 2));
 21.1113 -+	if (expected > count) {
 21.1114 -+		size = -EIO;
 21.1115 -+		goto out;
 21.1116 -+	}
 21.1117 -+
 21.1118 -+	if ((size +=
 21.1119 -+	     recv_data(chip, &buf[TPM_HEADER_SIZE],
 21.1120 -+		       expected - TPM_HEADER_SIZE)) < expected) {
 21.1121 -+		dev_err(chip->dev, "Unable to read remainder of result\n");
 21.1122 -+		size = -ETIME;
 21.1123 -+		goto out;
 21.1124 -+	}
 21.1125 -+
 21.1126 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 21.1127 -+		      &chip->vendor.int_queue);
 21.1128 -+	status = tpm_tis_status(chip);
 21.1129 -+	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
 21.1130 -+		dev_err(chip->dev, "Error left over data\n");
 21.1131 -+		size = -EIO;
 21.1132 -+		goto out;
 21.1133 -+	}
 21.1134 -+
 21.1135 -+out:
 21.1136 -+	tpm_tis_ready(chip);
 21.1137 -+	release_locality(chip, chip->vendor.locality, 0);
 21.1138 -+	return size;
 21.1139 -+}
 21.1140 -+
 21.1141 -+/*
 21.1142 -+ * If interrupts are used (signaled by an irq set in the vendor structure)
 21.1143 -+ * tpm.c can skip polling for the data to be available as the interrupt is
 21.1144 -+ * waited for here
 21.1145 -+ */
 21.1146 -+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
 21.1147 -+{
 21.1148 -+	int rc, status, burstcnt;
 21.1149 -+	size_t count = 0;
 21.1150 -+	u32 ordinal;
 21.1151 -+
 21.1152 -+	if (request_locality(chip, 0) < 0)
 21.1153 -+		return -EBUSY;
 21.1154 -+
 21.1155 -+	status = tpm_tis_status(chip);
 21.1156 -+	if ((status & TPM_STS_COMMAND_READY) == 0) {
 21.1157 -+		tpm_tis_ready(chip);
 21.1158 -+		if (wait_for_stat
 21.1159 -+		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
 21.1160 -+		     &chip->vendor.int_queue) < 0) {
 21.1161 -+			rc = -ETIME;
 21.1162 -+			goto out_err;
 21.1163 -+		}
 21.1164 -+	}
 21.1165 -+
 21.1166 -+	while (count < len - 1) {
 21.1167 -+		burstcnt = get_burstcount(chip);
 21.1168 -+		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
 21.1169 -+			iowrite8(buf[count], chip->vendor.iobase +
 21.1170 -+				 TPM_DATA_FIFO(chip->vendor.locality));
 21.1171 -+			count++;
 21.1172 -+		}
 21.1173 -+
 21.1174 -+		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 21.1175 -+			      &chip->vendor.int_queue);
 21.1176 -+		status = tpm_tis_status(chip);
 21.1177 -+		if ((status & TPM_STS_DATA_EXPECT) == 0) {
 21.1178 -+			rc = -EIO;
 21.1179 -+			goto out_err;
 21.1180 -+		}
 21.1181 -+	}
 21.1182 -+
 21.1183 -+	/* write last byte */
 21.1184 -+	iowrite8(buf[count],
 21.1185 -+		 chip->vendor.iobase +
 21.1186 -+		 TPM_DATA_FIFO(chip->vendor.locality));
 21.1187 -+	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
 21.1188 -+		      &chip->vendor.int_queue);
 21.1189 -+	status = tpm_tis_status(chip);
 21.1190 -+	if ((status & TPM_STS_DATA_EXPECT) != 0) {
 21.1191 -+		rc = -EIO;
 21.1192 -+		goto out_err;
 21.1193 -+	}
 21.1194 -+
 21.1195 -+	/* go and do it */
 21.1196 -+	iowrite8(TPM_STS_GO,
 21.1197 -+		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
 21.1198 -+
 21.1199 -+	if (chip->vendor.irq) {
 21.1200 -+		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
 21.1201 -+		if (wait_for_stat
 21.1202 -+		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 21.1203 -+		     tpm_calc_ordinal_duration(chip, ordinal),
 21.1204 -+		     &chip->vendor.read_queue) < 0) {
 21.1205 -+			rc = -ETIME;
 21.1206 -+			goto out_err;
 21.1207 -+		}
 21.1208 -+	}
 21.1209 -+	return len;
 21.1210 -+out_err:
 21.1211 -+	tpm_tis_ready(chip);
 21.1212 -+	release_locality(chip, chip->vendor.locality, 0);
 21.1213 -+	return rc;
 21.1214 -+}
 21.1215 -+
 21.1216 -+static struct file_operations tis_ops = {
 21.1217 -+	.owner = THIS_MODULE,
 21.1218 -+	.llseek = no_llseek,
 21.1219 -+	.open = tpm_open,
 21.1220 -+	.read = tpm_read,
 21.1221 -+	.write = tpm_write,
 21.1222 -+	.release = tpm_release,
 21.1223 -+};
 21.1224 -+
 21.1225 -+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
 21.1226 -+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
 21.1227 -+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
 21.1228 -+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
 21.1229 -+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
 21.1230 -+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
 21.1231 -+		   NULL);
 21.1232 -+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 21.1233 -+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 21.1234 -+
 21.1235 -+static struct attribute *tis_attrs[] = {
 21.1236 -+	&dev_attr_pubek.attr,
 21.1237 -+	&dev_attr_pcrs.attr,
 21.1238 -+	&dev_attr_enabled.attr,
 21.1239 -+	&dev_attr_active.attr,
 21.1240 -+	&dev_attr_owned.attr,
 21.1241 -+	&dev_attr_temp_deactivated.attr,
 21.1242 -+	&dev_attr_caps.attr,
 21.1243 -+	&dev_attr_cancel.attr, NULL,
 21.1244 -+};
 21.1245 -+
 21.1246 -+static struct attribute_group tis_attr_grp = {
 21.1247 -+	.attrs = tis_attrs
 21.1248 -+};
 21.1249 -+
 21.1250 -+static struct tpm_vendor_specific tpm_tis = {
 21.1251 -+	.status = tpm_tis_status,
 21.1252 -+	.recv = tpm_tis_recv,
 21.1253 -+	.send = tpm_tis_send,
 21.1254 -+	.cancel = tpm_tis_ready,
 21.1255 -+	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 21.1256 -+	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 21.1257 -+	.req_canceled = TPM_STS_COMMAND_READY,
 21.1258 -+	.attr_group = &tis_attr_grp,
 21.1259 -+	.miscdev = {
 21.1260 -+		    .fops = &tis_ops,},
 21.1261 -+};
 21.1262 -+
 21.1263 -+static irqreturn_t tis_int_probe(int irq, void *dev_id, struct pt_regs *regs)
 21.1264 -+{
 21.1265 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 21.1266 -+	u32 interrupt;
 21.1267 -+
 21.1268 -+	interrupt = ioread32(chip->vendor.iobase +
 21.1269 -+			     TPM_INT_STATUS(chip->vendor.locality));
 21.1270 -+
 21.1271 -+	if (interrupt == 0)
 21.1272 -+		return IRQ_NONE;
 21.1273 -+
 21.1274 -+	chip->vendor.irq = irq;
 21.1275 -+
 21.1276 -+	/* Clear interrupts handled with TPM_EOI */
 21.1277 -+	iowrite32(interrupt,
 21.1278 -+		  chip->vendor.iobase +
 21.1279 -+		  TPM_INT_STATUS(chip->vendor.locality));
 21.1280 -+	return IRQ_HANDLED;
 21.1281 -+}
 21.1282 -+
 21.1283 -+static irqreturn_t tis_int_handler(int irq, void *dev_id, struct pt_regs *regs)
 21.1284 -+{
 21.1285 -+	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
 21.1286 -+	u32 interrupt;
 21.1287 -+	int i;
 21.1288 -+
 21.1289 -+	interrupt = ioread32(chip->vendor.iobase +
 21.1290 -+			     TPM_INT_STATUS(chip->vendor.locality));
 21.1291 -+
 21.1292 -+	if (interrupt == 0)
 21.1293 -+		return IRQ_NONE;
 21.1294 -+
 21.1295 -+	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
 21.1296 -+		wake_up_interruptible(&chip->vendor.read_queue);
 21.1297 -+	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
 21.1298 -+		for (i = 0; i < 5; i++)
 21.1299 -+			if (check_locality(chip, i) >= 0)
 21.1300 -+				break;
 21.1301 -+	if (interrupt &
 21.1302 -+	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
 21.1303 -+	     TPM_INTF_CMD_READY_INT))
 21.1304 -+		wake_up_interruptible(&chip->vendor.int_queue);
 21.1305 -+
 21.1306 -+	/* Clear interrupts handled with TPM_EOI */
 21.1307 -+	iowrite32(interrupt,
 21.1308 -+		  chip->vendor.iobase +
 21.1309 -+		  TPM_INT_STATUS(chip->vendor.locality));
 21.1310 -+	return IRQ_HANDLED;
 21.1311 -+}
 21.1312 -+
 21.1313 -+static int interrupts = 1;
 21.1314 -+module_param(interrupts, bool, 0444);
 21.1315 -+MODULE_PARM_DESC(interrupts, "Enable interrupts");
 21.1316 -+
 21.1317 -+static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 21.1318 -+				      const struct pnp_device_id *pnp_id)
 21.1319 -+{
 21.1320 -+	u32 vendor, intfcaps, intmask;
 21.1321 -+	int rc, i;
 21.1322 -+	unsigned long start, len;
 21.1323 -+	struct tpm_chip *chip;
 21.1324 -+
 21.1325 -+	start = pnp_mem_start(pnp_dev, 0);
 21.1326 -+	len = pnp_mem_len(pnp_dev, 0);
 21.1327 -+
 21.1328 -+	if (!start)
 21.1329 -+		start = TIS_MEM_BASE;
 21.1330 -+	if (!len)
 21.1331 -+		len = TIS_MEM_LEN;
 21.1332 -+
 21.1333 -+	if (!(chip = tpm_register_hardware(&pnp_dev->dev, &tpm_tis)))
 21.1334 -+		return -ENODEV;
 21.1335 -+
 21.1336 -+	chip->vendor.iobase = ioremap(start, len);
 21.1337 -+	if (!chip->vendor.iobase) {
 21.1338 -+		rc = -EIO;
 21.1339 -+		goto out_err;
 21.1340 -+	}
 21.1341 -+
 21.1342 -+	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
 21.1343 -+
 21.1344 -+	/* Default timeouts */
 21.1345 -+	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 21.1346 -+	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
 21.1347 -+	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 21.1348 -+	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 21.1349 -+
 21.1350 -+	dev_info(&pnp_dev->dev,
 21.1351 -+		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 21.1352 -+		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 21.1353 -+
 21.1354 -+	/* Figure out the capabilities */
 21.1355 -+	intfcaps =
 21.1356 -+	    ioread32(chip->vendor.iobase +
 21.1357 -+		     TPM_INTF_CAPS(chip->vendor.locality));
 21.1358 -+	dev_dbg(&pnp_dev->dev, "TPM interface capabilities (0x%x):\n",
 21.1359 -+		intfcaps);
 21.1360 -+	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
 21.1361 -+		dev_dbg(&pnp_dev->dev, "\tBurst Count Static\n");
 21.1362 -+	if (intfcaps & TPM_INTF_CMD_READY_INT)
 21.1363 -+		dev_dbg(&pnp_dev->dev, "\tCommand Ready Int Support\n");
 21.1364 -+	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
 21.1365 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Falling\n");
 21.1366 -+	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
 21.1367 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Edge Rising\n");
 21.1368 -+	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
 21.1369 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level Low\n");
 21.1370 -+	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
 21.1371 -+		dev_dbg(&pnp_dev->dev, "\tInterrupt Level High\n");
 21.1372 -+	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
 21.1373 -+		dev_dbg(&pnp_dev->dev, "\tLocality Change Int Support\n");
 21.1374 -+	if (intfcaps & TPM_INTF_STS_VALID_INT)
 21.1375 -+		dev_dbg(&pnp_dev->dev, "\tSts Valid Int Support\n");
 21.1376 -+	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 21.1377 -+		dev_dbg(&pnp_dev->dev, "\tData Avail Int Support\n");
 21.1378 -+
 21.1379 -+	if (request_locality(chip, 0) != 0) {
 21.1380 -+		rc = -ENODEV;
 21.1381 -+		goto out_err;
 21.1382 -+	}
 21.1383 -+
 21.1384 -+	/* INTERRUPT Setup */
 21.1385 -+	init_waitqueue_head(&chip->vendor.read_queue);
 21.1386 -+	init_waitqueue_head(&chip->vendor.int_queue);
 21.1387 -+
 21.1388 -+	intmask =
 21.1389 -+	    ioread32(chip->vendor.iobase +
 21.1390 -+		     TPM_INT_ENABLE(chip->vendor.locality));
 21.1391 -+
 21.1392 -+	intmask |= TPM_INTF_CMD_READY_INT
 21.1393 -+	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
 21.1394 -+	    | TPM_INTF_STS_VALID_INT;
 21.1395 -+
 21.1396 -+	iowrite32(intmask,
 21.1397 -+		  chip->vendor.iobase +
 21.1398 -+		  TPM_INT_ENABLE(chip->vendor.locality));
 21.1399 -+	if (interrupts) {
 21.1400 -+		chip->vendor.irq =
 21.1401 -+		    ioread8(chip->vendor.iobase +
 21.1402 -+			    TPM_INT_VECTOR(chip->vendor.locality));
 21.1403 -+
 21.1404 -+		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
 21.1405 -+			iowrite8(i, chip->vendor.iobase +
 21.1406 -+				    TPM_INT_VECTOR(chip->vendor.locality));
 21.1407 -+			if (request_irq
 21.1408 -+			    (i, tis_int_probe, SA_SHIRQ,
 21.1409 -+			     chip->vendor.miscdev.name, chip) != 0) {
 21.1410 -+				dev_info(chip->dev,
 21.1411 -+					 "Unable to request irq: %d for probe\n",
 21.1412 -+					 i);
 21.1413 -+				continue;
 21.1414 -+			}
 21.1415 -+
 21.1416 -+			/* Clear all existing */
 21.1417 -+			iowrite32(ioread32
 21.1418 -+				  (chip->vendor.iobase +
 21.1419 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 21.1420 -+				  chip->vendor.iobase +
 21.1421 -+				  TPM_INT_STATUS(chip->vendor.locality));
 21.1422 -+
 21.1423 -+			/* Turn on */
 21.1424 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 21.1425 -+				  chip->vendor.iobase +
 21.1426 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 21.1427 -+
 21.1428 -+			/* Generate Interrupts */
 21.1429 -+			tpm_gen_interrupt(chip);
 21.1430 -+
 21.1431 -+			/* Turn off */
 21.1432 -+			iowrite32(intmask,
 21.1433 -+				  chip->vendor.iobase +
 21.1434 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 21.1435 -+			free_irq(i, chip);
 21.1436 -+		}
 21.1437 -+	}
 21.1438 -+	if (chip->vendor.irq) {
 21.1439 -+		iowrite8(chip->vendor.irq,
 21.1440 -+			 chip->vendor.iobase +
 21.1441 -+			 TPM_INT_VECTOR(chip->vendor.locality));
 21.1442 -+		if (request_irq
 21.1443 -+		    (chip->vendor.irq, tis_int_handler, SA_SHIRQ,
 21.1444 -+		     chip->vendor.miscdev.name, chip) != 0) {
 21.1445 -+			dev_info(chip->dev,
 21.1446 -+				 "Unable to request irq: %d for use\n",
 21.1447 -+				 chip->vendor.irq);
 21.1448 -+			chip->vendor.irq = 0;
 21.1449 -+		} else {
 21.1450 -+			/* Clear all existing */
 21.1451 -+			iowrite32(ioread32
 21.1452 -+				  (chip->vendor.iobase +
 21.1453 -+				   TPM_INT_STATUS(chip->vendor.locality)),
 21.1454 -+				  chip->vendor.iobase +
 21.1455 -+				  TPM_INT_STATUS(chip->vendor.locality));
 21.1456 -+
 21.1457 -+			/* Turn on */
 21.1458 -+			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
 21.1459 -+				  chip->vendor.iobase +
 21.1460 -+				  TPM_INT_ENABLE(chip->vendor.locality));
 21.1461 -+		}
 21.1462 -+	}
 21.1463 -+
 21.1464 -+	INIT_LIST_HEAD(&chip->vendor.list);
 21.1465 -+	spin_lock(&tis_lock);
 21.1466 -+	list_add(&chip->vendor.list, &tis_chips);
 21.1467 -+	spin_unlock(&tis_lock);
 21.1468 -+
 21.1469 -+	tpm_get_timeouts(chip);
 21.1470 -+	tpm_continue_selftest(chip);
 21.1471 -+
 21.1472 -+	return 0;
 21.1473 -+out_err:
 21.1474 -+	if (chip->vendor.iobase)
 21.1475 -+		iounmap(chip->vendor.iobase);
 21.1476 -+	tpm_remove_hardware(chip->dev);
 21.1477 -+	return rc;
 21.1478 -+}
 21.1479 -+
 21.1480 -+static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 21.1481 -+{
 21.1482 -+	return tpm_pm_suspend(&dev->dev, msg);
 21.1483 -+}
 21.1484 -+
 21.1485 -+static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 21.1486 -+{
 21.1487 -+	return tpm_pm_resume(&dev->dev);
 21.1488 -+}
 21.1489 -+
 21.1490 -+static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
 21.1491 -+	{"PNP0C31", 0},		/* TPM */
 21.1492 -+	{"ATM1200", 0},		/* Atmel */
 21.1493 -+	{"IFX0102", 0},		/* Infineon */
 21.1494 -+	{"BCM0101", 0},		/* Broadcom */
 21.1495 -+	{"NSC1200", 0},		/* National */
 21.1496 -+	/* Add new here */
 21.1497 -+	{"", 0},		/* User Specified */
 21.1498 -+	{"", 0}			/* Terminator */
 21.1499 -+};
 21.1500 -+
 21.1501 -+static struct pnp_driver tis_pnp_driver = {
 21.1502 -+	.name = "tpm_tis",
 21.1503 -+	.id_table = tpm_pnp_tbl,
 21.1504 -+	.probe = tpm_tis_pnp_init,
 21.1505 -+	.suspend = tpm_tis_pnp_suspend,
 21.1506 -+	.resume = tpm_tis_pnp_resume,
 21.1507 -+};
 21.1508 -+
 21.1509 -+#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
 21.1510 -+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 21.1511 -+		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
 21.1512 -+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 21.1513 -+
 21.1514 -+static int __init init_tis(void)
 21.1515 -+{
 21.1516 -+	return pnp_register_driver(&tis_pnp_driver);
 21.1517 -+}
 21.1518 -+
 21.1519 -+static void __exit cleanup_tis(void)
 21.1520 -+{
 21.1521 -+	struct tpm_vendor_specific *i, *j;
 21.1522 -+	struct tpm_chip *chip;
 21.1523 -+	spin_lock(&tis_lock);
 21.1524 -+	list_for_each_entry_safe(i, j, &tis_chips, list) {
 21.1525 -+		chip = to_tpm_chip(i);
 21.1526 -+		iowrite32(~TPM_GLOBAL_INT_ENABLE &
 21.1527 -+			  ioread32(chip->vendor.iobase +
 21.1528 -+				   TPM_INT_ENABLE(chip->vendor.
 21.1529 -+						  locality)),
 21.1530 -+			  chip->vendor.iobase +
 21.1531 -+			  TPM_INT_ENABLE(chip->vendor.locality));
 21.1532 -+		release_locality(chip, chip->vendor.locality, 1);
 21.1533 -+		if (chip->vendor.irq)
 21.1534 -+			free_irq(chip->vendor.irq, chip);
 21.1535 -+		iounmap(i->iobase);
 21.1536 -+		list_del(&i->list);
 21.1537 -+		tpm_remove_hardware(chip->dev);
 21.1538 -+	}
 21.1539 -+	spin_unlock(&tis_lock);
 21.1540 -+	pnp_unregister_driver(&tis_pnp_driver);
 21.1541 -+}
 21.1542 -+
 21.1543 -+module_init(init_tis);
 21.1544 -+module_exit(cleanup_tis);
 21.1545 -+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
 21.1546 -+MODULE_DESCRIPTION("TPM Driver");
 21.1547 -+MODULE_VERSION("2.0");
 21.1548 -+MODULE_LICENSE("GPL");
    22.1 --- a/patches/linux-2.6.16.29/x86-elfnote-as-preprocessor-macro.patch	Mon Nov 27 13:22:21 2006 +0000
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,43 +0,0 @@
    22.4 -diff -pruN ../orig-linux-2.6.16.29/include/linux/elfnote.h ./include/linux/elfnote.h
    22.5 ---- ../orig-linux-2.6.16.29/include/linux/elfnote.h	2006-09-19 14:06:10.000000000 +0100
    22.6 -+++ ./include/linux/elfnote.h	2006-09-19 14:06:20.000000000 +0100
    22.7 -@@ -31,22 +31,24 @@
    22.8 - /*
    22.9 -  * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
   22.10 -  * turn out to be the same size and shape), followed by the name and
   22.11 -- * desc data with appropriate padding.  The 'desc' argument includes
   22.12 -- * the assembler pseudo op defining the type of the data: .asciz
   22.13 -- * "hello, world"
   22.14 -+ * desc data with appropriate padding.  The 'desctype' argument is the
   22.15 -+ * assembler pseudo op defining the type of the data e.g. .asciz while
   22.16 -+ * 'descdata' is the data itself e.g.  "hello, world".
   22.17 -+ *
   22.18 -+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
   22.19 -+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
   22.20 -  */
   22.21 --.macro ELFNOTE name type desc:vararg
   22.22 --.pushsection ".note.\name"
   22.23 --  .align 4
   22.24 --  .long 2f - 1f			/* namesz */
   22.25 --  .long 4f - 3f			/* descsz */
   22.26 --  .long \type
   22.27 --1:.asciz "\name"
   22.28 --2:.align 4
   22.29 --3:\desc
   22.30 --4:.align 4
   22.31 --.popsection
   22.32 --.endm
   22.33 -+#define ELFNOTE(name, type, desctype, descdata)	\
   22.34 -+.pushsection .note.name			;	\
   22.35 -+  .align 4				;	\
   22.36 -+  .long 2f - 1f		/* namesz */	;	\
   22.37 -+  .long 4f - 3f		/* descsz */	;	\
   22.38 -+  .long type				;	\
   22.39 -+1:.asciz "name"				;	\
   22.40 -+2:.align 4				;	\
   22.41 -+3:desctype descdata			;	\
   22.42 -+4:.align 4				;	\
   22.43 -+.popsection				;
   22.44 - #else	/* !__ASSEMBLER__ */
   22.45 - #include <linux/elf.h>
   22.46 - /*
    23.1 --- a/patches/linux-2.6.16.29/x86-increase-interrupt-vector-range.patch	Mon Nov 27 13:22:21 2006 +0000
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,89 +0,0 @@
    23.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S ./arch/i386/kernel/entry.S
    23.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/entry.S	2006-09-19 14:05:44.000000000 +0100
    23.6 -+++ ./arch/i386/kernel/entry.S	2006-09-19 14:05:56.000000000 +0100
    23.7 -@@ -406,7 +406,7 @@ vector=0
    23.8 - ENTRY(irq_entries_start)
    23.9 - .rept NR_IRQS
   23.10 - 	ALIGN
   23.11 --1:	pushl $vector-256
   23.12 -+1:	pushl $~(vector)
   23.13 - 	jmp common_interrupt
   23.14 - .data
   23.15 - 	.long 1b
   23.16 -@@ -423,7 +423,7 @@ common_interrupt:
   23.17 - 
   23.18 - #define BUILD_INTERRUPT(name, nr)	\
   23.19 - ENTRY(name)				\
   23.20 --	pushl $nr-256;			\
   23.21 -+	pushl $~(nr);			\
   23.22 - 	SAVE_ALL			\
   23.23 - 	movl %esp,%eax;			\
   23.24 - 	call smp_/**/name;		\
   23.25 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/irq.c ./arch/i386/kernel/irq.c
   23.26 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/irq.c	2006-09-12 19:02:10.000000000 +0100
   23.27 -+++ ./arch/i386/kernel/irq.c	2006-09-19 14:05:56.000000000 +0100
   23.28 -@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPU
   23.29 -  */
   23.30 - fastcall unsigned int do_IRQ(struct pt_regs *regs)
   23.31 - {	
   23.32 --	/* high bits used in ret_from_ code */
   23.33 --	int irq = regs->orig_eax & 0xff;
   23.34 -+	/* high bit used in ret_from_ code */
   23.35 -+	int irq = ~regs->orig_eax;
   23.36 - #ifdef CONFIG_4KSTACKS
   23.37 - 	union irq_ctx *curctx, *irqctx;
   23.38 - 	u32 *isp;
   23.39 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/entry.S ./arch/x86_64/kernel/entry.S
   23.40 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/entry.S	2006-09-12 19:02:10.000000000 +0100
   23.41 -+++ ./arch/x86_64/kernel/entry.S	2006-09-19 14:05:56.000000000 +0100
   23.42 -@@ -596,7 +596,7 @@ retint_kernel:	
   23.43 -  */		
   23.44 - 	.macro apicinterrupt num,func
   23.45 - 	INTR_FRAME
   23.46 --	pushq $\num-256
   23.47 -+	pushq $~(\num)
   23.48 - 	CFI_ADJUST_CFA_OFFSET 8
   23.49 - 	interrupt \func
   23.50 - 	jmp ret_from_intr
   23.51 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/irq.c ./arch/x86_64/kernel/irq.c
   23.52 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/irq.c	2006-09-12 19:02:10.000000000 +0100
   23.53 -+++ ./arch/x86_64/kernel/irq.c	2006-09-19 14:05:56.000000000 +0100
   23.54 -@@ -96,8 +96,8 @@ skip:
   23.55 -  */
   23.56 - asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
   23.57 - {	
   23.58 --	/* high bits used in ret_from_ code  */
   23.59 --	unsigned irq = regs->orig_rax & 0xff;
   23.60 -+	/* high bit used in ret_from_ code  */
   23.61 -+	unsigned irq = ~regs->orig_rax;
   23.62 - 
   23.63 - 	exit_idle();
   23.64 - 	irq_enter();
   23.65 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/smp.c ./arch/x86_64/kernel/smp.c
   23.66 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/smp.c	2006-09-12 19:02:10.000000000 +0100
   23.67 -+++ ./arch/x86_64/kernel/smp.c	2006-09-19 14:05:56.000000000 +0100
   23.68 -@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt
   23.69 - 
   23.70 - 	cpu = smp_processor_id();
   23.71 - 	/*
   23.72 --	 * orig_rax contains the interrupt vector - 256.
   23.73 -+	 * orig_rax contains the negated interrupt vector.
   23.74 - 	 * Use that to determine where the sender put the data.
   23.75 - 	 */
   23.76 --	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
   23.77 -+	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
   23.78 - 	f = &per_cpu(flush_state, sender);
   23.79 - 
   23.80 - 	if (!cpu_isset(cpu, f->flush_cpumask))
   23.81 -diff -pruN ../orig-linux-2.6.16.29/include/asm-x86_64/hw_irq.h ./include/asm-x86_64/hw_irq.h
   23.82 ---- ../orig-linux-2.6.16.29/include/asm-x86_64/hw_irq.h	2006-09-12 19:02:10.000000000 +0100
   23.83 -+++ ./include/asm-x86_64/hw_irq.h	2006-09-19 14:05:56.000000000 +0100
   23.84 -@@ -127,7 +127,7 @@ asmlinkage void IRQ_NAME(nr); \
   23.85 - __asm__( \
   23.86 - "\n.p2align\n" \
   23.87 - "IRQ" #nr "_interrupt:\n\t" \
   23.88 --	"push $" #nr "-256 ; " \
   23.89 -+	"push $~(" #nr ") ; " \
   23.90 - 	"jmp common_interrupt");
   23.91 - 
   23.92 - #if defined(CONFIG_X86_IO_APIC)
    24.1 --- a/patches/linux-2.6.16.29/x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Mon Nov 27 13:22:21 2006 +0000
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,143 +0,0 @@
    24.4 -diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S ./arch/i386/kernel/vmlinux.lds.S
    24.5 ---- ../orig-linux-2.6.16.29/arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:05:48.000000000 +0100
    24.6 -+++ ./arch/i386/kernel/vmlinux.lds.S	2006-09-19 14:06:10.000000000 +0100
    24.7 -@@ -12,6 +12,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
    24.8 - OUTPUT_ARCH(i386)
    24.9 - ENTRY(phys_startup_32)
   24.10 - jiffies = jiffies_64;
   24.11 -+
   24.12 -+PHDRS {
   24.13 -+	text PT_LOAD FLAGS(5);	/* R_E */
   24.14 -+	data PT_LOAD FLAGS(7);	/* RWE */
   24.15 -+	note PT_NOTE FLAGS(4);	/* R__ */
   24.16 -+}
   24.17 - SECTIONS
   24.18 - {
   24.19 -   . = __KERNEL_START;
   24.20 -@@ -25,7 +31,7 @@ SECTIONS
   24.21 - 	KPROBES_TEXT
   24.22 - 	*(.fixup)
   24.23 - 	*(.gnu.warning)
   24.24 --	} = 0x9090
   24.25 -+	} :text = 0x9090
   24.26 - 
   24.27 -   _etext = .;			/* End of text section */
   24.28 - 
   24.29 -@@ -47,7 +53,7 @@ SECTIONS
   24.30 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
   24.31 - 	*(.data)
   24.32 - 	CONSTRUCTORS
   24.33 --	}
   24.34 -+	} :data
   24.35 - 
   24.36 -   . = ALIGN(4096);
   24.37 -   __nosave_begin = .;
   24.38 -@@ -154,4 +160,6 @@ SECTIONS
   24.39 -   STABS_DEBUG
   24.40 - 
   24.41 -   DWARF_DEBUG
   24.42 -+
   24.43 -+  NOTES
   24.44 - }
   24.45 -diff -pruN ../orig-linux-2.6.16.29/include/asm-generic/vmlinux.lds.h ./include/asm-generic/vmlinux.lds.h
   24.46 ---- ../orig-linux-2.6.16.29/include/asm-generic/vmlinux.lds.h	2006-09-12 19:02:10.000000000 +0100
   24.47 -+++ ./include/asm-generic/vmlinux.lds.h	2006-09-19 14:06:10.000000000 +0100
   24.48 -@@ -152,3 +152,6 @@
   24.49 - 		.stab.index 0 : { *(.stab.index) }			\
   24.50 - 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
   24.51 - 		.comment 0 : { *(.comment) }
   24.52 -+
   24.53 -+#define NOTES								\
   24.54 -+		.notes : { *(.note.*) } :note
   24.55 -diff -pruN ../orig-linux-2.6.16.29/include/linux/elfnote.h ./include/linux/elfnote.h
   24.56 ---- ../orig-linux-2.6.16.29/include/linux/elfnote.h	1970-01-01 01:00:00.000000000 +0100
   24.57 -+++ ./include/linux/elfnote.h	2006-09-19 14:06:10.000000000 +0100
   24.58 -@@ -0,0 +1,88 @@
   24.59 -+#ifndef _LINUX_ELFNOTE_H
   24.60 -+#define _LINUX_ELFNOTE_H
   24.61 -+/*
   24.62 -+ * Helper macros to generate ELF Note structures, which are put into a
   24.63 -+ * PT_NOTE segment of the final vmlinux image.  These are useful for
   24.64 -+ * including name-value pairs of metadata into the kernel binary (or
   24.65 -+ * modules?) for use by external programs.
   24.66 -+ *
   24.67 -+ * Each note has three parts: a name, a type and a desc.  The name is
   24.68 -+ * intended to distinguish the note's originator, so it would be a
   24.69 -+ * company, project, subsystem, etc; it must be in a suitable form for
   24.70 -+ * use in a section name.  The type is an integer which is used to tag
   24.71 -+ * the data, and is considered to be within the "name" namespace (so
   24.72 -+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
   24.73 -+ * "desc" field is the actual data.  There are no constraints on the
   24.74 -+ * desc field's contents, though typically they're fairly small.
   24.75 -+ *
   24.76 -+ * All notes from a given NAME are put into a section named
   24.77 -+ * .note.NAME.  When the kernel image is finally linked, all the notes
   24.78 -+ * are packed into a single .notes section, which is mapped into the
   24.79 -+ * PT_NOTE segment.  Because notes for a given name are grouped into
   24.80 -+ * the same section, they'll all be adjacent the output file.
   24.81 -+ *
   24.82 -+ * This file defines macros for both C and assembler use.  Their
   24.83 -+ * syntax is slightly different, but they're semantically similar.
   24.84 -+ *
   24.85 -+ * See the ELF specification for more detail about ELF notes.
   24.86 -+ */
   24.87 -+
   24.88 -+#ifdef __ASSEMBLER__
   24.89 -+/*
   24.90 -+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
   24.91 -+ * turn out to be the same size and shape), followed by the name and
   24.92 -+ * desc data with appropriate padding.  The 'desc' argument includes
   24.93 -+ * the assembler pseudo op defining the type of the data: .asciz
   24.94 -+ * "hello, world"
   24.95 -+ */
   24.96 -+.macro ELFNOTE name type desc:vararg
   24.97 -+.pushsection ".note.\name"
   24.98 -+  .align 4
   24.99 -+  .long 2f - 1f			/* namesz */
  24.100 -+  .long 4f - 3f			/* descsz */
  24.101 -+  .long \type
  24.102 -+1:.asciz "\name"
  24.103 -+2:.align 4
  24.104 -+3:\desc
  24.105 -+4:.align 4
  24.106 -+.popsection
  24.107 -+.endm
  24.108 -+#else	/* !__ASSEMBLER__ */
  24.109 -+#include <linux/elf.h>
  24.110 -+/*
  24.111 -+ * Use an anonymous structure which matches the shape of
  24.112 -+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
  24.113 -+ * type of name and desc depend on the macro arguments.  "name" must
  24.114 -+ * be a literal string, and "desc" must be passed by value.  You may
  24.115 -+ * only define one note per line, since __LINE__ is used to generate
  24.116 -+ * unique symbols.
  24.117 -+ */
  24.118 -+#define _ELFNOTE_PASTE(a,b)	a##b
  24.119 -+#define _ELFNOTE(size, name, unique, type, desc)			\
  24.120 -+	static const struct {						\
  24.121 -+		struct elf##size##_note _nhdr;				\
  24.122 -+		unsigned char _name[sizeof(name)]			\
  24.123 -+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
  24.124 -+		typeof(desc) _desc					\
  24.125 -+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
  24.126 -+	} _ELFNOTE_PASTE(_note_, unique)				\
  24.127 -+		__attribute_used__					\
  24.128 -+		__attribute__((section(".note." name),			\
  24.129 -+			       aligned(sizeof(Elf##size##_Word)),	\
  24.130 -+			       unused)) = {				\
  24.131 -+		{							\
  24.132 -+			sizeof(name),					\
  24.133 -+			sizeof(desc),					\
  24.134 -+			type,						\
  24.135 -+		},							\
  24.136 -+		name,							\
  24.137 -+		desc							\
  24.138 -+	}
  24.139 -+#define ELFNOTE(size, name, type, desc)		\
  24.140 -+	_ELFNOTE(size, name, __LINE__, type, desc)
  24.141 -+
  24.142 -+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
  24.143 -+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
  24.144 -+#endif	/* __ASSEMBLER__ */
  24.145 -+
  24.146 -+#endif /* _LINUX_ELFNOTE_H */
    25.1 --- a/patches/linux-2.6.16.29/x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch	Mon Nov 27 13:22:21 2006 +0000
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,84 +0,0 @@
    25.4 -diff -pruN ../orig-linux-2.6.16.29/arch/x86_64/kernel/vmlinux.lds.S ./arch/x86_64/kernel/vmlinux.lds.S
    25.5 ---- ../orig-linux-2.6.16.29/arch/x86_64/kernel/vmlinux.lds.S	2006-09-12 19:02:10.000000000 +0100
    25.6 -+++ ./arch/x86_64/kernel/vmlinux.lds.S	2006-09-19 14:06:15.000000000 +0100
    25.7 -@@ -14,6 +14,12 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86
    25.8 - OUTPUT_ARCH(i386:x86-64)
    25.9 - ENTRY(phys_startup_64)
   25.10 - jiffies_64 = jiffies;
   25.11 -+PHDRS {
   25.12 -+	text PT_LOAD FLAGS(5);	/* R_E */
   25.13 -+	data PT_LOAD FLAGS(7);	/* RWE */
   25.14 -+	user PT_LOAD FLAGS(7);	/* RWE */
   25.15 -+	note PT_NOTE FLAGS(4);	/* R__ */
   25.16 -+}
   25.17 - SECTIONS
   25.18 - {
   25.19 -   . = __START_KERNEL;
   25.20 -@@ -26,7 +32,7 @@ SECTIONS
   25.21 - 	KPROBES_TEXT
   25.22 - 	*(.fixup)
   25.23 - 	*(.gnu.warning)
   25.24 --	} = 0x9090
   25.25 -+	} :text = 0x9090
   25.26 -   				/* out-of-line lock text */
   25.27 -   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
   25.28 - 
   25.29 -@@ -43,17 +49,10 @@ SECTIONS
   25.30 -   .data : AT(ADDR(.data) - LOAD_OFFSET) {
   25.31 - 	*(.data)
   25.32 - 	CONSTRUCTORS
   25.33 --	}
   25.34 -+	} :data
   25.35 - 
   25.36 -   _edata = .;			/* End of data section */
   25.37 - 
   25.38 --  __bss_start = .;		/* BSS */
   25.39 --  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   25.40 --	*(.bss.page_aligned)	
   25.41 --	*(.bss)
   25.42 --	}
   25.43 --  __bss_stop = .;
   25.44 --
   25.45 -   . = ALIGN(PAGE_SIZE);
   25.46 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   25.47 -   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
   25.48 -@@ -75,7 +74,7 @@ SECTIONS
   25.49 - #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
   25.50 - 
   25.51 -   . = VSYSCALL_ADDR;
   25.52 --  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
   25.53 -+  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
   25.54 -   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
   25.55 - 
   25.56 -   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   25.57 -@@ -118,7 +117,7 @@ SECTIONS
   25.58 -   . = ALIGN(8192);		/* init_task */
   25.59 -   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
   25.60 - 	*(.data.init_task)
   25.61 --  }
   25.62 -+  } :data
   25.63 - 
   25.64 -   . = ALIGN(4096);
   25.65 -   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
   25.66 -@@ -188,6 +187,14 @@ SECTIONS
   25.67 -   . = ALIGN(4096);
   25.68 -   __nosave_end = .;
   25.69 - 
   25.70 -+  __bss_start = .;		/* BSS */
   25.71 -+  . = ALIGN(4096);
   25.72 -+  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
   25.73 -+	*(.bss.page_aligned)
   25.74 -+	*(.bss)
   25.75 -+	}
   25.76 -+  __bss_stop = .;
   25.77 -+
   25.78 -   _end = . ;
   25.79 - 
   25.80 -   /* Sections to be discarded */
   25.81 -@@ -201,4 +208,6 @@ SECTIONS
   25.82 -   STABS_DEBUG
   25.83 - 
   25.84 -   DWARF_DEBUG
   25.85 -+
   25.86 -+  NOTES
   25.87 - }
    26.1 --- a/patches/linux-2.6.16.29/xen-hotplug.patch	Mon Nov 27 13:22:21 2006 +0000
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,12 +0,0 @@
    26.4 -diff -pruN ../orig-linux-2.6.16.29/fs/proc/proc_misc.c ./fs/proc/proc_misc.c
    26.5 ---- ../orig-linux-2.6.16.29/fs/proc/proc_misc.c	2006-09-12 19:02:10.000000000 +0100
    26.6 -+++ ./fs/proc/proc_misc.c	2006-09-19 14:06:00.000000000 +0100
    26.7 -@@ -433,7 +433,7 @@ static int show_stat(struct seq_file *p,
    26.8 - 		(unsigned long long)cputime64_to_clock_t(irq),
    26.9 - 		(unsigned long long)cputime64_to_clock_t(softirq),
   26.10 - 		(unsigned long long)cputime64_to_clock_t(steal));
   26.11 --	for_each_online_cpu(i) {
   26.12 -+	for_each_cpu(i) {
   26.13 - 
   26.14 - 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
   26.15 - 		user = kstat_cpu(i).cpustat.user;
    27.1 --- a/patches/linux-2.6.16.29/xenoprof-generic.patch	Mon Nov 27 13:22:21 2006 +0000
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,662 +0,0 @@
    27.4 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
    27.5 ---- ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c	2006-11-06 14:46:52.000000000 -0800
    27.6 -+++ ./drivers/oprofile/buffer_sync.c	2006-11-06 15:16:52.000000000 -0800
    27.7 -@@ -6,6 +6,10 @@
    27.8 -  *
    27.9 -  * @author John Levon <levon@movementarian.org>
   27.10 -  *
   27.11 -+ * Modified by Aravind Menon for Xen
   27.12 -+ * These modifications are:
   27.13 -+ * Copyright (C) 2005 Hewlett-Packard Co.
   27.14 -+ *
   27.15 -  * This is the core of the buffer management. Each
   27.16 -  * CPU buffer is processed and entered into the
   27.17 -  * global event buffer. Such processing is necessary
   27.18 -@@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
   27.19 - static DEFINE_SPINLOCK(task_mortuary);
   27.20 - static void process_task_mortuary(void);
   27.21 - 
   27.22 -+static int cpu_current_domain[NR_CPUS];
   27.23 - 
   27.24 - /* Take ownership of the task struct and place it on the
   27.25 -  * list for processing. Only after two full buffer syncs
   27.26 -@@ -146,6 +151,11 @@ static void end_sync(void)
   27.27 - int sync_start(void)
   27.28 - {
   27.29 - 	int err;
   27.30 -+	int i;
   27.31 -+
   27.32 -+	for (i = 0; i < NR_CPUS; i++) {
   27.33 -+		cpu_current_domain[i] = COORDINATOR_DOMAIN;
   27.34 -+	}
   27.35 - 
   27.36 - 	start_cpu_work();
   27.37 - 
   27.38 -@@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
   27.39 - 	last_cookie = INVALID_COOKIE;
   27.40 - }
   27.41 - 
   27.42 --static void add_kernel_ctx_switch(unsigned int in_kernel)
   27.43 -+static void add_cpu_mode_switch(unsigned int cpu_mode)
   27.44 - {
   27.45 - 	add_event_entry(ESCAPE_CODE);
   27.46 --	if (in_kernel)
   27.47 --		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
   27.48 --	else
   27.49 --		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
   27.50 -+	switch (cpu_mode) {
   27.51 -+	case CPU_MODE_USER:
   27.52 -+		add_event_entry(USER_ENTER_SWITCH_CODE);
   27.53 -+		break;
   27.54 -+	case CPU_MODE_KERNEL:
   27.55 -+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
   27.56 -+		break;
   27.57 -+	case CPU_MODE_XEN:
   27.58 -+		add_event_entry(XEN_ENTER_SWITCH_CODE);
   27.59 -+	  	break;
   27.60 -+	default:
   27.61 -+		break;
   27.62 -+	}
   27.63 - }
   27.64 -- 
   27.65 -+
   27.66 -+static void add_domain_switch(unsigned long domain_id)
   27.67 -+{
   27.68 -+	add_event_entry(ESCAPE_CODE);
   27.69 -+	add_event_entry(DOMAIN_SWITCH_CODE);
   27.70 -+	add_event_entry(domain_id);
   27.71 -+}
   27.72 -+
   27.73 - static void
   27.74 - add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
   27.75 - {
   27.76 -@@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
   27.77 -  * for later lookup from userspace.
   27.78 -  */
   27.79 - static int
   27.80 --add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
   27.81 -+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
   27.82 - {
   27.83 --	if (in_kernel) {
   27.84 -+	if (cpu_mode >= CPU_MODE_KERNEL) {
   27.85 - 		add_sample_entry(s->eip, s->event);
   27.86 - 		return 1;
   27.87 - 	} else if (mm) {
   27.88 -@@ -496,15 +522,21 @@ void sync_buffer(int cpu)
   27.89 - 	struct mm_struct *mm = NULL;
   27.90 - 	struct task_struct * new;
   27.91 - 	unsigned long cookie = 0;
   27.92 --	int in_kernel = 1;
   27.93 -+	int cpu_mode = 1;
   27.94 - 	unsigned int i;
   27.95 - 	sync_buffer_state state = sb_buffer_start;
   27.96 - 	unsigned long available;
   27.97 -+	int domain_switch = 0;
   27.98 - 
   27.99 - 	down(&buffer_sem);
  27.100 -  
  27.101 - 	add_cpu_switch(cpu);
  27.102 - 
  27.103 -+	/* We need to assign the first samples in this CPU buffer to the
  27.104 -+	   same domain that we were processing at the last sync_buffer */
  27.105 -+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
  27.106 -+		add_domain_switch(cpu_current_domain[cpu]);
  27.107 -+	}
  27.108 - 	/* Remember, only we can modify tail_pos */
  27.109 - 
  27.110 - 	available = get_slots(cpu_buf);
  27.111 -@@ -512,16 +544,18 @@ void sync_buffer(int cpu)
  27.112 - 	for (i = 0; i < available; ++i) {
  27.113 - 		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
  27.114 -  
  27.115 --		if (is_code(s->eip)) {
  27.116 --			if (s->event <= CPU_IS_KERNEL) {
  27.117 --				/* kernel/userspace switch */
  27.118 --				in_kernel = s->event;
  27.119 -+		if (is_code(s->eip) && !domain_switch) {
  27.120 -+			if (s->event <= CPU_MODE_XEN) {
  27.121 -+				/* xen/kernel/userspace switch */
  27.122 -+				cpu_mode = s->event;
  27.123 - 				if (state == sb_buffer_start)
  27.124 - 					state = sb_sample_start;
  27.125 --				add_kernel_ctx_switch(s->event);
  27.126 -+				add_cpu_mode_switch(s->event);
  27.127 - 			} else if (s->event == CPU_TRACE_BEGIN) {
  27.128 - 				state = sb_bt_start;
  27.129 - 				add_trace_begin();
  27.130 -+			} else if (s->event == CPU_DOMAIN_SWITCH) {
  27.131 -+					domain_switch = 1;				
  27.132 - 			} else {
  27.133 - 				struct mm_struct * oldmm = mm;
  27.134 - 
  27.135 -@@ -535,11 +569,21 @@ void sync_buffer(int cpu)
  27.136 - 				add_user_ctx_switch(new, cookie);
  27.137 - 			}
  27.138 - 		} else {
  27.139 --			if (state >= sb_bt_start &&
  27.140 --			    !add_sample(mm, s, in_kernel)) {
  27.141 --				if (state == sb_bt_start) {
  27.142 --					state = sb_bt_ignore;
  27.143 --					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  27.144 -+			if (domain_switch) {
  27.145 -+				cpu_current_domain[cpu] = s->eip;
  27.146 -+				add_domain_switch(s->eip);
  27.147 -+				domain_switch = 0;
  27.148 -+			} else {
  27.149 -+				if (cpu_current_domain[cpu] !=
  27.150 -+				    COORDINATOR_DOMAIN) {
  27.151 -+					add_sample_entry(s->eip, s->event);
  27.152 -+				}
  27.153 -+				else  if (state >= sb_bt_start &&
  27.154 -+				    !add_sample(mm, s, cpu_mode)) {
  27.155 -+					if (state == sb_bt_start) {
  27.156 -+						state = sb_bt_ignore;
  27.157 -+						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
  27.158 -+					}
  27.159 - 				}
  27.160 - 			}
  27.161 - 		}
  27.162 -@@ -548,6 +592,11 @@ void sync_buffer(int cpu)
  27.163 - 	}
  27.164 - 	release_mm(mm);
  27.165 - 
  27.166 -+	/* We reset domain to COORDINATOR at each CPU switch */
  27.167 -+	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
  27.168 -+		add_domain_switch(COORDINATOR_DOMAIN);
  27.169 -+	}
  27.170 -+
  27.171 - 	mark_done(cpu);
  27.172 - 
  27.173 - 	up(&buffer_sem);
  27.174 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
  27.175 ---- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c	2006-11-06 14:46:52.000000000 -0800
  27.176 -+++ ./drivers/oprofile/cpu_buffer.c	2006-11-06 14:47:55.000000000 -0800
  27.177 -@@ -6,6 +6,10 @@
  27.178 -  *
  27.179 -  * @author John Levon <levon@movementarian.org>
  27.180 -  *
  27.181 -+ * Modified by Aravind Menon for Xen
  27.182 -+ * These modifications are:
  27.183 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  27.184 -+ *
  27.185 -  * Each CPU has a local buffer that stores PC value/event
  27.186 -  * pairs. We also log context switches when we notice them.
  27.187 -  * Eventually each CPU's buffer is processed into the global
  27.188 -@@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
  27.189 - #define DEFAULT_TIMER_EXPIRE (HZ / 10)
  27.190 - static int work_enabled;
  27.191 - 
  27.192 -+static int32_t current_domain = COORDINATOR_DOMAIN;
  27.193 -+
  27.194 - void free_cpu_buffers(void)
  27.195 - {
  27.196 - 	int i;
  27.197 -@@ -58,7 +64,7 @@ int alloc_cpu_buffers(void)
  27.198 - 			goto fail;
  27.199 -  
  27.200 - 		b->last_task = NULL;
  27.201 --		b->last_is_kernel = -1;
  27.202 -+		b->last_cpu_mode = -1;
  27.203 - 		b->tracing = 0;
  27.204 - 		b->buffer_size = buffer_size;
  27.205 - 		b->tail_pos = 0;
  27.206 -@@ -114,7 +120,7 @@ void cpu_buffer_reset(struct oprofile_cp
  27.207 - 	 * collected will populate the buffer with proper
  27.208 - 	 * values to initialize the buffer
  27.209 - 	 */
  27.210 --	cpu_buf->last_is_kernel = -1;
  27.211 -+	cpu_buf->last_cpu_mode = -1;
  27.212 - 	cpu_buf->last_task = NULL;
  27.213 - }
  27.214 - 
  27.215 -@@ -164,13 +170,13 @@ add_code(struct oprofile_cpu_buffer * bu
  27.216 -  * because of the head/tail separation of the writer and reader
  27.217 -  * of the CPU buffer.
  27.218 -  *
  27.219 -- * is_kernel is needed because on some architectures you cannot
  27.220 -+ * cpu_mode is needed because on some architectures you cannot
  27.221 -  * tell if you are in kernel or user space simply by looking at
  27.222 -- * pc. We tag this in the buffer by generating kernel enter/exit
  27.223 -- * events whenever is_kernel changes
  27.224 -+ * pc. We tag this in the buffer by generating kernel/user (and xen)
  27.225 -+ *  enter events whenever cpu_mode changes
  27.226 -  */
  27.227 - static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
  27.228 --		      int is_kernel, unsigned long event)
  27.229 -+		      int cpu_mode, unsigned long event)
  27.230 - {
  27.231 - 	struct task_struct * task;
  27.232 - 
  27.233 -@@ -181,18 +187,18 @@ static int log_sample(struct oprofile_cp
  27.234 - 		return 0;
  27.235 - 	}
  27.236 - 
  27.237 --	is_kernel = !!is_kernel;
  27.238 --
  27.239 - 	task = current;
  27.240 - 
  27.241 - 	/* notice a switch from user->kernel or vice versa */
  27.242 --	if (cpu_buf->last_is_kernel != is_kernel) {
  27.243 --		cpu_buf->last_is_kernel = is_kernel;
  27.244 --		add_code(cpu_buf, is_kernel);
  27.245 -+	if (cpu_buf->last_cpu_mode != cpu_mode) {
  27.246 -+		cpu_buf->last_cpu_mode = cpu_mode;
  27.247 -+		add_code(cpu_buf, cpu_mode);
  27.248 - 	}
  27.249 --
  27.250 -+	
  27.251 - 	/* notice a task switch */
  27.252 --	if (cpu_buf->last_task != task) {
  27.253 -+	/* if not processing other domain samples */
  27.254 -+	if ((cpu_buf->last_task != task) &&
  27.255 -+	    (current_domain == COORDINATOR_DOMAIN)) {
  27.256 - 		cpu_buf->last_task = task;
  27.257 - 		add_code(cpu_buf, (unsigned long)task);
  27.258 - 	}
  27.259 -@@ -269,6 +275,25 @@ void oprofile_add_trace(unsigned long pc
  27.260 - 	add_sample(cpu_buf, pc, 0);
  27.261 - }
  27.262 - 
  27.263 -+int oprofile_add_domain_switch(int32_t domain_id)
  27.264 -+{
  27.265 -+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
  27.266 -+
  27.267 -+	/* should have space for switching into and out of domain 
  27.268 -+	   (2 slots each) plus one sample and one cpu mode switch */
  27.269 -+	if (((nr_available_slots(cpu_buf) < 6) && 
  27.270 -+	     (domain_id != COORDINATOR_DOMAIN)) ||
  27.271 -+	    (nr_available_slots(cpu_buf) < 2))
  27.272 -+		return 0;
  27.273 -+
  27.274 -+	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
  27.275 -+	add_sample(cpu_buf, domain_id, 0);
  27.276 -+
  27.277 -+	current_domain = domain_id;
  27.278 -+
  27.279 -+	return 1;
  27.280 -+}
  27.281 -+
  27.282 - /*
  27.283 -  * This serves to avoid cpu buffer overflow, and makes sure
  27.284 -  * the task mortuary progresses
  27.285 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
  27.286 ---- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h	2006-11-06 14:46:52.000000000 -0800
  27.287 -+++ ./drivers/oprofile/cpu_buffer.h	2006-11-06 14:47:55.000000000 -0800
  27.288 -@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
  27.289 - 	volatile unsigned long tail_pos;
  27.290 - 	unsigned long buffer_size;
  27.291 - 	struct task_struct * last_task;
  27.292 --	int last_is_kernel;
  27.293 -+	int last_cpu_mode;
  27.294 - 	int tracing;
  27.295 - 	struct op_sample * buffer;
  27.296 - 	unsigned long sample_received;
  27.297 -@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
  27.298 - void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
  27.299 - 
  27.300 - /* transient events for the CPU buffer -> event buffer */
  27.301 --#define CPU_IS_KERNEL 1
  27.302 --#define CPU_TRACE_BEGIN 2
  27.303 -+#define CPU_MODE_USER           0
  27.304 -+#define CPU_MODE_KERNEL         1
  27.305 -+#define CPU_MODE_XEN            2
  27.306 -+#define CPU_TRACE_BEGIN         3
  27.307 -+#define CPU_DOMAIN_SWITCH       4
  27.308 - 
  27.309 - #endif /* OPROFILE_CPU_BUFFER_H */
  27.310 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
  27.311 ---- ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h	2006-11-06 14:46:52.000000000 -0800
  27.312 -+++ ./drivers/oprofile/event_buffer.h	2006-11-06 14:47:55.000000000 -0800
  27.313 -@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
  27.314 - #define CPU_SWITCH_CODE 		2
  27.315 - #define COOKIE_SWITCH_CODE 		3
  27.316 - #define KERNEL_ENTER_SWITCH_CODE	4
  27.317 --#define KERNEL_EXIT_SWITCH_CODE		5
  27.318 -+#define USER_ENTER_SWITCH_CODE		5
  27.319 - #define MODULE_LOADED_CODE		6
  27.320 - #define CTX_TGID_CODE			7
  27.321 - #define TRACE_BEGIN_CODE		8
  27.322 - #define TRACE_END_CODE			9
  27.323 -+#define XEN_ENTER_SWITCH_CODE		10
  27.324 -+#define DOMAIN_SWITCH_CODE		11
  27.325 -  
  27.326 - #define INVALID_COOKIE ~0UL
  27.327 - #define NO_COOKIE 0UL
  27.328 - 
  27.329 -+/* Constant used to refer to coordinator domain (Xen) */
  27.330 -+#define COORDINATOR_DOMAIN -1
  27.331 -+
  27.332 - /* add data to the event buffer */
  27.333 - void add_event_entry(unsigned long data);
  27.334 -  
  27.335 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
  27.336 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c	2006-11-06 14:46:52.000000000 -0800
  27.337 -+++ ./drivers/oprofile/oprof.c	2006-11-06 14:47:55.000000000 -0800
  27.338 -@@ -5,6 +5,10 @@
  27.339 -  * @remark Read the file COPYING
  27.340 -  *
  27.341 -  * @author John Levon <levon@movementarian.org>
  27.342 -+ *
  27.343 -+ * Modified by Aravind Menon for Xen
  27.344 -+ * These modifications are:
  27.345 -+ * Copyright (C) 2005 Hewlett-Packard Co.
  27.346 -  */
  27.347 - 
  27.348 - #include <linux/kernel.h>
  27.349 -@@ -19,7 +23,7 @@
  27.350 - #include "cpu_buffer.h"
  27.351 - #include "buffer_sync.h"
  27.352 - #include "oprofile_stats.h"
  27.353 -- 
  27.354 -+
  27.355 - struct oprofile_operations oprofile_ops;
  27.356 - 
  27.357 - unsigned long oprofile_started;
  27.358 -@@ -33,6 +37,32 @@ static DECLARE_MUTEX(start_sem);
  27.359 -  */
  27.360 - static int timer = 0;
  27.361 - 
  27.362 -+int oprofile_set_active(int active_domains[], unsigned int adomains)
  27.363 -+{
  27.364 -+	int err;
  27.365 -+
  27.366 -+	if (!oprofile_ops.set_active)
  27.367 -+		return -EINVAL;
  27.368 -+
  27.369 -+	down(&start_sem);
  27.370 -+	err = oprofile_ops.set_active(active_domains, adomains);
  27.371 -+	up(&start_sem);
  27.372 -+	return err;
  27.373 -+}
  27.374 -+
  27.375 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
  27.376 -+{
  27.377 -+	int err;
  27.378 -+
  27.379 -+	if (!oprofile_ops.set_passive)
  27.380 -+		return -EINVAL;
  27.381 -+
  27.382 -+	down(&start_sem);
  27.383 -+	err = oprofile_ops.set_passive(passive_domains, pdomains);
  27.384 -+	up(&start_sem);
  27.385 -+	return err;
  27.386 -+}
  27.387 -+
  27.388 - int oprofile_setup(void)
  27.389 - {
  27.390 - 	int err;
  27.391 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
  27.392 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h	2006-11-06 14:46:52.000000000 -0800
  27.393 -+++ ./drivers/oprofile/oprof.h	2006-11-06 14:47:55.000000000 -0800
  27.394 -@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
  27.395 - void oprofile_timer_init(struct oprofile_operations * ops);
  27.396 - 
  27.397 - int oprofile_set_backtrace(unsigned long depth);
  27.398 -+
  27.399 -+int oprofile_set_active(int active_domains[], unsigned int adomains);
  27.400 -+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
  27.401 -  
  27.402 - #endif /* OPROF_H */
  27.403 -diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
  27.404 ---- ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c	2006-11-06 14:46:52.000000000 -0800
  27.405 -+++ ./drivers/oprofile/oprofile_files.c	2006-11-06 14:47:55.000000000 -0800
  27.406 -@@ -5,15 +5,21 @@
  27.407 -  * @remark Read the file COPYING
  27.408 -  *
  27.409 -  * @author John Levon <levon@movementarian.org>
  27.410 -+ *
  27.411 -+ * Modified by Aravind Menon for Xen
  27.412 -+ * These modifications are:
  27.413 -+ * Copyright (C) 2005 Hewlett-Packard Co.	
  27.414 -  */
  27.415 - 
  27.416 - #include <linux/fs.h>
  27.417 - #include <linux/oprofile.h>
  27.418 -+#include <asm/uaccess.h>
  27.419 -+#include <linux/ctype.h>
  27.420 - 
  27.421 - #include "event_buffer.h"
  27.422 - #include "oprofile_stats.h"
  27.423 - #include "oprof.h"
  27.424 -- 
  27.425 -+
  27.426 - unsigned long fs_buffer_size = 131072;
  27.427 - unsigned long fs_cpu_buffer_size = 8192;
  27.428 - unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
  27.429 -@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file * 
  27.430 - static struct file_operations dump_fops = {
  27.431 - 	.write		= dump_write,
  27.432 - };
  27.433 -- 
  27.434 -+
  27.435 -+#define TMPBUFSIZE 512
  27.436 -+
  27.437 -+static unsigned int adomains = 0;
  27.438 -+static int active_domains[MAX_OPROF_DOMAINS + 1];
  27.439 -+static DEFINE_MUTEX(adom_mutex);
  27.440 -+
  27.441 -+static ssize_t adomain_write(struct file * file, char const __user * buf, 
  27.442 -+			     size_t count, loff_t * offset)
  27.443 -+{
  27.444 -+	char *tmpbuf;
  27.445 -+	char *startp, *endp;
  27.446 -+	int i;
  27.447 -+	unsigned long val;
  27.448 -+	ssize_t retval = count;
  27.449 -+	
  27.450 -+	if (*offset)
  27.451 -+		return -EINVAL;	
  27.452 -+	if (count > TMPBUFSIZE - 1)
  27.453 -+		return -EINVAL;
  27.454 -+
  27.455 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  27.456 -+		return -ENOMEM;
  27.457 -+
  27.458 -+	if (copy_from_user(tmpbuf, buf, count)) {
  27.459 -+		kfree(tmpbuf);
  27.460 -+		return -EFAULT;
  27.461 -+	}
  27.462 -+	tmpbuf[count] = 0;
  27.463 -+
  27.464 -+	mutex_lock(&adom_mutex);
  27.465 -+
  27.466 -+	startp = tmpbuf;
  27.467 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  27.468 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  27.469 -+		val = simple_strtoul(startp, &endp, 0);
  27.470 -+		if (endp == startp)
  27.471 -+			break;
  27.472 -+		while (ispunct(*endp) || isspace(*endp))
  27.473 -+			endp++;
  27.474 -+		active_domains[i] = val;
  27.475 -+		if (active_domains[i] != val)
  27.476 -+			/* Overflow, force error below */
  27.477 -+			i = MAX_OPROF_DOMAINS + 1;
  27.478 -+		startp = endp;
  27.479 -+	}
  27.480 -+	/* Force error on trailing junk */
  27.481 -+	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  27.482 -+
  27.483 -+	kfree(tmpbuf);
  27.484 -+
  27.485 -+	if (adomains > MAX_OPROF_DOMAINS
  27.486 -+	    || oprofile_set_active(active_domains, adomains)) {
  27.487 -+		adomains = 0;
  27.488 -+		retval = -EINVAL;
  27.489 -+	}
  27.490 -+
  27.491 -+	mutex_unlock(&adom_mutex);
  27.492 -+	return retval;
  27.493 -+}
  27.494 -+
  27.495 -+static ssize_t adomain_read(struct file * file, char __user * buf, 
  27.496 -+			    size_t count, loff_t * offset)
  27.497 -+{
  27.498 -+	char * tmpbuf;
  27.499 -+	size_t len;
  27.500 -+	int i;
  27.501 -+	ssize_t retval;
  27.502 -+
  27.503 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  27.504 -+		return -ENOMEM;
  27.505 -+
  27.506 -+	mutex_lock(&adom_mutex);
  27.507 -+
  27.508 -+	len = 0;
  27.509 -+	for (i = 0; i < adomains; i++)
  27.510 -+		len += snprintf(tmpbuf + len,
  27.511 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  27.512 -+				"%u ", active_domains[i]);
  27.513 -+	WARN_ON(len > TMPBUFSIZE);
  27.514 -+	if (len != 0 && len <= TMPBUFSIZE)
  27.515 -+		tmpbuf[len-1] = '\n';
  27.516 -+
  27.517 -+	mutex_unlock(&adom_mutex);
  27.518 -+
  27.519 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  27.520 -+
  27.521 -+	kfree(tmpbuf);
  27.522 -+	return retval;
  27.523 -+}
  27.524 -+
  27.525 -+
  27.526 -+static struct file_operations active_domain_ops = {
  27.527 -+	.read		= adomain_read,
  27.528 -+	.write		= adomain_write,
  27.529 -+};
  27.530 -+
  27.531 -+static unsigned int pdomains = 0;
  27.532 -+static int passive_domains[MAX_OPROF_DOMAINS];
  27.533 -+static DEFINE_MUTEX(pdom_mutex);
  27.534 -+
  27.535 -+static ssize_t pdomain_write(struct file * file, char const __user * buf, 
  27.536 -+			     size_t count, loff_t * offset)
  27.537 -+{
  27.538 -+	char *tmpbuf;
  27.539 -+	char *startp, *endp;
  27.540 -+	int i;
  27.541 -+	unsigned long val;
  27.542 -+	ssize_t retval = count;
  27.543 -+	
  27.544 -+	if (*offset)
  27.545 -+		return -EINVAL;	
  27.546 -+	if (count > TMPBUFSIZE - 1)
  27.547 -+		return -EINVAL;
  27.548 -+
  27.549 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  27.550 -+		return -ENOMEM;
  27.551 -+
  27.552 -+	if (copy_from_user(tmpbuf, buf, count)) {
  27.553 -+		kfree(tmpbuf);
  27.554 -+		return -EFAULT;
  27.555 -+	}
  27.556 -+	tmpbuf[count] = 0;
  27.557 -+
  27.558 -+	mutex_lock(&pdom_mutex);
  27.559 -+
  27.560 -+	startp = tmpbuf;
  27.561 -+	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
  27.562 -+	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
  27.563 -+		val = simple_strtoul(startp, &endp, 0);
  27.564 -+		if (endp == startp)
  27.565 -+			break;
  27.566 -+		while (ispunct(*endp) || isspace(*endp))
  27.567 -+			endp++;
  27.568 -+		passive_domains[i] = val;
  27.569 -+		if (passive_domains[i] != val)
  27.570 -+			/* Overflow, force error below */
  27.571 -+			i = MAX_OPROF_DOMAINS + 1;
  27.572 -+		startp = endp;
  27.573 -+	}
  27.574 -+	/* Force error on trailing junk */
  27.575 -+	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
  27.576 -+
  27.577 -+	kfree(tmpbuf);
  27.578 -+
  27.579 -+	if (pdomains > MAX_OPROF_DOMAINS
  27.580 -+	    || oprofile_set_passive(passive_domains, pdomains)) {
  27.581 -+		pdomains = 0;
  27.582 -+		retval = -EINVAL;
  27.583 -+	}
  27.584 -+
  27.585 -+	mutex_unlock(&pdom_mutex);
  27.586 -+	return retval;
  27.587 -+}
  27.588 -+
  27.589 -+static ssize_t pdomain_read(struct file * file, char __user * buf, 
  27.590 -+			    size_t count, loff_t * offset)
  27.591 -+{
  27.592 -+	char * tmpbuf;
  27.593 -+	size_t len;
  27.594 -+	int i;
  27.595 -+	ssize_t retval;
  27.596 -+
  27.597 -+	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
  27.598 -+		return -ENOMEM;
  27.599 -+
  27.600 -+	mutex_lock(&pdom_mutex);
  27.601 -+
  27.602 -+	len = 0;
  27.603 -+	for (i = 0; i < pdomains; i++)
  27.604 -+		len += snprintf(tmpbuf + len,
  27.605 -+				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
  27.606 -+				"%u ", passive_domains[i]);
  27.607 -+	WARN_ON(len > TMPBUFSIZE);
  27.608 -+	if (len != 0 && len <= TMPBUFSIZE)
  27.609 -+		tmpbuf[len-1] = '\n';
  27.610 -+
  27.611 -+	mutex_unlock(&pdom_mutex);
  27.612 -+
  27.613 -+	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
  27.614 -+
  27.615 -+	kfree(tmpbuf);
  27.616 -+	return retval;
  27.617 -+}
  27.618 -+
  27.619 -+static struct file_operations passive_domain_ops = {
  27.620 -+	.read		= pdomain_read,
  27.621 -+	.write		= pdomain_write,
  27.622 -+};
  27.623 -+
  27.624 - void oprofile_create_files(struct super_block * sb, struct dentry * root)
  27.625 - {
  27.626 - 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
  27.627 - 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
  27.628 -+	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
  27.629 -+	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
  27.630 - 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
  27.631 - 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
  27.632 - 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
  27.633 -diff -pruN ../orig-linux-2.6.16.29/include/linux/oprofile.h ./include/linux/oprofile.h
  27.634 ---- ../orig-linux-2.6.16.29/include/linux/oprofile.h	2006-11-06 14:46:42.000000000 -0800
  27.635 -+++ ./include/linux/oprofile.h	2006-11-06 14:47:55.000000000 -0800
  27.636 -@@ -16,6 +16,8 @@
  27.637 - #include <linux/types.h>
  27.638 - #include <linux/spinlock.h>
  27.639 - #include <asm/atomic.h>
  27.640 -+
  27.641 -+#include <xen/interface/xenoprof.h>
  27.642 -  
  27.643 - struct super_block;
  27.644 - struct dentry;
  27.645 -@@ -27,6 +29,11 @@ struct oprofile_operations {
  27.646 - 	/* create any necessary configuration files in the oprofile fs.
  27.647 - 	 * Optional. */
  27.648 - 	int (*create_files)(struct super_block * sb, struct dentry * root);
  27.649 -+	/* setup active domains with Xen */
  27.650 -+	int (*set_active)(int *active_domains, unsigned int adomains);
  27.651 -+        /* setup passive domains with Xen */
  27.652 -+        int (*set_passive)(int *passive_domains, unsigned int pdomains);
  27.653 -+	
  27.654 - 	/* Do any necessary interrupt setup. Optional. */
  27.655 - 	int (*setup)(void);
  27.656 - 	/* Do any necessary interrupt shutdown. Optional. */
  27.657 -@@ -68,6 +75,8 @@ void oprofile_add_pc(unsigned long pc, i
  27.658 - /* add a backtrace entry, to be called from the ->backtrace callback */
  27.659 - void oprofile_add_trace(unsigned long eip);
  27.660 - 
  27.661 -+/* add a domain switch entry */
  27.662 -+int oprofile_add_domain_switch(int32_t domain_id);
  27.663 - 
  27.664 - /**
  27.665 -  * Create a file of the given name as a child of the given root, with
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/patches/linux-2.6.16.30/blktap-aio-16_03_06.patch	Mon Nov 27 13:50:01 2006 +0000
    28.3 @@ -0,0 +1,294 @@
    28.4 +diff -pruN ../orig-linux-2.6.16.29/fs/aio.c ./fs/aio.c
    28.5 +--- ../orig-linux-2.6.16.29/fs/aio.c	2006-09-12 19:02:10.000000000 +0100
    28.6 ++++ ./fs/aio.c	2006-09-19 13:58:49.000000000 +0100
    28.7 +@@ -34,6 +34,11 @@
    28.8 + #include <asm/uaccess.h>
    28.9 + #include <asm/mmu_context.h>
   28.10 + 
   28.11 ++#ifdef CONFIG_EPOLL
   28.12 ++#include <linux/poll.h>
   28.13 ++#include <linux/eventpoll.h>
   28.14 ++#endif
   28.15 ++
   28.16 + #if DEBUG > 1
   28.17 + #define dprintk		printk
   28.18 + #else
   28.19 +@@ -1016,6 +1021,10 @@ put_rq:
   28.20 + 	if (waitqueue_active(&ctx->wait))
   28.21 + 		wake_up(&ctx->wait);
   28.22 + 
   28.23 ++#ifdef CONFIG_EPOLL
   28.24 ++	if (ctx->file && waitqueue_active(&ctx->poll_wait))
   28.25 ++		wake_up(&ctx->poll_wait);
   28.26 ++#endif
   28.27 + 	if (ret)
   28.28 + 		put_ioctx(ctx);
   28.29 + 
   28.30 +@@ -1025,6 +1034,8 @@ put_rq:
   28.31 + /* aio_read_evt
   28.32 +  *	Pull an event off of the ioctx's event ring.  Returns the number of 
   28.33 +  *	events fetched (0 or 1 ;-)
   28.34 ++ *	If ent parameter is 0, just returns the number of events that would
   28.35 ++ *	be fetched.
   28.36 +  *	FIXME: make this use cmpxchg.
   28.37 +  *	TODO: make the ringbuffer user mmap()able (requires FIXME).
   28.38 +  */
   28.39 +@@ -1047,13 +1058,18 @@ static int aio_read_evt(struct kioctx *i
   28.40 + 
   28.41 + 	head = ring->head % info->nr;
   28.42 + 	if (head != ring->tail) {
   28.43 +-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
   28.44 +-		*ent = *evp;
   28.45 +-		head = (head + 1) % info->nr;
   28.46 +-		smp_mb(); /* finish reading the event before updatng the head */
   28.47 +-		ring->head = head;
   28.48 +-		ret = 1;
   28.49 +-		put_aio_ring_event(evp, KM_USER1);
   28.50 ++		if (ent) { /* event requested */
   28.51 ++			struct io_event *evp =
   28.52 ++				aio_ring_event(info, head, KM_USER1);
   28.53 ++			*ent = *evp;
   28.54 ++			head = (head + 1) % info->nr;
   28.55 ++			/* finish reading the event before updatng the head */
   28.56 ++			smp_mb();
   28.57 ++			ring->head = head;
   28.58 ++			ret = 1;
   28.59 ++			put_aio_ring_event(evp, KM_USER1);
   28.60 ++		} else /* only need to know availability */
   28.61 ++			ret = 1;
   28.62 + 	}
   28.63 + 	spin_unlock(&info->ring_lock);
   28.64 + 
   28.65 +@@ -1236,9 +1252,78 @@ static void io_destroy(struct kioctx *io
   28.66 + 
   28.67 + 	aio_cancel_all(ioctx);
   28.68 + 	wait_for_all_aios(ioctx);
   28.69 ++#ifdef CONFIG_EPOLL
   28.70 ++	/* forget the poll file, but it's up to the user to close it */
   28.71 ++	if (ioctx->file) {
   28.72 ++		ioctx->file->private_data = 0;
   28.73 ++		ioctx->file = 0;
   28.74 ++	}
   28.75 ++#endif
   28.76 + 	put_ioctx(ioctx);	/* once for the lookup */
   28.77 + }
   28.78 + 
   28.79 ++#ifdef CONFIG_EPOLL
   28.80 ++
   28.81 ++static int aio_queue_fd_close(struct inode *inode, struct file *file)
   28.82 ++{
   28.83 ++	struct kioctx *ioctx = file->private_data;
   28.84 ++	if (ioctx) {
   28.85 ++		file->private_data = 0;
   28.86 ++		spin_lock_irq(&ioctx->ctx_lock);
   28.87 ++		ioctx->file = 0;
   28.88 ++		spin_unlock_irq(&ioctx->ctx_lock);
   28.89 ++	}
   28.90 ++	return 0;
   28.91 ++}
   28.92 ++
   28.93 ++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
   28.94 ++{	unsigned int pollflags = 0;
   28.95 ++	struct kioctx *ioctx = file->private_data;
   28.96 ++
   28.97 ++	if (ioctx) {
   28.98 ++
   28.99 ++		spin_lock_irq(&ioctx->ctx_lock);
  28.100 ++		/* Insert inside our poll wait queue */
  28.101 ++		poll_wait(file, &ioctx->poll_wait, wait);
  28.102 ++
  28.103 ++		/* Check our condition */
  28.104 ++		if (aio_read_evt(ioctx, 0))
  28.105 ++			pollflags = POLLIN | POLLRDNORM;
  28.106 ++		spin_unlock_irq(&ioctx->ctx_lock);
  28.107 ++	}
  28.108 ++
  28.109 ++	return pollflags;
  28.110 ++}
  28.111 ++
  28.112 ++static struct file_operations aioq_fops = {
  28.113 ++	.release	= aio_queue_fd_close,
  28.114 ++	.poll		= aio_queue_fd_poll
  28.115 ++};
  28.116 ++
  28.117 ++/* make_aio_fd:
  28.118 ++ *  Create a file descriptor that can be used to poll the event queue.
  28.119 ++ *  Based and piggybacked on the excellent epoll code.
  28.120 ++ */
  28.121 ++
  28.122 ++static int make_aio_fd(struct kioctx *ioctx)
  28.123 ++{
  28.124 ++	int error, fd;
  28.125 ++	struct inode *inode;
  28.126 ++	struct file *file;
  28.127 ++
  28.128 ++	error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
  28.129 ++	if (error)
  28.130 ++		return error;
  28.131 ++
  28.132 ++	/* associate the file with the IO context */
  28.133 ++	file->private_data = ioctx;
  28.134 ++	ioctx->file = file;
  28.135 ++	init_waitqueue_head(&ioctx->poll_wait);
  28.136 ++	return fd;
  28.137 ++}
  28.138 ++#endif
  28.139 ++
  28.140 ++
  28.141 + /* sys_io_setup:
  28.142 +  *	Create an aio_context capable of receiving at least nr_events.
  28.143 +  *	ctxp must not point to an aio_context that already exists, and
  28.144 +@@ -1251,18 +1336,30 @@ static void io_destroy(struct kioctx *io
  28.145 +  *	resources are available.  May fail with -EFAULT if an invalid
  28.146 +  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
  28.147 +  *	implemented.
  28.148 ++ *
  28.149 ++ *	To request a selectable fd, the user context has to be initialized
  28.150 ++ *	to 1, instead of 0, and the return value is the fd.
  28.151 ++ *	This keeps the system call compatible, since a non-zero value
  28.152 ++ *	was not allowed so far.
  28.153 +  */
  28.154 + asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
  28.155 + {
  28.156 + 	struct kioctx *ioctx = NULL;
  28.157 + 	unsigned long ctx;
  28.158 + 	long ret;
  28.159 ++	int make_fd = 0;
  28.160 + 
  28.161 + 	ret = get_user(ctx, ctxp);
  28.162 + 	if (unlikely(ret))
  28.163 + 		goto out;
  28.164 + 
  28.165 + 	ret = -EINVAL;
  28.166 ++#ifdef CONFIG_EPOLL
  28.167 ++	if (ctx == 1) {
  28.168 ++		make_fd = 1;
  28.169 ++		ctx = 0;
  28.170 ++	}
  28.171 ++#endif
  28.172 + 	if (unlikely(ctx || nr_events == 0)) {
  28.173 + 		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
  28.174 + 		         ctx, nr_events);
  28.175 +@@ -1273,8 +1370,12 @@ asmlinkage long sys_io_setup(unsigned nr
  28.176 + 	ret = PTR_ERR(ioctx);
  28.177 + 	if (!IS_ERR(ioctx)) {
  28.178 + 		ret = put_user(ioctx->user_id, ctxp);
  28.179 +-		if (!ret)
  28.180 +-			return 0;
  28.181 ++#ifdef CONFIG_EPOLL
  28.182 ++		if (make_fd && ret >= 0)
  28.183 ++			ret = make_aio_fd(ioctx);
  28.184 ++#endif
  28.185 ++		if (ret >= 0)
  28.186 ++			return ret;
  28.187 + 
  28.188 + 		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
  28.189 + 		io_destroy(ioctx);
  28.190 +diff -pruN ../orig-linux-2.6.16.29/fs/eventpoll.c ./fs/eventpoll.c
  28.191 +--- ../orig-linux-2.6.16.29/fs/eventpoll.c	2006-09-12 19:02:10.000000000 +0100
  28.192 ++++ ./fs/eventpoll.c	2006-09-19 13:58:49.000000000 +0100
  28.193 +@@ -235,8 +235,6 @@ struct ep_pqueue {
  28.194 + 
  28.195 + static void ep_poll_safewake_init(struct poll_safewake *psw);
  28.196 + static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
  28.197 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  28.198 +-		    struct eventpoll *ep);
  28.199 + static int ep_alloc(struct eventpoll **pep);
  28.200 + static void ep_free(struct eventpoll *ep);
  28.201 + static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
  28.202 +@@ -266,7 +264,7 @@ static int ep_events_transfer(struct eve
  28.203 + static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
  28.204 + 		   int maxevents, long timeout);
  28.205 + static int eventpollfs_delete_dentry(struct dentry *dentry);
  28.206 +-static struct inode *ep_eventpoll_inode(void);
  28.207 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops);
  28.208 + static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
  28.209 + 					      int flags, const char *dev_name,
  28.210 + 					      void *data);
  28.211 +@@ -525,7 +523,7 @@ asmlinkage long sys_epoll_create(int siz
  28.212 + 	 * Creates all the items needed to setup an eventpoll file. That is,
  28.213 + 	 * a file structure, and inode and a free file descriptor.
  28.214 + 	 */
  28.215 +-	error = ep_getfd(&fd, &inode, &file, ep);
  28.216 ++	error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
  28.217 + 	if (error)
  28.218 + 		goto eexit_2;
  28.219 + 
  28.220 +@@ -710,8 +708,8 @@ eexit_1:
  28.221 + /*
  28.222 +  * Creates the file descriptor to be used by the epoll interface.
  28.223 +  */
  28.224 +-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  28.225 +-		    struct eventpoll *ep)
  28.226 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  28.227 ++		    struct eventpoll *ep, struct file_operations *fops)
  28.228 + {
  28.229 + 	struct qstr this;
  28.230 + 	char name[32];
  28.231 +@@ -727,7 +725,7 @@ static int ep_getfd(int *efd, struct ino
  28.232 + 		goto eexit_1;
  28.233 + 
  28.234 + 	/* Allocates an inode from the eventpoll file system */
  28.235 +-	inode = ep_eventpoll_inode();
  28.236 ++	inode = ep_eventpoll_inode(fops);
  28.237 + 	error = PTR_ERR(inode);
  28.238 + 	if (IS_ERR(inode))
  28.239 + 		goto eexit_2;
  28.240 +@@ -758,7 +756,7 @@ static int ep_getfd(int *efd, struct ino
  28.241 + 
  28.242 + 	file->f_pos = 0;
  28.243 + 	file->f_flags = O_RDONLY;
  28.244 +-	file->f_op = &eventpoll_fops;
  28.245 ++	file->f_op = fops;
  28.246 + 	file->f_mode = FMODE_READ;
  28.247 + 	file->f_version = 0;
  28.248 + 	file->private_data = ep;
  28.249 +@@ -1574,7 +1572,7 @@ static int eventpollfs_delete_dentry(str
  28.250 + }
  28.251 + 
  28.252 + 
  28.253 +-static struct inode *ep_eventpoll_inode(void)
  28.254 ++static struct inode *ep_eventpoll_inode(struct file_operations *fops)
  28.255 + {
  28.256 + 	int error = -ENOMEM;
  28.257 + 	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
  28.258 +@@ -1582,7 +1580,7 @@ static struct inode *ep_eventpoll_inode(
  28.259 + 	if (!inode)
  28.260 + 		goto eexit_1;
  28.261 + 
  28.262 +-	inode->i_fop = &eventpoll_fops;
  28.263 ++	inode->i_fop = fops;
  28.264 + 
  28.265 + 	/*
  28.266 + 	 * Mark the inode dirty from the very beginning,
  28.267 +diff -pruN ../orig-linux-2.6.16.29/include/linux/aio.h ./include/linux/aio.h
  28.268 +--- ../orig-linux-2.6.16.29/include/linux/aio.h	2006-09-12 19:02:10.000000000 +0100
  28.269 ++++ ./include/linux/aio.h	2006-09-19 13:58:49.000000000 +0100
  28.270 +@@ -191,6 +191,11 @@ struct kioctx {
  28.271 + 	struct aio_ring_info	ring_info;
  28.272 + 
  28.273 + 	struct work_struct	wq;
  28.274 ++#ifdef CONFIG_EPOLL
  28.275 ++	// poll integration
  28.276 ++	wait_queue_head_t       poll_wait;
  28.277 ++	struct file		*file;
  28.278 ++#endif
  28.279 + };
  28.280 + 
  28.281 + /* prototypes */
  28.282 +diff -pruN ../orig-linux-2.6.16.29/include/linux/eventpoll.h ./include/linux/eventpoll.h
  28.283 +--- ../orig-linux-2.6.16.29/include/linux/eventpoll.h	2006-09-12 19:02:10.000000000 +0100
  28.284 ++++ ./include/linux/eventpoll.h	2006-09-19 13:58:49.000000000 +0100
  28.285 +@@ -86,6 +86,12 @@ static inline void eventpoll_release(str
  28.286 + }
  28.287 + 
  28.288 + 
  28.289 ++/*
  28.290 ++ * called by aio code to create fd that can poll the  aio event queueQ
  28.291 ++ */
  28.292 ++struct eventpoll;
  28.293 ++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
  28.294 ++             struct eventpoll *ep, struct file_operations *fops);
  28.295 + #else
  28.296 + 
  28.297 + static inline void eventpoll_init_file(struct file *file) {}
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/patches/linux-2.6.16.30/device_bind.patch	Mon Nov 27 13:50:01 2006 +0000
    29.3 @@ -0,0 +1,15 @@
    29.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/base/bus.c ./drivers/base/bus.c
    29.5 +--- ../orig-linux-2.6.16.29/drivers/base/bus.c	2006-09-12 19:02:10.000000000 +0100
    29.6 ++++ ./drivers/base/bus.c	2006-09-19 13:58:54.000000000 +0100
    29.7 +@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device
    29.8 + 		up(&dev->sem);
    29.9 + 		if (dev->parent)
   29.10 + 			up(&dev->parent->sem);
   29.11 ++
   29.12 ++		if (err > 0) 		/* success */
   29.13 ++			err = count;
   29.14 ++		else if (err == 0)	/* driver didn't accept device */
   29.15 ++			err = -ENODEV;
   29.16 + 	}
   29.17 + 	put_device(dev);
   29.18 + 	put_bus(bus);
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/patches/linux-2.6.16.30/fix-hz-suspend.patch	Mon Nov 27 13:50:01 2006 +0000
    30.3 @@ -0,0 +1,26 @@
    30.4 +diff -pruN ../orig-linux-2.6.16.29/kernel/timer.c ./kernel/timer.c
    30.5 +--- ../orig-linux-2.6.16.29/kernel/timer.c	2006-09-12 19:02:10.000000000 +0100
    30.6 ++++ ./kernel/timer.c	2006-09-19 13:58:58.000000000 +0100
    30.7 +@@ -555,6 +555,22 @@ found:
    30.8 + 	}
    30.9 + 	spin_unlock(&base->t_base.lock);
   30.10 + 
   30.11 ++	/*
   30.12 ++	 * It can happen that other CPUs service timer IRQs and increment
   30.13 ++	 * jiffies, but we have not yet got a local timer tick to process
   30.14 ++	 * the timer wheels.  In that case, the expiry time can be before
   30.15 ++	 * jiffies, but since the high-resolution timer here is relative to
   30.16 ++	 * jiffies, the default expression when high-resolution timers are
   30.17 ++	 * not active,
   30.18 ++	 *
   30.19 ++	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
   30.20 ++	 *
   30.21 ++	 * would falsely evaluate to true.  If that is the case, just
   30.22 ++	 * return jiffies so that we can immediately fire the local timer
   30.23 ++	 */
   30.24 ++	if (time_before(expires, jiffies))
   30.25 ++		return jiffies;
   30.26 ++
   30.27 + 	if (time_before(hr_expires, expires))
   30.28 + 		return hr_expires;
   30.29 + 
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/patches/linux-2.6.16.30/fix-ide-cd-pio-mode.patch	Mon Nov 27 13:50:01 2006 +0000
    31.3 @@ -0,0 +1,18 @@
    31.4 +diff -pruN ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c ./drivers/ide/ide-lib.c
    31.5 +--- ../orig-linux-2.6.16.29/drivers/ide/ide-lib.c	2006-09-12 19:02:10.000000000 +0100
    31.6 ++++ ./drivers/ide/ide-lib.c	2006-09-19 13:59:03.000000000 +0100
    31.7 +@@ -410,10 +410,10 @@ void ide_toggle_bounce(ide_drive_t *driv
    31.8 + {
    31.9 + 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
   31.10 + 
   31.11 +-	if (!PCI_DMA_BUS_IS_PHYS) {
   31.12 +-		addr = BLK_BOUNCE_ANY;
   31.13 +-	} else if (on && drive->media == ide_disk) {
   31.14 +-		if (HWIF(drive)->pci_dev)
   31.15 ++	if (on && drive->media == ide_disk) {
   31.16 ++		if (!PCI_DMA_BUS_IS_PHYS)
   31.17 ++			addr = BLK_BOUNCE_ANY;
   31.18 ++		else if (HWIF(drive)->pci_dev)
   31.19 + 			addr = HWIF(drive)->pci_dev->dma_mask;
   31.20 + 	}
   31.21 + 
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/patches/linux-2.6.16.30/i386-mach-io-check-nmi.patch	Mon Nov 27 13:50:01 2006 +0000
    32.3 @@ -0,0 +1,45 @@
    32.4 +diff -pruN ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c ./arch/i386/kernel/traps.c
    32.5 +--- ../orig-linux-2.6.16.29/arch/i386/kernel/traps.c	2006-09-12 19:02:10.000000000 +0100
    32.6 ++++ ./arch/i386/kernel/traps.c	2006-09-19 13:59:06.000000000 +0100
    32.7 +@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
    32.8 + 
    32.9 + static void io_check_error(unsigned char reason, struct pt_regs * regs)
   32.10 + {
   32.11 +-	unsigned long i;
   32.12 +-
   32.13 + 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
   32.14 + 	show_registers(regs);
   32.15 + 
   32.16 + 	/* Re-enable the IOCK line, wait for a few seconds */
   32.17 +-	reason = (reason & 0xf) | 8;
   32.18 +-	outb(reason, 0x61);
   32.19 +-	i = 2000;
   32.20 +-	while (--i) udelay(1000);
   32.21 +-	reason &= ~8;
   32.22 +-	outb(reason, 0x61);
   32.23 ++	clear_io_check_error(reason);
   32.24 + }
   32.25 + 
   32.26 + static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
   32.27 +diff -pruN ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h ./include/asm-i386/mach-default/mach_traps.h
   32.28 +--- ../orig-linux-2.6.16.29/include/asm-i386/mach-default/mach_traps.h	2006-09-12 19:02:10.000000000 +0100
   32.29 ++++ ./include/asm-i386/mach-default/mach_traps.h	2006-09-19 13:59:06.000000000 +0100
   32.30 +@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
   32.31 + 	outb(reason, 0x61);
   32.32 + }
   32.33 + 
   32.34 ++static inline void clear_io_check_error(unsigned char reason)
   32.35 ++{
   32.36 ++	unsigned long i;
   32.37 ++
   32.38 ++	reason = (reason & 0xf) | 8;
   32.39 ++	outb(reason, 0x61);
   32.40 ++	i = 2000;
   32.41 ++	while (--i) udelay(1000);
   32.42 ++	reason &= ~8;
   32.43 ++	outb(reason, 0x61);
   32.44 ++}
   32.45 ++
   32.46 + static inline unsigned char get_nmi_reason(void)
   32.47 + {
   32.48 + 	return inb(0x61);
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/patches/linux-2.6.16.30/ipv6-no-autoconf.patch	Mon Nov 27 13:50:01 2006 +0000
    33.3 @@ -0,0 +1,19 @@
    33.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv6/addrconf.c ./net/ipv6/addrconf.c
    33.5 +--- ../orig-linux-2.6.16.29/net/ipv6/addrconf.c	2006-09-12 19:02:10.000000000 +0100
    33.6 ++++ ./net/ipv6/addrconf.c	2006-09-19 13:59:11.000000000 +0100
    33.7 +@@ -2471,6 +2471,7 @@ static void addrconf_dad_start(struct in
    33.8 + 	spin_lock_bh(&ifp->lock);
    33.9 + 
   33.10 + 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
   33.11 ++	    !(dev->flags&IFF_MULTICAST) ||
   33.12 + 	    !(ifp->flags&IFA_F_TENTATIVE)) {
   33.13 + 		ifp->flags &= ~IFA_F_TENTATIVE;
   33.14 + 		spin_unlock_bh(&ifp->lock);
   33.15 +@@ -2555,6 +2556,7 @@ static void addrconf_dad_completed(struc
   33.16 + 	if (ifp->idev->cnf.forwarding == 0 &&
   33.17 + 	    ifp->idev->cnf.rtr_solicits > 0 &&
   33.18 + 	    (dev->flags&IFF_LOOPBACK) == 0 &&
   33.19 ++	    (dev->flags & IFF_MULTICAST) &&
   33.20 + 	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
   33.21 + 		struct in6_addr all_routers;
   33.22 + 
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/patches/linux-2.6.16.30/net-csum.patch	Mon Nov 27 13:50:01 2006 +0000
    34.3 @@ -0,0 +1,63 @@
    34.4 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c ./net/ipv4/netfilter/ip_nat_proto_tcp.c
    34.5 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-12 19:02:10.000000000 +0100
    34.6 ++++ ./net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-19 13:59:15.000000000 +0100
    34.7 +@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
    34.8 + 	if (hdrsize < sizeof(*hdr))
    34.9 + 		return 1;
   34.10 + 
   34.11 +-	hdr->check = ip_nat_cheat_check(~oldip, newip,
   34.12 ++#ifdef CONFIG_XEN
   34.13 ++	if ((*pskb)->proto_csum_blank)
   34.14 ++		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   34.15 ++	else
   34.16 ++#endif
   34.17 ++		hdr->check = ip_nat_cheat_check(~oldip, newip,
   34.18 + 					ip_nat_cheat_check(oldport ^ 0xFFFF,
   34.19 + 							   newport,
   34.20 + 							   hdr->check));
   34.21 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c ./net/ipv4/netfilter/ip_nat_proto_udp.c
   34.22 +--- ../orig-linux-2.6.16.29/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-12 19:02:10.000000000 +0100
   34.23 ++++ ./net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-19 13:59:15.000000000 +0100
   34.24 +@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
   34.25 + 		newport = tuple->dst.u.udp.port;
   34.26 + 		portptr = &hdr->dest;
   34.27 + 	}
   34.28 +-	if (hdr->check) /* 0 is a special case meaning no checksum */
   34.29 +-		hdr->check = ip_nat_cheat_check(~oldip, newip,
   34.30 ++	if (hdr->check) { /* 0 is a special case meaning no checksum */
   34.31 ++#ifdef CONFIG_XEN
   34.32 ++		if ((*pskb)->proto_csum_blank)
   34.33 ++			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
   34.34 ++		else
   34.35 ++#endif
   34.36 ++			hdr->check = ip_nat_cheat_check(~oldip, newip,
   34.37 + 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
   34.38 + 							   newport,
   34.39 + 							   hdr->check));
   34.40 ++	}
   34.41 + 	*portptr = newport;
   34.42 + 	return 1;
   34.43 + }
   34.44 +diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
   34.45 +--- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c	2006-09-12 19:02:10.000000000 +0100
   34.46 ++++ ./net/ipv4/xfrm4_output.c	2006-09-19 13:59:15.000000000 +0100
   34.47 +@@ -17,6 +17,8 @@
   34.48 + #include <net/xfrm.h>
   34.49 + #include <net/icmp.h>
   34.50 + 
   34.51 ++extern int skb_checksum_setup(struct sk_buff *skb);
   34.52 ++
   34.53 + /* Add encapsulation header.
   34.54 +  *
   34.55 +  * In transport mode, the IP header will be moved forward to make space
   34.56 +@@ -103,6 +105,10 @@ static int xfrm4_output_one(struct sk_bu
   34.57 + 	struct xfrm_state *x = dst->xfrm;
   34.58 + 	int err;
   34.59 + 	
   34.60 ++	err = skb_checksum_setup(skb);
   34.61 ++	if (err)
   34.62 ++		goto error_nolock;
   34.63 ++
   34.64 + 	if (skb->ip_summed == CHECKSUM_HW) {
   34.65 + 		err = skb_checksum_help(skb, 0);
   34.66 + 		if (err)
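
The CONFIG_XEN branches above handle packets whose transport checksum field still holds an unfinished partial sum (the Xen-specific skb->proto_csum_blank case): the incremental NAT fixup is applied with inverted polarity, (oldip, ~newip) instead of (~oldip, newip), because the stored value is not yet complemented, and the port adjustment is dropped since the ports are not part of the pseudo-header sum anyway (that, at least, is the usual reading of this change). A self-contained demonstration of why the polarity flip gives the right answer; cheat_check() mimics ip_nat_cheat_check() on 16-bit words, and every value below is made up:

    #include <stdio.h>
    #include <stdint.h>

    /* Fold a 32-bit accumulator into 16 bits with end-around carry. */
    static uint16_t fold(uint32_t s)
    {
            while (s >> 16)
                    s = (s & 0xffff) + (s >> 16);
            return (uint16_t)s;
    }

    /* One's-complement sum of n 16-bit words, seeded with 'seed'. */
    static uint16_t csum16(const uint16_t *w, int n, uint16_t seed)
    {
            uint32_t s = seed;

            while (n--)
                    s += *w++;
            return fold(s);
    }

    /* Userspace stand-in for ip_nat_cheat_check(): RFC 1624-style
     * incremental update of 'oldcheck' when one word changes. */
    static uint16_t cheat_check(uint16_t oldvalinv, uint16_t newval,
                                uint16_t oldcheck)
    {
            uint32_t s = (uint16_t)~oldcheck;

            s += oldvalinv;
            s += newval;
            return (uint16_t)~fold(s);
    }

    int main(void)
    {
            uint16_t data[4] = { 0x1234, 0xabcd, 0x0f0f, 0x4242 };
            uint16_t oldw = data[0], neww = 0x5678;

            /* Case 1: field holds a finished (complemented) checksum. */
            uint16_t check = (uint16_t)~csum16(data, 4, 0);
            uint16_t upd1  = cheat_check((uint16_t)~oldw, neww, check);

            /* Case 2: field holds an unfinished partial sum, as with
             * proto_csum_blank - note the flipped arguments. */
            uint16_t part = csum16(data, 4, 0);
            uint16_t upd2 = cheat_check(oldw, (uint16_t)~neww, part);

            data[0] = neww;
            printf("finished: incremental %04x, recomputed %04x\n",
                   upd1, (uint16_t)~csum16(data, 4, 0));
            printf("partial:  incremental %04x, recomputed %04x\n",
                   upd2, csum16(data, 4, 0));
            return 0;
    }

Both pairs print the same value, which is the property the flipped-argument calls in the patch rely on.
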
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/patches/linux-2.6.16.30/net-gso-0-base.patch	Mon Nov 27 13:50:01 2006 +0000
    35.3 @@ -0,0 +1,2835 @@
    35.4 +diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
    35.5 +--- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt	2006-09-12 19:02:10.000000000 +0100
    35.6 ++++ ./Documentation/networking/netdevices.txt	2006-09-19 13:59:20.000000000 +0100
    35.7 +@@ -42,9 +42,9 @@ dev->get_stats:
    35.8 + 	Context: nominally process, but don't sleep inside an rwlock
    35.9 + 
   35.10 + dev->hard_start_xmit:
   35.11 +-	Synchronization: dev->xmit_lock spinlock.
   35.12 ++	Synchronization: netif_tx_lock spinlock.
   35.13 + 	When the driver sets NETIF_F_LLTX in dev->features this will be
   35.14 +-	called without holding xmit_lock. In this case the driver 
   35.15 ++	called without holding netif_tx_lock. In this case the driver
   35.16 + 	has to lock by itself when needed. It is recommended to use a try lock
   35.17 + 	for this and return -1 when the spin lock fails. 
   35.18 + 	The locking there should also properly protect against 
   35.19 +@@ -62,12 +62,12 @@ dev->hard_start_xmit:
   35.20 + 	  Only valid when NETIF_F_LLTX is set.
   35.21 + 
   35.22 + dev->tx_timeout:
   35.23 +-	Synchronization: dev->xmit_lock spinlock.
   35.24 ++	Synchronization: netif_tx_lock spinlock.
   35.25 + 	Context: BHs disabled
   35.26 + 	Notes: netif_queue_stopped() is guaranteed true
   35.27 + 
   35.28 + dev->set_multicast_list:
   35.29 +-	Synchronization: dev->xmit_lock spinlock.
   35.30 ++	Synchronization: netif_tx_lock spinlock.
   35.31 + 	Context: BHs disabled
   35.32 + 
   35.33 + dev->poll:
   35.34 +diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
   35.35 +--- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c	2006-09-12 19:02:10.000000000 +0100
   35.36 ++++ ./drivers/block/aoe/aoenet.c	2006-09-19 13:59:20.000000000 +0100
   35.37 +@@ -95,9 +95,8 @@ mac_addr(char addr[6])
   35.38 + static struct sk_buff *
   35.39 + skb_check(struct sk_buff *skb)
   35.40 + {
   35.41 +-	if (skb_is_nonlinear(skb))
   35.42 + 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
   35.43 +-	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
   35.44 ++	if (skb_linearize(skb)) {
   35.45 + 		dev_kfree_skb(skb);
   35.46 + 		return NULL;
   35.47 + 	}
   35.48 +diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
   35.49 +--- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-12 19:02:10.000000000 +0100
   35.50 ++++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 13:59:20.000000000 +0100
   35.51 +@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
   35.52 + 
   35.53 + 	ipoib_mcast_stop_thread(dev, 0);
   35.54 + 
   35.55 +-	spin_lock_irqsave(&dev->xmit_lock, flags);
   35.56 ++	local_irq_save(flags);
   35.57 ++	netif_tx_lock(dev);
   35.58 + 	spin_lock(&priv->lock);
   35.59 + 
   35.60 + 	/*
   35.61 +@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
   35.62 + 	}
   35.63 + 
   35.64 + 	spin_unlock(&priv->lock);
   35.65 +-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
   35.66 ++	netif_tx_unlock(dev);
   35.67 ++	local_irq_restore(flags);
   35.68 + 
   35.69 + 	/* We have to cancel outside of the spinlock */
   35.70 + 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
   35.71 +diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
   35.72 +--- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c	2006-09-12 19:02:10.000000000 +0100
   35.73 ++++ ./drivers/media/dvb/dvb-core/dvb_net.c	2006-09-19 13:59:20.000000000 +0100
   35.74 +@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
   35.75 + 
   35.76 + 	dvb_net_feed_stop(dev);
   35.77 + 	priv->rx_mode = RX_MODE_UNI;
   35.78 +-	spin_lock_bh(&dev->xmit_lock);
   35.79 ++	netif_tx_lock_bh(dev);
   35.80 + 
   35.81 + 	if (dev->flags & IFF_PROMISC) {
   35.82 + 		dprintk("%s: promiscuous mode\n", dev->name);
   35.83 +@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
   35.84 + 		}
   35.85 + 	}
   35.86 + 
   35.87 +-	spin_unlock_bh(&dev->xmit_lock);
   35.88 ++	netif_tx_unlock_bh(dev);
   35.89 + 	dvb_net_feed_start(dev);
   35.90 + }
   35.91 + 
   35.92 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
   35.93 +--- ../orig-linux-2.6.16.29/drivers/net/8139cp.c	2006-09-12 19:02:10.000000000 +0100
   35.94 ++++ ./drivers/net/8139cp.c	2006-09-19 13:59:20.000000000 +0100
   35.95 +@@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
   35.96 + 	entry = cp->tx_head;
   35.97 + 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   35.98 + 	if (dev->features & NETIF_F_TSO)
   35.99 +-		mss = skb_shinfo(skb)->tso_size;
  35.100 ++		mss = skb_shinfo(skb)->gso_size;
  35.101 + 
  35.102 + 	if (skb_shinfo(skb)->nr_frags == 0) {
  35.103 + 		struct cp_desc *txd = &cp->tx_ring[entry];
  35.104 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
  35.105 +--- ../orig-linux-2.6.16.29/drivers/net/bnx2.c	2006-09-12 19:02:10.000000000 +0100
  35.106 ++++ ./drivers/net/bnx2.c	2006-09-19 13:59:20.000000000 +0100
  35.107 +@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
  35.108 + 		skb = tx_buf->skb;
  35.109 + #ifdef BCM_TSO 
  35.110 + 		/* partial BD completions possible with TSO packets */
  35.111 +-		if (skb_shinfo(skb)->tso_size) {
  35.112 ++		if (skb_shinfo(skb)->gso_size) {
  35.113 + 			u16 last_idx, last_ring_idx;
  35.114 + 
  35.115 + 			last_idx = sw_cons +
  35.116 +@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
  35.117 + 	return 1;
  35.118 + }
  35.119 + 
  35.120 +-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
  35.121 ++/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  35.122 +  * from set_multicast.
  35.123 +  */
  35.124 + static void
  35.125 +@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
  35.126 + }
  35.127 + #endif
  35.128 + 
  35.129 +-/* Called with dev->xmit_lock.
  35.130 ++/* Called with netif_tx_lock.
  35.131 +  * hard_start_xmit is pseudo-lockless - a lock is only required when
  35.132 +  * the tx queue is full. This way, we get the benefit of lockless
  35.133 +  * operations most of the time without the complexities to handle
  35.134 +@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
  35.135 + 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
  35.136 + 	}
  35.137 + #ifdef BCM_TSO 
  35.138 +-	if ((mss = skb_shinfo(skb)->tso_size) &&
  35.139 ++	if ((mss = skb_shinfo(skb)->gso_size) &&
  35.140 + 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
  35.141 + 		u32 tcp_opt_len, ip_tcp_len;
  35.142 + 
  35.143 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
  35.144 +--- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c	2006-09-12 19:02:10.000000000 +0100
  35.145 ++++ ./drivers/net/bonding/bond_main.c	2006-09-19 13:59:20.000000000 +0100
  35.146 +@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
  35.147 + }
  35.148 + 
  35.149 + #define BOND_INTERSECT_FEATURES \
  35.150 +-	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
  35.151 +-	NETIF_F_TSO|NETIF_F_UFO)
  35.152 ++	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
  35.153 + 
  35.154 + /* 
  35.155 +  * Compute the common dev->feature set available to all slaves.  Some
  35.156 +@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
  35.157 + 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
  35.158 + 
  35.159 + 	if ((features & NETIF_F_SG) && 
  35.160 +-	    !(features & (NETIF_F_IP_CSUM |
  35.161 +-			  NETIF_F_NO_CSUM |
  35.162 +-			  NETIF_F_HW_CSUM)))
  35.163 ++	    !(features & NETIF_F_ALL_CSUM))
  35.164 + 		features &= ~NETIF_F_SG;
  35.165 + 
  35.166 + 	/* 
  35.167 +@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
  35.168 + 	 */
  35.169 + 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
  35.170 + 
  35.171 +-	/* don't acquire bond device's xmit_lock when 
  35.172 ++	/* don't acquire bond device's netif_tx_lock when
  35.173 + 	 * transmitting */
  35.174 + 	bond_dev->features |= NETIF_F_LLTX;
  35.175 + 
  35.176 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
  35.177 +--- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c	2006-09-12 19:02:10.000000000 +0100
  35.178 ++++ ./drivers/net/chelsio/sge.c	2006-09-19 13:59:20.000000000 +0100
  35.179 +@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  35.180 + 	struct cpl_tx_pkt *cpl;
  35.181 + 
  35.182 + #ifdef NETIF_F_TSO
  35.183 +-	if (skb_shinfo(skb)->tso_size) {
  35.184 ++	if (skb_shinfo(skb)->gso_size) {
  35.185 + 		int eth_type;
  35.186 + 		struct cpl_tx_pkt_lso *hdr;
  35.187 + 
  35.188 +@@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
  35.189 + 		hdr->ip_hdr_words = skb->nh.iph->ihl;
  35.190 + 		hdr->tcp_hdr_words = skb->h.th->doff;
  35.191 + 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
  35.192 +-						skb_shinfo(skb)->tso_size));
  35.193 ++						skb_shinfo(skb)->gso_size));
  35.194 + 		hdr->len = htonl(skb->len - sizeof(*hdr));
  35.195 + 		cpl = (struct cpl_tx_pkt *)hdr;
  35.196 + 		sge->stats.tx_lso_pkts++;
  35.197 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
  35.198 +--- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c	2006-09-12 19:02:10.000000000 +0100
  35.199 ++++ ./drivers/net/e1000/e1000_main.c	2006-09-19 13:59:20.000000000 +0100
  35.200 +@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
  35.201 + 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  35.202 + 	int err;
  35.203 + 
  35.204 +-	if (skb_shinfo(skb)->tso_size) {
  35.205 ++	if (skb_shinfo(skb)->gso_size) {
  35.206 + 		if (skb_header_cloned(skb)) {
  35.207 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  35.208 + 			if (err)
  35.209 +@@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
  35.210 + 		}
  35.211 + 
  35.212 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  35.213 +-		mss = skb_shinfo(skb)->tso_size;
  35.214 ++		mss = skb_shinfo(skb)->gso_size;
  35.215 + 		if (skb->protocol == ntohs(ETH_P_IP)) {
  35.216 + 			skb->nh.iph->tot_len = 0;
  35.217 + 			skb->nh.iph->check = 0;
  35.218 +@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
  35.219 + 		 * tso gets written back prematurely before the data is fully
  35.220 + 		 * DMAd to the controller */
  35.221 + 		if (!skb->data_len && tx_ring->last_tx_tso &&
  35.222 +-				!skb_shinfo(skb)->tso_size) {
  35.223 ++				!skb_shinfo(skb)->gso_size) {
  35.224 + 			tx_ring->last_tx_tso = 0;
  35.225 + 			size -= 4;
  35.226 + 		}
  35.227 +@@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  35.228 + 	}
  35.229 + 
  35.230 + #ifdef NETIF_F_TSO
  35.231 +-	mss = skb_shinfo(skb)->tso_size;
  35.232 ++	mss = skb_shinfo(skb)->gso_size;
  35.233 + 	/* The controller does a simple calculation to 
  35.234 + 	 * make sure there is enough room in the FIFO before
  35.235 + 	 * initiating the DMA for each buffer.  The calc is:
  35.236 +@@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
  35.237 + #ifdef NETIF_F_TSO
  35.238 + 	/* Controller Erratum workaround */
  35.239 + 	if (!skb->data_len && tx_ring->last_tx_tso &&
  35.240 +-		!skb_shinfo(skb)->tso_size)
  35.241 ++		!skb_shinfo(skb)->gso_size)
  35.242 + 		count++;
  35.243 + #endif
  35.244 + 
  35.245 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
  35.246 +--- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c	2006-09-12 19:02:10.000000000 +0100
  35.247 ++++ ./drivers/net/forcedeth.c	2006-09-19 13:59:20.000000000 +0100
  35.248 +@@ -482,9 +482,9 @@ typedef union _ring_type {
  35.249 +  * critical parts:
  35.250 +  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  35.251 +  *	by the arch code for interrupts.
  35.252 +- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
  35.253 ++ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  35.254 +  *	needs dev->priv->lock :-(
  35.255 +- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
  35.256 ++ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  35.257 +  */
  35.258 + 
  35.259 + /* in dev: base, irq */
  35.260 +@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
  35.261 + 
  35.262 + /*
  35.263 +  * nv_start_xmit: dev->hard_start_xmit function
  35.264 +- * Called with dev->xmit_lock held.
  35.265 ++ * Called with netif_tx_lock held.
  35.266 +  */
  35.267 + static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  35.268 + {
  35.269 +@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
  35.270 + 	np->tx_skbuff[nr] = skb;
  35.271 + 
  35.272 + #ifdef NETIF_F_TSO
  35.273 +-	if (skb_shinfo(skb)->tso_size)
  35.274 +-		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  35.275 ++	if (skb_shinfo(skb)->gso_size)
  35.276 ++		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  35.277 + 	else
  35.278 + #endif
  35.279 + 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  35.280 +@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
  35.281 + 
  35.282 + /*
  35.283 +  * nv_tx_timeout: dev->tx_timeout function
  35.284 +- * Called with dev->xmit_lock held.
  35.285 ++ * Called with netif_tx_lock held.
  35.286 +  */
  35.287 + static void nv_tx_timeout(struct net_device *dev)
  35.288 + {
  35.289 +@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
  35.290 + 		 * Changing the MTU is a rare event, it shouldn't matter.
  35.291 + 		 */
  35.292 + 		disable_irq(dev->irq);
  35.293 +-		spin_lock_bh(&dev->xmit_lock);
  35.294 ++		netif_tx_lock_bh(dev);
  35.295 + 		spin_lock(&np->lock);
  35.296 + 		/* stop engines */
  35.297 + 		nv_stop_rx(dev);
  35.298 +@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
  35.299 + 		nv_start_rx(dev);
  35.300 + 		nv_start_tx(dev);
  35.301 + 		spin_unlock(&np->lock);
  35.302 +-		spin_unlock_bh(&dev->xmit_lock);
  35.303 ++		netif_tx_unlock_bh(dev);
  35.304 + 		enable_irq(dev->irq);
  35.305 + 	}
  35.306 + 	return 0;
  35.307 +@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
  35.308 + 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  35.309 + 
  35.310 + 	if (netif_running(dev)) {
  35.311 +-		spin_lock_bh(&dev->xmit_lock);
  35.312 ++		netif_tx_lock_bh(dev);
  35.313 + 		spin_lock_irq(&np->lock);
  35.314 + 
  35.315 + 		/* stop rx engine */
  35.316 +@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
  35.317 + 		/* restart rx engine */
  35.318 + 		nv_start_rx(dev);
  35.319 + 		spin_unlock_irq(&np->lock);
  35.320 +-		spin_unlock_bh(&dev->xmit_lock);
  35.321 ++		netif_tx_unlock_bh(dev);
  35.322 + 	} else {
  35.323 + 		nv_copy_mac_to_hw(dev);
  35.324 + 	}
  35.325 +@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
  35.326 + 
  35.327 + /*
  35.328 +  * nv_set_multicast: dev->set_multicast function
  35.329 +- * Called with dev->xmit_lock held.
  35.330 ++ * Called with netif_tx_lock held.
  35.331 +  */
  35.332 + static void nv_set_multicast(struct net_device *dev)
  35.333 + {
  35.334 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
  35.335 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c	2006-09-12 19:02:10.000000000 +0100
  35.336 ++++ ./drivers/net/hamradio/6pack.c	2006-09-19 13:59:20.000000000 +0100
  35.337 +@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
  35.338 + {
  35.339 + 	struct sockaddr_ax25 *sa = addr;
  35.340 + 
  35.341 +-	spin_lock_irq(&dev->xmit_lock);
  35.342 ++	netif_tx_lock_bh(dev);
  35.343 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  35.344 +-	spin_unlock_irq(&dev->xmit_lock);
  35.345 ++	netif_tx_unlock_bh(dev);
  35.346 + 
  35.347 + 	return 0;
  35.348 + }
  35.349 +@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
  35.350 + 			break;
  35.351 + 		}
  35.352 + 
  35.353 +-		spin_lock_irq(&dev->xmit_lock);
  35.354 ++		netif_tx_lock_bh(dev);
  35.355 + 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
  35.356 +-		spin_unlock_irq(&dev->xmit_lock);
  35.357 ++		netif_tx_unlock_bh(dev);
  35.358 + 
  35.359 + 		err = 0;
  35.360 + 		break;
  35.361 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
  35.362 +--- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c	2006-09-12 19:02:10.000000000 +0100
  35.363 ++++ ./drivers/net/hamradio/mkiss.c	2006-09-19 13:59:20.000000000 +0100
  35.364 +@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
  35.365 + {
  35.366 + 	struct sockaddr_ax25 *sa = addr;
  35.367 + 
  35.368 +-	spin_lock_irq(&dev->xmit_lock);
  35.369 ++	netif_tx_lock_bh(dev);
  35.370 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
  35.371 +-	spin_unlock_irq(&dev->xmit_lock);
  35.372 ++	netif_tx_unlock_bh(dev);
  35.373 + 
  35.374 + 	return 0;
  35.375 + }
  35.376 +@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
  35.377 + 			break;
  35.378 + 		}
  35.379 + 
  35.380 +-		spin_lock_irq(&dev->xmit_lock);
  35.381 ++		netif_tx_lock_bh(dev);
  35.382 + 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
  35.383 +-		spin_unlock_irq(&dev->xmit_lock);
  35.384 ++		netif_tx_unlock_bh(dev);
  35.385 + 
  35.386 + 		err = 0;
  35.387 + 		break;
  35.388 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
  35.389 +--- ../orig-linux-2.6.16.29/drivers/net/ifb.c	2006-09-12 19:02:10.000000000 +0100
  35.390 ++++ ./drivers/net/ifb.c	2006-09-19 13:59:20.000000000 +0100
  35.391 +@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
  35.392 + 	dp->st_task_enter++;
  35.393 + 	if ((skb = skb_peek(&dp->tq)) == NULL) {
  35.394 + 		dp->st_txq_refl_try++;
  35.395 +-		if (spin_trylock(&_dev->xmit_lock)) {
  35.396 ++		if (netif_tx_trylock(_dev)) {
  35.397 + 			dp->st_rxq_enter++;
  35.398 + 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
  35.399 + 				skb_queue_tail(&dp->tq, skb);
  35.400 + 				dp->st_rx2tx_tran++;
  35.401 + 			}
  35.402 +-			spin_unlock(&_dev->xmit_lock);
  35.403 ++			netif_tx_unlock(_dev);
  35.404 + 		} else {
  35.405 + 			/* reschedule */
  35.406 + 			dp->st_rxq_notenter++;
  35.407 +@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
  35.408 + 		}
  35.409 + 	}
  35.410 + 
  35.411 +-	if (spin_trylock(&_dev->xmit_lock)) {
  35.412 ++	if (netif_tx_trylock(_dev)) {
  35.413 + 		dp->st_rxq_check++;
  35.414 + 		if ((skb = skb_peek(&dp->rq)) == NULL) {
  35.415 + 			dp->tasklet_pending = 0;
  35.416 +@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
  35.417 + 				netif_wake_queue(_dev);
  35.418 + 		} else {
  35.419 + 			dp->st_rxq_rsch++;
  35.420 +-			spin_unlock(&_dev->xmit_lock);
  35.421 ++			netif_tx_unlock(_dev);
  35.422 + 			goto resched;
  35.423 + 		}
  35.424 +-		spin_unlock(&_dev->xmit_lock);
  35.425 ++		netif_tx_unlock(_dev);
  35.426 + 	} else {
  35.427 + resched:
  35.428 + 		dp->tasklet_pending = 1;
  35.429 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
  35.430 +--- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c	2006-09-12 19:02:10.000000000 +0100
  35.431 ++++ ./drivers/net/irda/vlsi_ir.c	2006-09-19 13:59:20.000000000 +0100
  35.432 +@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
  35.433 + 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
  35.434 + 			    	break;
  35.435 + 			udelay(100);
  35.436 +-			/* must not sleep here - we are called under xmit_lock! */
  35.437 ++			/* must not sleep here - called under netif_tx_lock! */
  35.438 + 		}
  35.439 + 	}
  35.440 + 
  35.441 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
  35.442 +--- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c	2006-09-12 19:02:10.000000000 +0100
  35.443 ++++ ./drivers/net/ixgb/ixgb_main.c	2006-09-19 13:59:20.000000000 +0100
  35.444 +@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  35.445 + 	uint16_t ipcse, tucse, mss;
  35.446 + 	int err;
  35.447 + 
  35.448 +-	if(likely(skb_shinfo(skb)->tso_size)) {
  35.449 ++	if(likely(skb_shinfo(skb)->gso_size)) {
  35.450 + 		if (skb_header_cloned(skb)) {
  35.451 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  35.452 + 			if (err)
  35.453 +@@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
  35.454 + 		}
  35.455 + 
  35.456 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  35.457 +-		mss = skb_shinfo(skb)->tso_size;
  35.458 ++		mss = skb_shinfo(skb)->gso_size;
  35.459 + 		skb->nh.iph->tot_len = 0;
  35.460 + 		skb->nh.iph->check = 0;
  35.461 + 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
  35.462 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
  35.463 +--- ../orig-linux-2.6.16.29/drivers/net/loopback.c	2006-09-12 19:02:10.000000000 +0100
  35.464 ++++ ./drivers/net/loopback.c	2006-09-19 13:59:20.000000000 +0100
  35.465 +@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
  35.466 + 	struct iphdr *iph = skb->nh.iph;
  35.467 + 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
  35.468 + 	unsigned int doffset = (iph->ihl + th->doff) * 4;
  35.469 +-	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
  35.470 ++	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
  35.471 + 	unsigned int offset = 0;
  35.472 + 	u32 seq = ntohl(th->seq);
  35.473 + 	u16 id  = ntohs(iph->id);
  35.474 +@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 
  35.475 + #endif
  35.476 + 
  35.477 + #ifdef LOOPBACK_TSO
  35.478 +-	if (skb_shinfo(skb)->tso_size) {
  35.479 ++	if (skb_shinfo(skb)->gso_size) {
  35.480 + 		BUG_ON(skb->protocol != htons(ETH_P_IP));
  35.481 + 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
  35.482 + 
  35.483 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
  35.484 +--- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c	2006-09-12 19:02:10.000000000 +0100
  35.485 ++++ ./drivers/net/mv643xx_eth.c	2006-09-19 13:59:20.000000000 +0100
  35.486 +@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
  35.487 + 
  35.488 + #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
  35.489 + 	if (has_tiny_unaligned_frags(skb)) {
  35.490 +-		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
  35.491 ++		if (__skb_linearize(skb)) {
  35.492 + 			stats->tx_dropped++;
  35.493 + 			printk(KERN_DEBUG "%s: failed to linearize tiny "
  35.494 + 					"unaligned fragment\n", dev->name);
  35.495 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
  35.496 +--- ../orig-linux-2.6.16.29/drivers/net/natsemi.c	2006-09-12 19:02:10.000000000 +0100
  35.497 ++++ ./drivers/net/natsemi.c	2006-09-19 13:59:20.000000000 +0100
  35.498 +@@ -323,12 +323,12 @@ performance critical codepaths:
  35.499 + The rx process only runs in the interrupt handler. Access from outside
  35.500 + the interrupt handler is only permitted after disable_irq().
  35.501 + 
  35.502 +-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
  35.503 ++The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
  35.504 + is set, then access is permitted under spin_lock_irq(&np->lock).
  35.505 + 
  35.506 + Thus configuration functions that want to access everything must call
  35.507 + 	disable_irq(dev->irq);
  35.508 +-	spin_lock_bh(dev->xmit_lock);
  35.509 ++	netif_tx_lock_bh(dev);
  35.510 + 	spin_lock_irq(&np->lock);
  35.511 + 
  35.512 + IV. Notes
  35.513 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
  35.514 +--- ../orig-linux-2.6.16.29/drivers/net/r8169.c	2006-09-12 19:02:10.000000000 +0100
  35.515 ++++ ./drivers/net/r8169.c	2006-09-19 13:59:20.000000000 +0100
  35.516 +@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
  35.517 + static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
  35.518 + {
  35.519 + 	if (dev->features & NETIF_F_TSO) {
  35.520 +-		u32 mss = skb_shinfo(skb)->tso_size;
  35.521 ++		u32 mss = skb_shinfo(skb)->gso_size;
  35.522 + 
  35.523 + 		if (mss)
  35.524 + 			return LargeSend | ((mss & MSSMask) << MSSShift);
  35.525 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
  35.526 +--- ../orig-linux-2.6.16.29/drivers/net/s2io.c	2006-09-12 19:02:10.000000000 +0100
  35.527 ++++ ./drivers/net/s2io.c	2006-09-19 13:59:20.000000000 +0100
  35.528 +@@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
  35.529 + 	txdp->Control_1 = 0;
  35.530 + 	txdp->Control_2 = 0;
  35.531 + #ifdef NETIF_F_TSO
  35.532 +-	mss = skb_shinfo(skb)->tso_size;
  35.533 +-	if (mss) {
  35.534 ++	mss = skb_shinfo(skb)->gso_size;
  35.535 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
  35.536 + 		txdp->Control_1 |= TXD_TCP_LSO_EN;
  35.537 + 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
  35.538 + 	}
  35.539 +@@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
  35.540 + 	}
  35.541 + 
  35.542 + 	frg_len = skb->len - skb->data_len;
  35.543 +-	if (skb_shinfo(skb)->ufo_size) {
  35.544 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
  35.545 + 		int ufo_size;
  35.546 + 
  35.547 +-		ufo_size = skb_shinfo(skb)->ufo_size;
  35.548 ++		ufo_size = skb_shinfo(skb)->gso_size;
  35.549 + 		ufo_size &= ~7;
  35.550 + 		txdp->Control_1 |= TXD_UFO_EN;
  35.551 + 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
  35.552 +@@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
  35.553 + 	txdp->Host_Control = (unsigned long) skb;
  35.554 + 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
  35.555 + 
  35.556 +-	if (skb_shinfo(skb)->ufo_size)
  35.557 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  35.558 + 		txdp->Control_1 |= TXD_UFO_EN;
  35.559 + 
  35.560 + 	frg_cnt = skb_shinfo(skb)->nr_frags;
  35.561 +@@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
  35.562 + 		    (sp->pdev, frag->page, frag->page_offset,
  35.563 + 		     frag->size, PCI_DMA_TODEVICE);
  35.564 + 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
  35.565 +-		if (skb_shinfo(skb)->ufo_size)
  35.566 ++		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  35.567 + 			txdp->Control_1 |= TXD_UFO_EN;
  35.568 + 	}
  35.569 + 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
  35.570 + 
  35.571 +-	if (skb_shinfo(skb)->ufo_size)
  35.572 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  35.573 + 		frg_cnt++; /* as Txd0 was used for inband header */
  35.574 + 
  35.575 + 	tx_fifo = mac_control->tx_FIFO_start[queue];
  35.576 +@@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
  35.577 + 	if (mss)
  35.578 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  35.579 + #endif
  35.580 +-	if (skb_shinfo(skb)->ufo_size)
  35.581 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
  35.582 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
  35.583 + 	writeq(val64, &tx_fifo->List_Control);
  35.584 + 
  35.585 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
  35.586 +--- ../orig-linux-2.6.16.29/drivers/net/sky2.c	2006-09-12 19:02:10.000000000 +0100
  35.587 ++++ ./drivers/net/sky2.c	2006-09-19 13:59:20.000000000 +0100
  35.588 +@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
  35.589 + 	count = sizeof(dma_addr_t) / sizeof(u32);
  35.590 + 	count += skb_shinfo(skb)->nr_frags * count;
  35.591 + 
  35.592 +-	if (skb_shinfo(skb)->tso_size)
  35.593 ++	if (skb_shinfo(skb)->gso_size)
  35.594 + 		++count;
  35.595 + 
  35.596 + 	if (skb->ip_summed == CHECKSUM_HW)
  35.597 +@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
  35.598 + 	}
  35.599 + 
  35.600 + 	/* Check for TCP Segmentation Offload */
  35.601 +-	mss = skb_shinfo(skb)->tso_size;
  35.602 ++	mss = skb_shinfo(skb)->gso_size;
  35.603 + 	if (mss != 0) {
  35.604 + 		/* just drop the packet if non-linear expansion fails */
  35.605 + 		if (skb_header_cloned(skb) &&
  35.606 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
  35.607 +--- ../orig-linux-2.6.16.29/drivers/net/tg3.c	2006-09-12 19:02:10.000000000 +0100
  35.608 ++++ ./drivers/net/tg3.c	2006-09-19 13:59:20.000000000 +0100
  35.609 +@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
  35.610 + #if TG3_TSO_SUPPORT != 0
  35.611 + 	mss = 0;
  35.612 + 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
  35.613 +-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
  35.614 ++	    (mss = skb_shinfo(skb)->gso_size) != 0) {
  35.615 + 		int tcp_opt_len, ip_tcp_len;
  35.616 + 
  35.617 + 		if (skb_header_cloned(skb) &&
  35.618 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
  35.619 +--- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c	2006-09-12 19:02:10.000000000 +0100
  35.620 ++++ ./drivers/net/tulip/winbond-840.c	2006-09-19 13:59:20.000000000 +0100
  35.621 +@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
  35.622 +  * - get_stats:
  35.623 +  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
  35.624 +  * - hard_start_xmit:
  35.625 +- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
  35.626 ++ * 	synchronize_irq + netif_tx_disable;
  35.627 +  * - tx_timeout:
  35.628 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  35.629 ++ * 	netif_device_detach + netif_tx_disable;
  35.630 +  * - set_multicast_list
  35.631 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
  35.632 ++ * 	netif_device_detach + netif_tx_disable;
  35.633 +  * - interrupt handler
  35.634 +  * 	doesn't touch hw if not present, synchronize_irq waits for
  35.635 +  * 	running instances of the interrupt handler.
  35.636 +@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
  35.637 + 		netif_device_detach(dev);
  35.638 + 		update_csr6(dev, 0);
  35.639 + 		iowrite32(0, ioaddr + IntrEnable);
  35.640 +-		netif_stop_queue(dev);
  35.641 + 		spin_unlock_irq(&np->lock);
  35.642 + 
  35.643 +-		spin_unlock_wait(&dev->xmit_lock);
  35.644 + 		synchronize_irq(dev->irq);
  35.645 ++		netif_tx_disable(dev);
  35.646 + 	
  35.647 + 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
  35.648 + 
  35.649 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
  35.650 +--- ../orig-linux-2.6.16.29/drivers/net/typhoon.c	2006-09-12 19:02:10.000000000 +0100
  35.651 ++++ ./drivers/net/typhoon.c	2006-09-19 13:59:20.000000000 +0100
  35.652 +@@ -340,7 +340,7 @@ enum state_values {
  35.653 + #endif
  35.654 + 
  35.655 + #if defined(NETIF_F_TSO)
  35.656 +-#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
  35.657 ++#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
  35.658 + #define TSO_NUM_DESCRIPTORS	2
  35.659 + #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
  35.660 + #else
  35.661 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
  35.662 +--- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c	2006-09-12 19:02:10.000000000 +0100
  35.663 ++++ ./drivers/net/via-velocity.c	2006-09-19 13:59:20.000000000 +0100
  35.664 +@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
  35.665 + 
  35.666 + 	int pktlen = skb->len;
  35.667 + 
  35.668 ++#ifdef VELOCITY_ZERO_COPY_SUPPORT
  35.669 ++	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
  35.670 ++		kfree_skb(skb);
  35.671 ++		return 0;
  35.672 ++	}
  35.673 ++#endif
  35.674 ++
  35.675 + 	spin_lock_irqsave(&vptr->lock, flags);
  35.676 + 
  35.677 + 	index = vptr->td_curr[qnum];
  35.678 +@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
  35.679 + 	 */
  35.680 + 	if (pktlen < ETH_ZLEN) {
  35.681 + 		/* Cannot occur until ZC support */
  35.682 +-		if(skb_linearize(skb, GFP_ATOMIC))
  35.683 +-			return 0; 
  35.684 + 		pktlen = ETH_ZLEN;
  35.685 + 		memcpy(tdinfo->buf, skb->data, skb->len);
  35.686 + 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
  35.687 +@@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff 
  35.688 + 		int nfrags = skb_shinfo(skb)->nr_frags;
  35.689 + 		tdinfo->skb = skb;
  35.690 + 		if (nfrags > 6) {
  35.691 +-			skb_linearize(skb, GFP_ATOMIC);
  35.692 + 			memcpy(tdinfo->buf, skb->data, skb->len);
  35.693 + 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
  35.694 + 			td_ptr->tdesc0.pktsize = 
  35.695 +diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
  35.696 +--- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c	2006-09-12 19:02:10.000000000 +0100
  35.697 ++++ ./drivers/net/wireless/orinoco.c	2006-09-19 13:59:20.000000000 +0100
  35.698 +@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
  35.699 + 	/* Set promiscuity / multicast*/
  35.700 + 	priv->promiscuous = 0;
  35.701 + 	priv->mc_count = 0;
  35.702 +-	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
  35.703 ++
  35.704 ++	/* FIXME: what about netif_tx_lock */
  35.705 ++	__orinoco_set_multicast_list(dev);
  35.706 + 
  35.707 + 	return 0;
  35.708 + }
  35.709 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
  35.710 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c	2006-09-12 19:02:10.000000000 +0100
  35.711 ++++ ./drivers/s390/net/qeth_eddp.c	2006-09-19 13:59:20.000000000 +0100
  35.712 +@@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
  35.713 +        }
  35.714 + 	tcph = eddp->skb->h.th;
  35.715 + 	while (eddp->skb_offset < eddp->skb->len) {
  35.716 +-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
  35.717 ++		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
  35.718 + 			       (int)(eddp->skb->len - eddp->skb_offset));
  35.719 + 		/* prepare qdio hdr */
  35.720 + 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
  35.721 +@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
  35.722 + 	
  35.723 + 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
  35.724 + 	/* can we put multiple skbs in one page? */
  35.725 +-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
  35.726 ++	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
  35.727 + 	if (skbs_per_page > 1){
  35.728 +-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
  35.729 ++		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
  35.730 + 				 skbs_per_page + 1;
  35.731 + 		ctx->elements_per_skb = 1;
  35.732 + 	} else {
  35.733 + 		/* no -> how many elements per skb? */
  35.734 +-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
  35.735 ++		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
  35.736 + 				     PAGE_SIZE) >> PAGE_SHIFT;
  35.737 + 		ctx->num_pages = ctx->elements_per_skb *
  35.738 +-				 (skb_shinfo(skb)->tso_segs + 1);
  35.739 ++				 (skb_shinfo(skb)->gso_segs + 1);
  35.740 + 	}
  35.741 + 	ctx->num_elements = ctx->elements_per_skb *
  35.742 +-			    (skb_shinfo(skb)->tso_segs + 1);
  35.743 ++			    (skb_shinfo(skb)->gso_segs + 1);
  35.744 + }
  35.745 + 
  35.746 + static inline struct qeth_eddp_context *
  35.747 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
  35.748 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c	2006-09-12 19:02:10.000000000 +0100
  35.749 ++++ ./drivers/s390/net/qeth_main.c	2006-09-19 13:59:20.000000000 +0100
  35.750 +@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
  35.751 + 	queue = card->qdio.out_qs
  35.752 + 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
  35.753 + 
  35.754 +-	if (skb_shinfo(skb)->tso_size)
  35.755 ++	if (skb_shinfo(skb)->gso_size)
  35.756 + 		large_send = card->options.large_send;
  35.757 + 
  35.758 + 	/*are we able to do TSO ? If so ,prepare and send it from here */
  35.759 +@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
  35.760 + 		card->stats.tx_packets++;
  35.761 + 		card->stats.tx_bytes += skb->len;
  35.762 + #ifdef CONFIG_QETH_PERF_STATS
  35.763 +-		if (skb_shinfo(skb)->tso_size &&
  35.764 ++		if (skb_shinfo(skb)->gso_size &&
  35.765 + 		   !(large_send == QETH_LARGE_SEND_NO)) {
  35.766 + 			card->perf_stats.large_send_bytes += skb->len;
  35.767 + 			card->perf_stats.large_send_cnt++;
  35.768 +diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
  35.769 +--- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h	2006-09-12 19:02:10.000000000 +0100
  35.770 ++++ ./drivers/s390/net/qeth_tso.h	2006-09-19 13:59:20.000000000 +0100
  35.771 +@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
  35.772 + 	hdr->ext.hdr_version = 1;
  35.773 + 	hdr->ext.hdr_len     = 28;
  35.774 + 	/*insert non-fix values */
  35.775 +-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
  35.776 ++	hdr->ext.mss = skb_shinfo(skb)->gso_size;
  35.777 + 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
  35.778 + 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
  35.779 + 				       sizeof(struct qeth_hdr_tso));
  35.780 +diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
  35.781 +--- ../orig-linux-2.6.16.29/include/linux/ethtool.h	2006-09-12 19:02:10.000000000 +0100
  35.782 ++++ ./include/linux/ethtool.h	2006-09-19 13:59:20.000000000 +0100
  35.783 +@@ -408,6 +408,8 @@ struct ethtool_ops {
  35.784 + #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
  35.785 + #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
  35.786 + #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
  35.787 ++#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
  35.788 ++#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
  35.789 + 
  35.790 + /* compatibility with older code */
  35.791 + #define SPARC_ETH_GSET		ETHTOOL_GSET
  35.792 +diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
  35.793 +--- ../orig-linux-2.6.16.29/include/linux/netdevice.h	2006-09-12 19:02:10.000000000 +0100
  35.794 ++++ ./include/linux/netdevice.h	2006-09-19 13:59:20.000000000 +0100
  35.795 +@@ -230,7 +230,8 @@ enum netdev_state_t
  35.796 + 	__LINK_STATE_SCHED,
  35.797 + 	__LINK_STATE_NOCARRIER,
  35.798 + 	__LINK_STATE_RX_SCHED,
  35.799 +-	__LINK_STATE_LINKWATCH_PENDING
  35.800 ++	__LINK_STATE_LINKWATCH_PENDING,
  35.801 ++	__LINK_STATE_QDISC_RUNNING,
  35.802 + };
  35.803 + 
  35.804 + 
  35.805 +@@ -306,9 +307,17 @@ struct net_device
  35.806 + #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
  35.807 + #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
  35.808 + #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
  35.809 +-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
  35.810 ++#define NETIF_F_GSO		2048	/* Enable software GSO. */
  35.811 + #define NETIF_F_LLTX		4096	/* LockLess TX */
  35.812 +-#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
  35.813 ++
  35.814 ++	/* Segmentation offload features */
  35.815 ++#define NETIF_F_GSO_SHIFT	16
  35.816 ++#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  35.817 ++#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
  35.818 ++#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  35.819 ++
  35.820 ++#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  35.821 ++#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
  35.822 + 
  35.823 + 	struct net_device	*next_sched;
  35.824 + 
  35.825 +@@ -394,6 +403,9 @@ struct net_device
  35.826 + 	struct list_head	qdisc_list;
  35.827 + 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
  35.828 + 
  35.829 ++	/* Partially transmitted GSO packet. */
  35.830 ++	struct sk_buff		*gso_skb;
  35.831 ++
  35.832 + 	/* ingress path synchronizer */
  35.833 + 	spinlock_t		ingress_lock;
  35.834 + 	struct Qdisc		*qdisc_ingress;
  35.835 +@@ -402,7 +414,7 @@ struct net_device
  35.836 +  * One part is mostly used on xmit path (device)
  35.837 +  */
  35.838 + 	/* hard_start_xmit synchronizer */
  35.839 +-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
  35.840 ++	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
  35.841 + 	/* cpu id of processor entered to hard_start_xmit or -1,
  35.842 + 	   if nobody entered there.
  35.843 + 	 */
  35.844 +@@ -527,6 +539,8 @@ struct packet_type {
  35.845 + 					 struct net_device *,
  35.846 + 					 struct packet_type *,
  35.847 + 					 struct net_device *);
  35.848 ++	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
  35.849 ++						int features);
  35.850 + 	void			*af_packet_priv;
  35.851 + 	struct list_head	list;
  35.852 + };
  35.853 +@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
  35.854 + extern int		dev_set_mtu(struct net_device *, int);
  35.855 + extern int		dev_set_mac_address(struct net_device *,
  35.856 + 					    struct sockaddr *);
  35.857 +-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
  35.858 ++extern int		dev_hard_start_xmit(struct sk_buff *skb,
  35.859 ++					    struct net_device *dev);
  35.860 + 
  35.861 + extern void		dev_init(void);
  35.862 + 
  35.863 +@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
  35.864 + 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
  35.865 + }
  35.866 + 
  35.867 ++static inline void netif_tx_lock(struct net_device *dev)
  35.868 ++{
  35.869 ++	spin_lock(&dev->_xmit_lock);
  35.870 ++	dev->xmit_lock_owner = smp_processor_id();
  35.871 ++}
  35.872 ++
  35.873 ++static inline void netif_tx_lock_bh(struct net_device *dev)
  35.874 ++{
  35.875 ++	spin_lock_bh(&dev->_xmit_lock);
  35.876 ++	dev->xmit_lock_owner = smp_processor_id();
  35.877 ++}
  35.878 ++
  35.879 ++static inline int netif_tx_trylock(struct net_device *dev)
  35.880 ++{
  35.881 ++	int err = spin_trylock(&dev->_xmit_lock);
  35.882 ++	if (!err)
  35.883 ++		dev->xmit_lock_owner = smp_processor_id();
  35.884 ++	return err;
  35.885 ++}
  35.886 ++
  35.887 ++static inline void netif_tx_unlock(struct net_device *dev)
  35.888 ++{
  35.889 ++	dev->xmit_lock_owner = -1;
  35.890 ++	spin_unlock(&dev->_xmit_lock);
  35.891 ++}
  35.892 ++
  35.893 ++static inline void netif_tx_unlock_bh(struct net_device *dev)
  35.894 ++{
  35.895 ++	dev->xmit_lock_owner = -1;
  35.896 ++	spin_unlock_bh(&dev->_xmit_lock);
  35.897 ++}
  35.898 ++
  35.899 + static inline void netif_tx_disable(struct net_device *dev)
  35.900 + {
  35.901 +-	spin_lock_bh(&dev->xmit_lock);
  35.902 ++	netif_tx_lock_bh(dev);
  35.903 + 	netif_stop_queue(dev);
  35.904 +-	spin_unlock_bh(&dev->xmit_lock);
  35.905 ++	netif_tx_unlock_bh(dev);
  35.906 + }
  35.907 + 
  35.908 + /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  35.909 +@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
  35.910 + extern int		weight_p;
  35.911 + extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
  35.912 + extern int skb_checksum_help(struct sk_buff *skb, int inward);
  35.913 ++extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  35.914 + #ifdef CONFIG_BUG
  35.915 + extern void netdev_rx_csum_fault(struct net_device *dev);
  35.916 + #else
  35.917 +@@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
  35.918 + 
  35.919 + extern void linkwatch_run_queue(void);
  35.920 + 
  35.921 ++static inline int skb_gso_ok(struct sk_buff *skb, int features)
  35.922 ++{
  35.923 ++	int feature = skb_shinfo(skb)->gso_size ?
  35.924 ++		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
  35.925 ++	return (features & feature) == feature;
  35.926 ++}
  35.927 ++
  35.928 ++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  35.929 ++{
  35.930 ++	return !skb_gso_ok(skb, dev->features);
  35.931 ++}
  35.932 ++
  35.933 + #endif /* __KERNEL__ */
  35.934 + 
  35.935 + #endif	/* _LINUX_DEV_H */
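
The netdevice.h changes above are the heart of this patch: dev->xmit_lock becomes _xmit_lock behind the netif_tx_lock()/netif_tx_unlock() family (which also records the owning CPU, and which every driver hunk earlier in the patch converts to), feature bits 16 and up now mirror the SKB_GSO_* types so NETIF_F_TSO and NETIF_F_UFO are derived values, and skb_gso_ok()/netif_needs_gso() report whether a device can take a given GSO skb as-is. The transmit path added later in this patch (beyond this excerpt) uses netif_needs_gso() to decide when to fall back to skb_gso_segment(). A throwaway userspace rendering of the feature test, with the constants copied from the hunks above:

    #include <stdio.h>

    /* Values copied from the linux/netdevice.h and linux/skbuff.h hunks. */
    #define SKB_GSO_TCPV4           (1 << 0)
    #define SKB_GSO_UDPV4           (1 << 1)

    #define NETIF_F_GSO_SHIFT       16
    #define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
    #define NETIF_F_UFO             (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)

    /* skb_gso_ok() reduced to its inputs: gso_size/gso_type come from the
     * skb, 'features' from the device. */
    static int gso_ok(unsigned short gso_size, unsigned short gso_type,
                      int features)
    {
            int feature = gso_size ? gso_type << NETIF_F_GSO_SHIFT : 0;

            return (features & feature) == feature;
    }

    int main(void)
    {
            int tso_only_nic = NETIF_F_TSO;   /* hardware TSO, no UFO */

            printf("TCPv4 gso skb: %s\n",
                   gso_ok(1448, SKB_GSO_TCPV4, tso_only_nic)
                   ? "handed to hardware" : "software-segmented");
            printf("UDPv4 gso skb: %s\n",
                   gso_ok(1472, SKB_GSO_UDPV4, tso_only_nic)
                   ? "handed to hardware" : "software-segmented");
            return 0;
    }
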
  35.936 +diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
  35.937 +--- ../orig-linux-2.6.16.29/include/linux/skbuff.h	2006-09-12 19:02:10.000000000 +0100
  35.938 ++++ ./include/linux/skbuff.h	2006-09-19 13:59:20.000000000 +0100
  35.939 +@@ -134,9 +134,10 @@ struct skb_frag_struct {
  35.940 + struct skb_shared_info {
  35.941 + 	atomic_t	dataref;
  35.942 + 	unsigned short	nr_frags;
  35.943 +-	unsigned short	tso_size;
  35.944 +-	unsigned short	tso_segs;
  35.945 +-	unsigned short  ufo_size;
  35.946 ++	unsigned short	gso_size;
  35.947 ++	/* Warning: this field is not always filled in (UFO)! */
  35.948 ++	unsigned short	gso_segs;
  35.949 ++	unsigned short  gso_type;
  35.950 + 	unsigned int    ip6_frag_id;
  35.951 + 	struct sk_buff	*frag_list;
  35.952 + 	skb_frag_t	frags[MAX_SKB_FRAGS];
  35.953 +@@ -168,6 +169,14 @@ enum {
  35.954 + 	SKB_FCLONE_CLONE,
  35.955 + };
  35.956 + 
  35.957 ++enum {
  35.958 ++	SKB_GSO_TCPV4 = 1 << 0,
  35.959 ++	SKB_GSO_UDPV4 = 1 << 1,
  35.960 ++
  35.961 ++	/* This indicates the skb is from an untrusted source. */
  35.962 ++	SKB_GSO_DODGY = 1 << 2,
  35.963 ++};
  35.964 ++
  35.965 + /** 
  35.966 +  *	struct sk_buff - socket buffer
  35.967 +  *	@next: Next buffer in list
  35.968 +@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  35.969 + 	return 0;
  35.970 + }
  35.971 + 
  35.972 ++static inline int __skb_linearize(struct sk_buff *skb)
  35.973 ++{
  35.974 ++	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  35.975 ++}
  35.976 ++
  35.977 + /**
  35.978 +  *	skb_linearize - convert paged skb to linear one
  35.979 +  *	@skb: buffer to linarize
  35.980 +- *	@gfp: allocation mode
  35.981 +  *
  35.982 +  *	If there is no free memory -ENOMEM is returned, otherwise zero
  35.983 +  *	is returned and the old skb data released.
  35.984 +  */
  35.985 +-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  35.986 +-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  35.987 ++static inline int skb_linearize(struct sk_buff *skb)
  35.988 ++{
  35.989 ++	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  35.990 ++}
  35.991 ++
  35.992 ++/**
  35.993 ++ *	skb_linearize_cow - make sure skb is linear and writable
  35.994 ++ *	@skb: buffer to process
  35.995 ++ *
  35.996 ++ *	If there is no free memory -ENOMEM is returned, otherwise zero
  35.997 ++ *	is returned and the old skb data released.
  35.998 ++ */
  35.999 ++static inline int skb_linearize_cow(struct sk_buff *skb)
 35.1000 + {
 35.1001 +-	return __skb_linearize(skb, gfp);
 35.1002 ++	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
 35.1003 ++	       __skb_linearize(skb) : 0;
 35.1004 + }
 35.1005 + 
 35.1006 + /**
 35.1007 +@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
 35.1008 + 				 struct sk_buff *skb1, const u32 len);
 35.1009 + 
 35.1010 + extern void	       skb_release_data(struct sk_buff *skb);
 35.1011 ++extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
 35.1012 + 
 35.1013 + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 35.1014 + 				       int len, void *buffer)
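
In skbuff.h the shared-info fields are renamed: tso_size/tso_segs become gso_size/gso_segs, ufo_size disappears and UFO packets are instead marked with the new gso_type, and skb_linearize() loses its gfp argument in favour of a pull-based __skb_linearize() plus skb_linearize_cow(). That is what the driver hunks earlier in this patch (aoenet, mv643xx_eth, s2io, via-velocity and friends) are adapting to. A mock illustration of how a driver such as s2io now tells TSO from UFO; mock_shinfo merely stands in for the real skb_shared_info:

    #include <stdio.h>

    /* SKB_GSO_* values as defined in the skbuff.h hunk above. */
    enum { SKB_GSO_TCPV4 = 1 << 0, SKB_GSO_UDPV4 = 1 << 1 };

    /* Stand-in for the renamed fields; real code reads skb_shinfo(skb). */
    struct mock_shinfo {
            unsigned short gso_size;        /* was tso_size / ufo_size */
            unsigned short gso_segs;        /* was tso_segs */
            unsigned short gso_type;        /* new: what gso_size refers to */
    };

    /* The s2io.c-style decision: UFO handling now keys off gso_type
     * rather than a separate ufo_size field. */
    static const char *classify(const struct mock_shinfo *si)
    {
            if (!si->gso_size)
                    return "plain";
            return si->gso_type == SKB_GSO_UDPV4 ? "UFO" : "TSO";
    }

    int main(void)
    {
            struct mock_shinfo tcp = { 1448, 4, SKB_GSO_TCPV4 };
            struct mock_shinfo udp = { 1472, 3, SKB_GSO_UDPV4 };
            struct mock_shinfo raw = { 0, 0, 0 };

            printf("%s, %s, %s\n",
                   classify(&tcp), classify(&udp), classify(&raw));
            return 0;
    }
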
 35.1015 +diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
 35.1016 +--- ../orig-linux-2.6.16.29/include/net/pkt_sched.h	2006-09-12 19:02:10.000000000 +0100
 35.1017 ++++ ./include/net/pkt_sched.h	2006-09-19 13:59:20.000000000 +0100
 35.1018 +@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
 35.1019 + 		struct rtattr *tab);
 35.1020 + extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 35.1021 + 
 35.1022 +-extern int qdisc_restart(struct net_device *dev);
 35.1023 ++extern void __qdisc_run(struct net_device *dev);
 35.1024 + 
 35.1025 + static inline void qdisc_run(struct net_device *dev)
 35.1026 + {
 35.1027 +-	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 35.1028 +-		/* NOTHING */;
 35.1029 ++	if (!netif_queue_stopped(dev) &&
 35.1030 ++	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 35.1031 ++		__qdisc_run(dev);
 35.1032 + }
 35.1033 + 
 35.1034 + extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
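
qdisc_run() now claims the new __LINK_STATE_QDISC_RUNNING bit with test_and_set_bit(), so only one CPU at a time dequeues a device's qdisc; the dequeue loop itself moves into __qdisc_run() in net/sched/sch_generic.c, which this patch also touches but which lies beyond this excerpt. Roughly, and purely as a sketch from memory rather than a quotation of the patch, that function has this shape:

    void __qdisc_run(struct net_device *dev)
    {
            if (unlikely(dev->qdisc == &noop_qdisc))
                    goto out;

            /* Keep feeding the driver until the qdisc is empty or the
             * driver stops the queue. */
            while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
                    /* NOTHING */;

    out:
            /* Release the claim taken by qdisc_run() above. */
            clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
    }
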
 35.1035 +diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
 35.1036 +--- ../orig-linux-2.6.16.29/include/net/protocol.h	2006-09-12 19:02:10.000000000 +0100
 35.1037 ++++ ./include/net/protocol.h	2006-09-19 13:59:20.000000000 +0100
 35.1038 +@@ -37,6 +37,8 @@
 35.1039 + struct net_protocol {
 35.1040 + 	int			(*handler)(struct sk_buff *skb);
 35.1041 + 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 35.1042 ++	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
 35.1043 ++					       int features);
 35.1044 + 	int			no_policy;
 35.1045 + };
 35.1046 + 
 35.1047 +diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
 35.1048 +--- ../orig-linux-2.6.16.29/include/net/sock.h	2006-09-12 19:02:10.000000000 +0100
 35.1049 ++++ ./include/net/sock.h	2006-09-19 13:59:20.000000000 +0100
 35.1050 +@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
 35.1051 + {
 35.1052 + 	__sk_dst_set(sk, dst);
 35.1053 + 	sk->sk_route_caps = dst->dev->features;
 35.1054 ++	if (sk->sk_route_caps & NETIF_F_GSO)
 35.1055 ++		sk->sk_route_caps |= NETIF_F_TSO;
 35.1056 + 	if (sk->sk_route_caps & NETIF_F_TSO) {
 35.1057 + 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
 35.1058 + 			sk->sk_route_caps &= ~NETIF_F_TSO;
 35.1059 ++		else 
 35.1060 ++			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 35.1061 + 	}
 35.1062 + }
 35.1063 + 
 35.1064 +diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
 35.1065 +--- ../orig-linux-2.6.16.29/include/net/tcp.h	2006-09-12 19:02:10.000000000 +0100
 35.1066 ++++ ./include/net/tcp.h	2006-09-19 13:59:20.000000000 +0100
 35.1067 +@@ -552,13 +552,13 @@ struct tcp_skb_cb {
 35.1068 +  */
 35.1069 + static inline int tcp_skb_pcount(const struct sk_buff *skb)
 35.1070 + {
 35.1071 +-	return skb_shinfo(skb)->tso_segs;
 35.1072 ++	return skb_shinfo(skb)->gso_segs;
 35.1073 + }
 35.1074 + 
 35.1075 + /* This is valid iff tcp_skb_pcount() > 1. */
 35.1076 + static inline int tcp_skb_mss(const struct sk_buff *skb)
 35.1077 + {
 35.1078 +-	return skb_shinfo(skb)->tso_size;
 35.1079 ++	return skb_shinfo(skb)->gso_size;
 35.1080 + }
 35.1081 + 
 35.1082 + static inline void tcp_dec_pcount_approx(__u32 *count,
 35.1083 +@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
 35.1084 + 
 35.1085 + extern int tcp_v4_destroy_sock(struct sock *sk);
 35.1086 + 
 35.1087 ++extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
 35.1088 ++
 35.1089 + #ifdef CONFIG_PROC_FS
 35.1090 + extern int  tcp4_proc_init(void);
 35.1091 + extern void tcp4_proc_exit(void);
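
tcp_skb_pcount() and tcp_skb_mss() keep their meaning, the number of MSS-sized segments an skb stands for and the segment size, and simply read the renamed gso_segs/gso_size fields; tcp_tso_segment() is declared as TCP's gso_segment hook. For orientation only, the usual relationship between the two fields (the round-up rule is how the TCP output path is generally understood to fill gso_segs, not a quotation of this patch):

    #include <stdio.h>

    /* gso_segs is, conceptually, the payload length divided by gso_size,
     * rounded up; illustrative only. */
    static unsigned short pcount(unsigned int payload_len, unsigned short mss)
    {
            return (unsigned short)((payload_len + mss - 1) / mss);
    }

    int main(void)
    {
            /* A 64 KB send over a 1448-byte MSS covers 46 segments. */
            printf("%u\n", (unsigned int)pcount(65536, 1448));
            return 0;
    }
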
 35.1092 +diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
 35.1093 +--- ../orig-linux-2.6.16.29/net/atm/clip.c	2006-09-12 19:02:10.000000000 +0100
 35.1094 ++++ ./net/atm/clip.c	2006-09-19 13:59:20.000000000 +0100
 35.1095 +@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
 35.1096 + 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
 35.1097 + 		return;
 35.1098 + 	}
 35.1099 +-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
 35.1100 ++	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
 35.1101 + 	entry->neigh->used = jiffies;
 35.1102 + 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
 35.1103 + 		if (*walk == clip_vcc) {
 35.1104 +@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
 35.1105 + 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
 35.1106 + 	  "0x%p)\n",entry,clip_vcc);
 35.1107 + out:
 35.1108 +-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
 35.1109 ++	netif_tx_unlock_bh(entry->neigh->dev);
 35.1110 + }
 35.1111 + 
 35.1112 + /* The neighbour entry n->lock is held. */
 35.1113 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
 35.1114 +--- ../orig-linux-2.6.16.29/net/bridge/br_device.c	2006-09-12 19:02:10.000000000 +0100
 35.1115 ++++ ./net/bridge/br_device.c	2006-09-19 13:59:20.000000000 +0100
 35.1116 +@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
 35.1117 + 	struct net_bridge *br = netdev_priv(dev);
 35.1118 + 
 35.1119 + 	if (data)
 35.1120 +-		br->feature_mask |= NETIF_F_IP_CSUM;
 35.1121 ++		br->feature_mask |= NETIF_F_NO_CSUM;
 35.1122 + 	else
 35.1123 +-		br->feature_mask &= ~NETIF_F_IP_CSUM;
 35.1124 ++		br->feature_mask &= ~NETIF_F_ALL_CSUM;
 35.1125 + 
 35.1126 + 	br_features_recompute(br);
 35.1127 + 	return 0;
 35.1128 +@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
 35.1129 + 	dev->set_mac_address = br_set_mac_address;
 35.1130 + 	dev->priv_flags = IFF_EBRIDGE;
 35.1131 + 
 35.1132 +- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
 35.1133 +- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
 35.1134 ++ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 35.1135 ++ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
 35.1136 + }
 35.1137 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
 35.1138 +--- ../orig-linux-2.6.16.29/net/bridge/br_forward.c	2006-09-12 19:02:10.000000000 +0100
 35.1139 ++++ ./net/bridge/br_forward.c	2006-09-19 13:59:20.000000000 +0100
 35.1140 +@@ -32,7 +32,7 @@ static inline int should_deliver(const s
 35.1141 + int br_dev_queue_push_xmit(struct sk_buff *skb)
 35.1142 + {
 35.1143 + 	/* drop mtu oversized packets except tso */
 35.1144 +-	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
 35.1145 ++	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
 35.1146 + 		kfree_skb(skb);
 35.1147 + 	else {
 35.1148 + #ifdef CONFIG_BRIDGE_NETFILTER
 35.1149 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
 35.1150 +--- ../orig-linux-2.6.16.29/net/bridge/br_if.c	2006-09-12 19:02:10.000000000 +0100
 35.1151 ++++ ./net/bridge/br_if.c	2006-09-19 13:59:20.000000000 +0100
 35.1152 +@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
 35.1153 + 	struct net_bridge_port *p;
 35.1154 + 	unsigned long features, checksum;
 35.1155 + 
 35.1156 +-	features = br->feature_mask &~ NETIF_F_IP_CSUM;
 35.1157 +-	checksum = br->feature_mask & NETIF_F_IP_CSUM;
 35.1158 ++	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
 35.1159 ++	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
 35.1160 + 
 35.1161 + 	list_for_each_entry(p, &br->port_list, list) {
 35.1162 +-		if (!(p->dev->features 
 35.1163 +-		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
 35.1164 ++		unsigned long feature = p->dev->features;
 35.1165 ++
 35.1166 ++		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
 35.1167 ++			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
 35.1168 ++		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
 35.1169 ++			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
 35.1170 ++		if (!(feature & NETIF_F_IP_CSUM))
 35.1171 + 			checksum = 0;
 35.1172 +-		features &= p->dev->features;
 35.1173 ++
 35.1174 ++		if (feature & NETIF_F_GSO)
 35.1175 ++			feature |= NETIF_F_TSO;
 35.1176 ++		feature |= NETIF_F_GSO;
 35.1177 ++
 35.1178 ++		features &= feature;
 35.1179 + 	}
 35.1180 + 
 35.1181 +-	br->dev->features = features | checksum | NETIF_F_LLTX;
 35.1182 ++	br->dev->features = features | checksum | NETIF_F_LLTX |
 35.1183 ++			    NETIF_F_GSO_ROBUST;
 35.1184 + }
 35.1185 + 
 35.1186 + /* called with RTNL */
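
The rewritten loop above degrades the bridge's advertised checksum capability one step at a time (NETIF_F_NO_CSUM -> NETIF_F_HW_CSUM -> NETIF_F_IP_CSUM -> none) as it meets ports with weaker capabilities.  A stand-alone sketch of that cascade, again with placeholder flag values rather than the kernel's NETIF_F_* bits:

	#include <stdio.h>

	/* Placeholder values for illustration only -- not the kernel's bits. */
	#define F_IP_CSUM  0x1UL
	#define F_HW_CSUM  0x4UL
	#define F_NO_CSUM  0x8UL

	/* Replicates the per-port downgrade cascade in the patched
	 * br_features_recompute(). */
	static unsigned long bridge_csum(const unsigned long *port, int nports)
	{
		unsigned long checksum = F_NO_CSUM;	/* start optimistic */
		int i;

		for (i = 0; i < nports; i++) {
			unsigned long feature = port[i];

			if ((checksum & F_NO_CSUM) && !(feature & F_NO_CSUM))
				checksum ^= F_NO_CSUM | F_HW_CSUM;
			if ((checksum & F_HW_CSUM) && !(feature & F_HW_CSUM))
				checksum ^= F_HW_CSUM | F_IP_CSUM;
			if (!(feature & F_IP_CSUM))
				checksum = 0;
		}
		return checksum;
	}

	int main(void)
	{
		/* One port with full hardware checksumming, one IPv4-only. */
		unsigned long ports[] = { F_HW_CSUM | F_IP_CSUM, F_IP_CSUM };

		/* Prints 0x1: the bridge falls back to IPv4-only checksums. */
		printf("%#lx\n", bridge_csum(ports, 2));
		return 0;
	}
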
 35.1187 +diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
 35.1188 +--- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c	2006-09-12 19:02:10.000000000 +0100
 35.1189 ++++ ./net/bridge/br_netfilter.c	2006-09-19 13:59:20.000000000 +0100
 35.1190 +@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
 35.1191 + {
 35.1192 + 	if (skb->protocol == htons(ETH_P_IP) &&
 35.1193 + 	    skb->len > skb->dev->mtu &&
 35.1194 +-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
 35.1195 ++	    !skb_shinfo(skb)->gso_size)
 35.1196 + 		return ip_fragment(skb, br_dev_queue_push_xmit);
 35.1197 + 	else
 35.1198 + 		return br_dev_queue_push_xmit(skb);
 35.1199 +diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
 35.1200 +--- ../orig-linux-2.6.16.29/net/core/dev.c	2006-09-12 19:02:10.000000000 +0100
 35.1201 ++++ ./net/core/dev.c	2006-09-19 13:59:20.000000000 +0100
 35.1202 +@@ -115,6 +115,7 @@
 35.1203 + #include <net/iw_handler.h>
 35.1204 + #endif	/* CONFIG_NET_RADIO */
 35.1205 + #include <asm/current.h>
 35.1206 ++#include <linux/err.h>
 35.1207 + 
 35.1208 + /*
 35.1209 +  *	The list of packet types we will receive (as opposed to discard)
 35.1210 +@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
 35.1211 +  *	taps currently in use.
 35.1212 +  */
 35.1213 + 
 35.1214 +-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 35.1215 ++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 35.1216 + {
 35.1217 + 	struct packet_type *ptype;
 35.1218 + 
 35.1219 +@@ -1106,6 +1107,45 @@ out:	
 35.1220 + 	return ret;
 35.1221 + }
 35.1222 + 
 35.1223 ++/**
 35.1224 ++ *	skb_gso_segment - Perform segmentation on skb.
 35.1225 ++ *	@skb: buffer to segment
 35.1226 ++ *	@features: features for the output path (see dev->features)
 35.1227 ++ *
 35.1228 ++ *	This function segments the given skb and returns a list of segments.
 35.1229 ++ *
 35.1230 ++ *	It may return NULL if the skb requires no segmentation.  This is
 35.1231 ++ *	only possible when GSO is used for verifying header integrity.
 35.1232 ++ */
 35.1233 ++struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 35.1234 ++{
 35.1235 ++	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 35.1236 ++	struct packet_type *ptype;
 35.1237 ++	int type = skb->protocol;
 35.1238 ++
 35.1239 ++	BUG_ON(skb_shinfo(skb)->frag_list);
 35.1240 ++	BUG_ON(skb->ip_summed != CHECKSUM_HW);
 35.1241 ++
 35.1242 ++	skb->mac.raw = skb->data;
 35.1243 ++	skb->mac_len = skb->nh.raw - skb->data;
 35.1244 ++	__skb_pull(skb, skb->mac_len);
 35.1245 ++
 35.1246 ++	rcu_read_lock();
 35.1247 ++	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 35.1248 ++		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
 35.1249 ++			segs = ptype->gso_segment(skb, features);
 35.1250 ++			break;
 35.1251 ++		}
 35.1252 ++	}
 35.1253 ++	rcu_read_unlock();
 35.1254 ++
 35.1255 ++	__skb_push(skb, skb->data - skb->mac.raw);
 35.1256 ++
 35.1257 ++	return segs;
 35.1258 ++}
 35.1259 ++
 35.1260 ++EXPORT_SYMBOL(skb_gso_segment);
 35.1261 ++
 35.1262 + /* Take action when hardware reception checksum errors are detected. */
 35.1263 + #ifdef CONFIG_BUG
 35.1264 + void netdev_rx_csum_fault(struct net_device *dev)
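
As the kernel-doc comment above says, skb_gso_segment() hands back a ->next-linked list of segments, NULL when GSO was only being used to verify headers, or an ERR_PTR on failure.  A hedged sketch of how a transmit path might consume it, loosely modelled on the dev_gso_segment() helper introduced just below; example_gso_xmit and example_xmit_one are hypothetical, not kernel APIs:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int example_gso_xmit(struct sk_buff *skb, struct net_device *dev,
				    int (*example_xmit_one)(struct sk_buff *,
							    struct net_device *))
	{
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, dev->features);
		if (IS_ERR(segs))
			return PTR_ERR(segs);
		if (segs == NULL)
			/* Header verification only: send the original skb. */
			return example_xmit_one(skb, dev);

		/* The original skb is no longer needed once segmented. */
		kfree_skb(skb);

		while (segs) {
			struct sk_buff *next = segs->next;

			segs->next = NULL;
			example_xmit_one(segs, dev);	/* errors ignored for brevity */
			segs = next;
		}
		return 0;
	}
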
 35.1265 +@@ -1142,75 +1182,108 @@ static inline int illegal_highdma(struct
 35.1266 + #define illegal_highdma(dev, skb)	(0)
 35.1267 + #endif
 35.1268 + 
 35.1269 +-/* Keep head the same: replace data */
 35.1270 +-int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 35.1271 ++struct dev_gso_cb {
 35.1272 ++	void (*destructor)(struct sk_buff *skb);
 35.1273 ++};
 35.1274 ++
 35.1275 ++#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
 35.1276 ++
 35.1277 ++static void dev_gso_skb_destructor(struct sk_buff *skb)
 35.1278 ++{
 35.1279 ++	struct dev_gso_cb *cb;
 35.1280 ++
 35.1281 ++	do {
 35.1282 ++		struct sk_buff *nskb = skb->next;
 35.1283 ++
 35.1284 ++		skb->next = nskb->next;
 35.1285 ++		nskb->next = NULL;
 35.1286 ++		kfree_skb(nskb);
 35.1287 ++	} while (skb->next);
 35.1288 ++
 35.1289 ++	cb = DEV_GSO_CB(skb);
 35.1290 ++	if (cb->destructor)
 35.1291 ++		cb->destructor(skb);
 35.1292 ++}
 35.1293 ++
 35.1294 ++/**
 35.1295 ++ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 35.1296 ++ *	@skb: buffer to segment
 35.1297 ++ *
 35.1298 ++ *	This function segments the given skb and stores the list of segments
 35.1299