direct-io.hg

changeset 10515:6e7027a2abca

[NET]: Added GSO support

Imported Herbert Xu's generic segmentation offload (GSO) patch from upstream Linux. GSO generalises TSO/UFO: the old tso_size/tso_segs/ufo_size fields in skb_shared_info become gso_size/gso_segs/gso_type, large packets are segmented in software (skb_gso_segment/skb_segment) only just before they reach a driver that cannot segment them in hardware, and direct use of dev->xmit_lock is replaced by the new netif_tx_lock helpers.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kaf24@firebug.cl.cam.ac.uk
date Wed Jun 28 12:03:01 2006 +0100 (2006-06-28)
parents b217e03e1db5
children 2eac0e15e0c2
files linux-2.6-xen-sparse/include/linux/skbuff.h linux-2.6-xen-sparse/net/core/dev.c linux-2.6-xen-sparse/net/core/skbuff.c patches/linux-2.6.16.13/net-gso.patch
line diff
     1.1 --- a/linux-2.6-xen-sparse/include/linux/skbuff.h	Wed Jun 28 10:32:43 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/include/linux/skbuff.h	Wed Jun 28 12:03:01 2006 +0100
     1.3 @@ -134,9 +134,10 @@ struct skb_frag_struct {
     1.4  struct skb_shared_info {
     1.5  	atomic_t	dataref;
     1.6  	unsigned short	nr_frags;
     1.7 -	unsigned short	tso_size;
     1.8 -	unsigned short	tso_segs;
     1.9 -	unsigned short  ufo_size;
    1.10 +	unsigned short	gso_size;
    1.11 +	/* Warning: this field is not always filled in (UFO)! */
    1.12 +	unsigned short	gso_segs;
    1.13 +	unsigned short  gso_type;
    1.14  	unsigned int    ip6_frag_id;
    1.15  	struct sk_buff	*frag_list;
    1.16  	skb_frag_t	frags[MAX_SKB_FRAGS];
    1.17 @@ -168,6 +169,14 @@ enum {
    1.18  	SKB_FCLONE_CLONE,
    1.19  };
    1.20  
    1.21 +enum {
    1.22 +	SKB_GSO_TCPV4 = 1 << 0,
    1.23 +	SKB_GSO_UDPV4 = 1 << 1,
    1.24 +
    1.25 +	/* This indicates the skb is from an untrusted source. */
    1.26 +	SKB_GSO_DODGY = 1 << 2,
    1.27 +};
    1.28 +
    1.29  /** 
    1.30   *	struct sk_buff - socket buffer
    1.31   *	@next: Next buffer in list
    1.32 @@ -1157,18 +1166,34 @@ static inline int skb_can_coalesce(struc
    1.33  	return 0;
    1.34  }
    1.35  
    1.36 +static inline int __skb_linearize(struct sk_buff *skb)
    1.37 +{
    1.38 +	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
    1.39 +}
    1.40 +
    1.41  /**
    1.42   *	skb_linearize - convert paged skb to linear one
     1.43   *	@skb: buffer to linearize
    1.44 - *	@gfp: allocation mode
    1.45   *
    1.46   *	If there is no free memory -ENOMEM is returned, otherwise zero
    1.47   *	is returned and the old skb data released.
    1.48   */
    1.49 -extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
    1.50 -static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
    1.51 +static inline int skb_linearize(struct sk_buff *skb)
    1.52  {
    1.53 -	return __skb_linearize(skb, gfp);
    1.54 +	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
    1.55 +}
    1.56 +
    1.57 +/**
    1.58 + *	skb_linearize_cow - make sure skb is linear and writable
    1.59 + *	@skb: buffer to process
    1.60 + *
    1.61 + *	If there is no free memory -ENOMEM is returned, otherwise zero
    1.62 + *	is returned and the old skb data released.
    1.63 + */
    1.64 +static inline int skb_linearize_cow(struct sk_buff *skb)
    1.65 +{
    1.66 +	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
    1.67 +	       __skb_linearize(skb) : 0;
    1.68  }
    1.69  
    1.70  /**
    1.71 @@ -1263,6 +1288,7 @@ extern void	       skb_split(struct sk_b
    1.72  				 struct sk_buff *skb1, const u32 len);
    1.73  
    1.74  extern void	       skb_release_data(struct sk_buff *skb);
    1.75 +extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
    1.76  
    1.77  static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
    1.78  				       int len, void *buffer)
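
The reworked helpers above drop the gfp_t argument and turn skb_linearize() into a cheap no-op for already-linear buffers. A minimal usage sketch, assuming only the post-patch API (example_parse_headers is a hypothetical name, not part of this changeset):

	/* Flatten the skb before touching headers that may span frags.
	 * skb_linearize() pulls fragment data into the linear area and
	 * returns -ENOMEM on allocation failure; skb_linearize_cow()
	 * additionally guarantees the data is private (not cloned). */
	static int example_parse_headers(struct sk_buff *skb)
	{
		int err = skb_linearize(skb);

		if (err)
			return err;	/* -ENOMEM: caller should drop the skb */

		/* skb->data now covers all skb->len bytes. */
		return 0;
	}
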
     2.1 --- a/linux-2.6-xen-sparse/net/core/dev.c	Wed Jun 28 10:32:43 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/net/core/dev.c	Wed Jun 28 12:03:01 2006 +0100
     2.3 @@ -115,6 +115,7 @@
     2.4  #include <net/iw_handler.h>
     2.5  #endif	/* CONFIG_NET_RADIO */
     2.6  #include <asm/current.h>
     2.7 +#include <linux/err.h>
     2.8  
     2.9  #ifdef CONFIG_XEN
    2.10  #include <net/ip.h>
    2.11 @@ -1038,7 +1039,7 @@ static inline void net_timestamp(struct 
    2.12   *	taps currently in use.
    2.13   */
    2.14  
    2.15 -void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
    2.16 +static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
    2.17  {
    2.18  	struct packet_type *ptype;
    2.19  
    2.20 @@ -1112,6 +1113,45 @@ out:
    2.21  	return ret;
    2.22  }
    2.23  
    2.24 +/**
    2.25 + *	skb_gso_segment - Perform segmentation on skb.
    2.26 + *	@skb: buffer to segment
    2.27 + *	@features: features for the output path (see dev->features)
    2.28 + *
    2.29 + *	This function segments the given skb and returns a list of segments.
    2.30 + *
    2.31 + *	It may return NULL if the skb requires no segmentation.  This is
    2.32 + *	only possible when GSO is used for verifying header integrity.
    2.33 + */
    2.34 +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
    2.35 +{
    2.36 +	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
    2.37 +	struct packet_type *ptype;
    2.38 +	int type = skb->protocol;
    2.39 +
    2.40 +	BUG_ON(skb_shinfo(skb)->frag_list);
    2.41 +	BUG_ON(skb->ip_summed != CHECKSUM_HW);
    2.42 +
    2.43 +	skb->mac.raw = skb->data;
    2.44 +	skb->mac_len = skb->nh.raw - skb->data;
    2.45 +	__skb_pull(skb, skb->mac_len);
    2.46 +
    2.47 +	rcu_read_lock();
    2.48 +	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
    2.49 +		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
    2.50 +			segs = ptype->gso_segment(skb, features);
    2.51 +			break;
    2.52 +		}
    2.53 +	}
    2.54 +	rcu_read_unlock();
    2.55 +
    2.56 +	__skb_push(skb, skb->data - skb->mac.raw);
    2.57 +
    2.58 +	return segs;
    2.59 +}
    2.60 +
    2.61 +EXPORT_SYMBOL(skb_gso_segment);
    2.62 +
    2.63  /* Take action when hardware reception checksum errors are detected. */
    2.64  #ifdef CONFIG_BUG
    2.65  void netdev_rx_csum_fault(struct net_device *dev)
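
Before the dev_gso_segment() wrapper below, a hedged sketch of skb_gso_segment()'s return contract as a caller sees it: NULL means the protocol only verified headers, ERR_PTR() signals failure, and otherwise the result is a list of segments chained through skb->next (example_xmit_one is hypothetical):

	struct sk_buff *segs = skb_gso_segment(skb, dev->features);

	if (!segs)
		return 0;			/* header check only; send skb as-is */
	if (IS_ERR(segs))
		return PTR_ERR(segs);		/* e.g. -EPROTONOSUPPORT */

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		example_xmit_one(nskb, dev);	/* hypothetical per-segment send */
	}
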
    2.66 @@ -1148,75 +1188,108 @@ static inline int illegal_highdma(struct
    2.67  #define illegal_highdma(dev, skb)	(0)
    2.68  #endif
    2.69  
    2.70 -/* Keep head the same: replace data */
    2.71 -int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
    2.72 +struct dev_gso_cb {
    2.73 +	void (*destructor)(struct sk_buff *skb);
    2.74 +};
    2.75 +
    2.76 +#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
    2.77 +
    2.78 +static void dev_gso_skb_destructor(struct sk_buff *skb)
    2.79 +{
    2.80 +	struct dev_gso_cb *cb;
    2.81 +
    2.82 +	do {
    2.83 +		struct sk_buff *nskb = skb->next;
    2.84 +
    2.85 +		skb->next = nskb->next;
    2.86 +		nskb->next = NULL;
    2.87 +		kfree_skb(nskb);
    2.88 +	} while (skb->next);
    2.89 +
    2.90 +	cb = DEV_GSO_CB(skb);
    2.91 +	if (cb->destructor)
    2.92 +		cb->destructor(skb);
    2.93 +}
    2.94 +
    2.95 +/**
    2.96 + *	dev_gso_segment - Perform emulated hardware segmentation on skb.
    2.97 + *	@skb: buffer to segment
    2.98 + *
    2.99 + *	This function segments the given skb and stores the list of segments
   2.100 + *	in skb->next.
   2.101 + */
   2.102 +static int dev_gso_segment(struct sk_buff *skb)
   2.103  {
   2.104 -	unsigned int size;
   2.105 -	u8 *data;
   2.106 -	long offset;
   2.107 -	struct skb_shared_info *ninfo;
   2.108 -	int headerlen = skb->data - skb->head;
   2.109 -	int expand = (skb->tail + skb->data_len) - skb->end;
   2.110 -
   2.111 -	if (skb_shared(skb))
   2.112 -		BUG();
   2.113 -
   2.114 -	if (expand <= 0)
   2.115 -		expand = 0;
   2.116 -
   2.117 -	size = skb->end - skb->head + expand;
   2.118 -	size = SKB_DATA_ALIGN(size);
   2.119 -	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
   2.120 -	if (!data)
   2.121 -		return -ENOMEM;
   2.122 -
   2.123 -	/* Copy entire thing */
   2.124 -	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
   2.125 -		BUG();
   2.126 -
   2.127 -	/* Set up shinfo */
   2.128 -	ninfo = (struct skb_shared_info*)(data + size);
   2.129 -	atomic_set(&ninfo->dataref, 1);
   2.130 -	ninfo->tso_size = skb_shinfo(skb)->tso_size;
   2.131 -	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
   2.132 -	ninfo->nr_frags = 0;
   2.133 -	ninfo->frag_list = NULL;
   2.134 -
   2.135 -	/* Offset between the two in bytes */
   2.136 -	offset = data - skb->head;
   2.137 -
   2.138 -	/* Free old data. */
   2.139 -	skb_release_data(skb);
   2.140 -
   2.141 -	skb->head = data;
   2.142 -	skb->end  = data + size;
   2.143 -
   2.144 -	/* Set up new pointers */
   2.145 -	skb->h.raw   += offset;
   2.146 -	skb->nh.raw  += offset;
   2.147 -	skb->mac.raw += offset;
   2.148 -	skb->tail    += offset;
   2.149 -	skb->data    += offset;
   2.150 -
   2.151 -	/* We are no longer a clone, even if we were. */
   2.152 -	skb->cloned    = 0;
   2.153 -
   2.154 -	skb->tail     += skb->data_len;
   2.155 -	skb->data_len  = 0;
   2.156 +	struct net_device *dev = skb->dev;
   2.157 +	struct sk_buff *segs;
   2.158 +	int features = dev->features & ~(illegal_highdma(dev, skb) ?
   2.159 +					 NETIF_F_SG : 0);
   2.160 +
   2.161 +	segs = skb_gso_segment(skb, features);
   2.162 +
   2.163 +	/* Verifying header integrity only. */
   2.164 +	if (!segs)
   2.165 +		return 0;
   2.166 +
   2.167 +	if (unlikely(IS_ERR(segs)))
   2.168 +		return PTR_ERR(segs);
   2.169 +
   2.170 +	skb->next = segs;
   2.171 +	DEV_GSO_CB(skb)->destructor = skb->destructor;
   2.172 +	skb->destructor = dev_gso_skb_destructor;
   2.173 +
   2.174 +	return 0;
   2.175 +}
   2.176 +
   2.177 +int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
   2.178 +{
   2.179 +	if (likely(!skb->next)) {
   2.180 +		if (netdev_nit)
   2.181 +			dev_queue_xmit_nit(skb, dev);
   2.182 +
   2.183 +		if (netif_needs_gso(dev, skb)) {
   2.184 +			if (unlikely(dev_gso_segment(skb)))
   2.185 +				goto out_kfree_skb;
   2.186 +			if (skb->next)
   2.187 +				goto gso;
   2.188 +		}
   2.189 +
   2.190 +		return dev->hard_start_xmit(skb, dev);
   2.191 +	}
   2.192 +
   2.193 +gso:
   2.194 +	do {
   2.195 +		struct sk_buff *nskb = skb->next;
   2.196 +		int rc;
   2.197 +
   2.198 +		skb->next = nskb->next;
   2.199 +		nskb->next = NULL;
   2.200 +		rc = dev->hard_start_xmit(nskb, dev);
   2.201 +		if (unlikely(rc)) {
   2.202 +			nskb->next = skb->next;
   2.203 +			skb->next = nskb;
   2.204 +			return rc;
   2.205 +		}
   2.206 +		if (unlikely(netif_queue_stopped(dev) && skb->next))
   2.207 +			return NETDEV_TX_BUSY;
   2.208 +	} while (skb->next);
   2.209 +	
   2.210 +	skb->destructor = DEV_GSO_CB(skb)->destructor;
   2.211 +
   2.212 +out_kfree_skb:
   2.213 +	kfree_skb(skb);
   2.214  	return 0;
   2.215  }
   2.216  
   2.217  #define HARD_TX_LOCK(dev, cpu) {			\
   2.218  	if ((dev->features & NETIF_F_LLTX) == 0) {	\
   2.219 -		spin_lock(&dev->xmit_lock);		\
   2.220 -		dev->xmit_lock_owner = cpu;		\
   2.221 +		netif_tx_lock(dev);			\
   2.222  	}						\
   2.223  }
   2.224  
   2.225  #define HARD_TX_UNLOCK(dev) {				\
   2.226  	if ((dev->features & NETIF_F_LLTX) == 0) {	\
   2.227 -		dev->xmit_lock_owner = -1;		\
   2.228 -		spin_unlock(&dev->xmit_lock);		\
   2.229 +		netif_tx_unlock(dev);			\
   2.230  	}						\
   2.231  }
   2.232  
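
The non-zero-return path above matters: if the driver rejects a segment, dev_hard_start_xmit() relinks the unsent tail onto skb->next before returning, so the caller can requeue the remainder. A caller-side sketch of that contract (example_requeue is hypothetical; in this patch the real caller is the qdisc restart path):

	int rc = dev_hard_start_xmit(skb, dev);

	if (rc) {
		/* skb, plus any unsent segments still chained on
		 * skb->next, must be requeued and retried later. */
		example_requeue(skb, dev);	/* hypothetical */
	}
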
   2.233 @@ -1289,9 +1362,19 @@ int dev_queue_xmit(struct sk_buff *skb)
   2.234  	struct Qdisc *q;
   2.235  	int rc = -ENOMEM;
   2.236  
   2.237 + 	/* If a checksum-deferred packet is forwarded to a device that needs a
   2.238 + 	 * checksum, correct the pointers and force checksumming.
   2.239 + 	 */
   2.240 + 	if (skb_checksum_setup(skb))
   2.241 + 		goto out_kfree_skb;
   2.242 +
   2.243 +	/* GSO will handle the following emulations directly. */
   2.244 +	if (netif_needs_gso(dev, skb))
   2.245 +		goto gso;
   2.246 +
   2.247  	if (skb_shinfo(skb)->frag_list &&
   2.248  	    !(dev->features & NETIF_F_FRAGLIST) &&
   2.249 -	    __skb_linearize(skb, GFP_ATOMIC))
   2.250 +	    __skb_linearize(skb))
   2.251  		goto out_kfree_skb;
   2.252  
   2.253  	/* Fragmented skb is linearized if device does not support SG,
   2.254 @@ -1300,31 +1383,26 @@ int dev_queue_xmit(struct sk_buff *skb)
   2.255  	 */
   2.256  	if (skb_shinfo(skb)->nr_frags &&
   2.257  	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
   2.258 -	    __skb_linearize(skb, GFP_ATOMIC))
   2.259 +	    __skb_linearize(skb))
   2.260  		goto out_kfree_skb;
   2.261  
   2.262 - 	/* If a checksum-deferred packet is forwarded to a device that needs a
   2.263 - 	 * checksum, correct the pointers and force checksumming.
   2.264 - 	 */
   2.265 - 	if(skb_checksum_setup(skb))
   2.266 - 		goto out_kfree_skb;
   2.267 -  
   2.268  	/* If packet is not checksummed and device does not support
   2.269  	 * checksumming for this protocol, complete checksumming here.
   2.270  	 */
   2.271  	if (skb->ip_summed == CHECKSUM_HW &&
   2.272 -	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
   2.273 +	    (!(dev->features & NETIF_F_GEN_CSUM) &&
   2.274  	     (!(dev->features & NETIF_F_IP_CSUM) ||
   2.275  	      skb->protocol != htons(ETH_P_IP))))
   2.276  	      	if (skb_checksum_help(skb, 0))
   2.277  	      		goto out_kfree_skb;
   2.278  
   2.279 +gso:
   2.280  	spin_lock_prefetch(&dev->queue_lock);
   2.281  
   2.282  	/* Disable soft irqs for various locks below. Also 
   2.283  	 * stops preemption for RCU. 
   2.284  	 */
   2.285 -	local_bh_disable(); 
   2.286 +	rcu_read_lock_bh(); 
   2.287  
   2.288  	/* Updates of qdisc are serialized by queue_lock. 
   2.289  	 * The struct Qdisc which is pointed to by qdisc is now a 
   2.290 @@ -1358,8 +1436,8 @@ int dev_queue_xmit(struct sk_buff *skb)
   2.291  	/* The device has no queue. Common case for software devices:
   2.292  	   loopback, all the sorts of tunnels...
   2.293  
   2.294 -	   Really, it is unlikely that xmit_lock protection is necessary here.
   2.295 -	   (f.e. loopback and IP tunnels are clean ignoring statistics
   2.296 +	   Really, it is unlikely that netif_tx_lock protection is necessary
   2.297 +	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
   2.298  	   counters.)
   2.299  	   However, it is possible, that they rely on protection
   2.300  	   made by us here.
   2.301 @@ -1375,11 +1453,8 @@ int dev_queue_xmit(struct sk_buff *skb)
   2.302  			HARD_TX_LOCK(dev, cpu);
   2.303  
   2.304  			if (!netif_queue_stopped(dev)) {
   2.305 -				if (netdev_nit)
   2.306 -					dev_queue_xmit_nit(skb, dev);
   2.307 -
   2.308  				rc = 0;
   2.309 -				if (!dev->hard_start_xmit(skb, dev)) {
   2.310 +				if (!dev_hard_start_xmit(skb, dev)) {
   2.311  					HARD_TX_UNLOCK(dev);
   2.312  					goto out;
   2.313  				}
   2.314 @@ -1398,13 +1473,13 @@ int dev_queue_xmit(struct sk_buff *skb)
   2.315  	}
   2.316  
   2.317  	rc = -ENETDOWN;
   2.318 -	local_bh_enable();
   2.319 +	rcu_read_unlock_bh();
   2.320  
   2.321  out_kfree_skb:
   2.322  	kfree_skb(skb);
   2.323  	return rc;
   2.324  out:
   2.325 -	local_bh_enable();
   2.326 +	rcu_read_unlock_bh();
   2.327  	return rc;
   2.328  }
   2.329  
   2.330 @@ -2732,7 +2807,7 @@ int register_netdevice(struct net_device
   2.331  	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
   2.332  
   2.333  	spin_lock_init(&dev->queue_lock);
   2.334 -	spin_lock_init(&dev->xmit_lock);
   2.335 +	spin_lock_init(&dev->_xmit_lock);
   2.336  	dev->xmit_lock_owner = -1;
   2.337  #ifdef CONFIG_NET_CLS_ACT
   2.338  	spin_lock_init(&dev->ingress_lock);
   2.339 @@ -2776,9 +2851,7 @@ int register_netdevice(struct net_device
   2.340  
   2.341  	/* Fix illegal SG+CSUM combinations. */
   2.342  	if ((dev->features & NETIF_F_SG) &&
   2.343 -	    !(dev->features & (NETIF_F_IP_CSUM |
   2.344 -			       NETIF_F_NO_CSUM |
   2.345 -			       NETIF_F_HW_CSUM))) {
   2.346 +	    !(dev->features & NETIF_F_ALL_CSUM)) {
   2.347  		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
   2.348  		       dev->name);
   2.349  		dev->features &= ~NETIF_F_SG;
   2.350 @@ -3330,7 +3403,6 @@ subsys_initcall(net_dev_init);
   2.351  EXPORT_SYMBOL(__dev_get_by_index);
   2.352  EXPORT_SYMBOL(__dev_get_by_name);
   2.353  EXPORT_SYMBOL(__dev_remove_pack);
   2.354 -EXPORT_SYMBOL(__skb_linearize);
   2.355  EXPORT_SYMBOL(dev_valid_name);
   2.356  EXPORT_SYMBOL(dev_add_pack);
   2.357  EXPORT_SYMBOL(dev_alloc_name);
     3.1 --- a/linux-2.6-xen-sparse/net/core/skbuff.c	Wed Jun 28 10:32:43 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/net/core/skbuff.c	Wed Jun 28 12:03:01 2006 +0100
     3.3 @@ -165,9 +165,9 @@ struct sk_buff *__alloc_skb(unsigned int
     3.4  	shinfo = skb_shinfo(skb);
     3.5  	atomic_set(&shinfo->dataref, 1);
     3.6  	shinfo->nr_frags  = 0;
     3.7 -	shinfo->tso_size = 0;
     3.8 -	shinfo->tso_segs = 0;
     3.9 -	shinfo->ufo_size = 0;
    3.10 +	shinfo->gso_size = 0;
    3.11 +	shinfo->gso_segs = 0;
    3.12 +	shinfo->gso_type = 0;
    3.13  	shinfo->ip6_frag_id = 0;
    3.14  	shinfo->frag_list = NULL;
    3.15  
    3.16 @@ -237,9 +237,9 @@ struct sk_buff *alloc_skb_from_cache(kme
    3.17  	shinfo = skb_shinfo(skb);
    3.18  	atomic_set(&shinfo->dataref, 1);
    3.19  	shinfo->nr_frags  = 0;
    3.20 -	shinfo->tso_size = 0;
    3.21 -	shinfo->tso_segs = 0;
    3.22 -	shinfo->ufo_size = 0;
    3.23 +	shinfo->gso_size = 0;
    3.24 +	shinfo->gso_segs = 0;
    3.25 +	shinfo->gso_type = 0;
    3.26  	shinfo->ip6_frag_id = 0;
    3.27  	shinfo->frag_list = NULL;
    3.28  
    3.29 @@ -524,8 +524,9 @@ static void copy_skb_header(struct sk_bu
    3.30  	new->tc_index	= old->tc_index;
    3.31  #endif
    3.32  	atomic_set(&new->users, 1);
    3.33 -	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
    3.34 -	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
    3.35 +	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
    3.36 +	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
    3.37 +	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
    3.38  }
    3.39  
    3.40  /**
    3.41 @@ -1800,6 +1801,133 @@ int skb_append_datato_frags(struct sock 
    3.42  	return 0;
    3.43  }
    3.44  
    3.45 +/**
    3.46 + *	skb_segment - Perform protocol segmentation on skb.
    3.47 + *	@skb: buffer to segment
    3.48 + *	@features: features for the output path (see dev->features)
    3.49 + *
     3.50 + *	This function performs segmentation on the given skb.  It
     3.51 + *	returns a pointer to the first of the new segment skbs, chained
     3.52 + *	through ->next, or ERR_PTR(err) on failure.
    3.53 + */
    3.54 +struct sk_buff *skb_segment(struct sk_buff *skb, int features)
    3.55 +{
    3.56 +	struct sk_buff *segs = NULL;
    3.57 +	struct sk_buff *tail = NULL;
    3.58 +	unsigned int mss = skb_shinfo(skb)->gso_size;
    3.59 +	unsigned int doffset = skb->data - skb->mac.raw;
    3.60 +	unsigned int offset = doffset;
    3.61 +	unsigned int headroom;
    3.62 +	unsigned int len;
    3.63 +	int sg = features & NETIF_F_SG;
    3.64 +	int nfrags = skb_shinfo(skb)->nr_frags;
    3.65 +	int err = -ENOMEM;
    3.66 +	int i = 0;
    3.67 +	int pos;
    3.68 +
    3.69 +	__skb_push(skb, doffset);
    3.70 +	headroom = skb_headroom(skb);
    3.71 +	pos = skb_headlen(skb);
    3.72 +
    3.73 +	do {
    3.74 +		struct sk_buff *nskb;
    3.75 +		skb_frag_t *frag;
    3.76 +		int hsize, nsize;
    3.77 +		int k;
    3.78 +		int size;
    3.79 +
    3.80 +		len = skb->len - offset;
    3.81 +		if (len > mss)
    3.82 +			len = mss;
    3.83 +
    3.84 +		hsize = skb_headlen(skb) - offset;
    3.85 +		if (hsize < 0)
    3.86 +			hsize = 0;
    3.87 +		nsize = hsize + doffset;
    3.88 +		if (nsize > len + doffset || !sg)
    3.89 +			nsize = len + doffset;
    3.90 +
    3.91 +		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
    3.92 +		if (unlikely(!nskb))
    3.93 +			goto err;
    3.94 +
    3.95 +		if (segs)
    3.96 +			tail->next = nskb;
    3.97 +		else
    3.98 +			segs = nskb;
    3.99 +		tail = nskb;
   3.100 +
   3.101 +		nskb->dev = skb->dev;
   3.102 +		nskb->priority = skb->priority;
   3.103 +		nskb->protocol = skb->protocol;
   3.104 +		nskb->dst = dst_clone(skb->dst);
   3.105 +		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
   3.106 +		nskb->pkt_type = skb->pkt_type;
   3.107 +		nskb->mac_len = skb->mac_len;
   3.108 +
   3.109 +		skb_reserve(nskb, headroom);
   3.110 +		nskb->mac.raw = nskb->data;
   3.111 +		nskb->nh.raw = nskb->data + skb->mac_len;
   3.112 +		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
   3.113 +		memcpy(skb_put(nskb, doffset), skb->data, doffset);
   3.114 +
   3.115 +		if (!sg) {
   3.116 +			nskb->csum = skb_copy_and_csum_bits(skb, offset,
   3.117 +							    skb_put(nskb, len),
   3.118 +							    len, 0);
   3.119 +			continue;
   3.120 +		}
   3.121 +
   3.122 +		frag = skb_shinfo(nskb)->frags;
   3.123 +		k = 0;
   3.124 +
   3.125 +		nskb->ip_summed = CHECKSUM_HW;
   3.126 +		nskb->csum = skb->csum;
   3.127 +		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
   3.128 +
   3.129 +		while (pos < offset + len) {
   3.130 +			BUG_ON(i >= nfrags);
   3.131 +
   3.132 +			*frag = skb_shinfo(skb)->frags[i];
   3.133 +			get_page(frag->page);
   3.134 +			size = frag->size;
   3.135 +
   3.136 +			if (pos < offset) {
   3.137 +				frag->page_offset += offset - pos;
   3.138 +				frag->size -= offset - pos;
   3.139 +			}
   3.140 +
   3.141 +			k++;
   3.142 +
   3.143 +			if (pos + size <= offset + len) {
   3.144 +				i++;
   3.145 +				pos += size;
   3.146 +			} else {
   3.147 +				frag->size -= pos + size - (offset + len);
   3.148 +				break;
   3.149 +			}
   3.150 +
   3.151 +			frag++;
   3.152 +		}
   3.153 +
   3.154 +		skb_shinfo(nskb)->nr_frags = k;
   3.155 +		nskb->data_len = len - hsize;
   3.156 +		nskb->len += nskb->data_len;
   3.157 +		nskb->truesize += nskb->data_len;
   3.158 +	} while ((offset += len) < skb->len);
   3.159 +
   3.160 +	return segs;
   3.161 +
   3.162 +err:
   3.163 +	while ((skb = segs)) {
   3.164 +		segs = skb->next;
   3.165 +		kfree(skb);
   3.166 +	}
   3.167 +	return ERR_PTR(err);
   3.168 +}
   3.169 +
   3.170 +EXPORT_SYMBOL_GPL(skb_segment);
   3.171 +
   3.172  void __init skb_init(void)
   3.173  {
   3.174  	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
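
skb_segment() does the generic payload splitting; each protocol's ->gso_segment callback wraps it and then rewrites per-segment headers. A hedged sketch of that shape (example_fixup_headers is hypothetical; the real TCP callback is added by the net-gso.patch below):

	static struct sk_buff *example_gso_segment(struct sk_buff *skb,
						   int features)
	{
		struct sk_buff *segs = skb_segment(skb, features);

		if (!IS_ERR(segs)) {
			struct sk_buff *seg;

			/* e.g. bump seq/IP id and refresh checksums */
			for (seg = segs; seg; seg = seg->next)
				example_fixup_headers(seg);
		}
		return segs;
	}
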
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/patches/linux-2.6.16.13/net-gso.patch	Wed Jun 28 12:03:01 2006 +0100
     4.3 @@ -0,0 +1,2907 @@
     4.4 +diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
     4.5 +index 3c0a5ba..847cedb 100644
     4.6 +--- a/Documentation/networking/netdevices.txt
     4.7 ++++ b/Documentation/networking/netdevices.txt
     4.8 +@@ -42,9 +42,9 @@ dev->get_stats:
     4.9 + 	Context: nominally process, but don't sleep inside an rwlock
    4.10 + 
    4.11 + dev->hard_start_xmit:
    4.12 +-	Synchronization: dev->xmit_lock spinlock.
    4.13 ++	Synchronization: netif_tx_lock spinlock.
    4.14 + 	When the driver sets NETIF_F_LLTX in dev->features this will be
    4.15 +-	called without holding xmit_lock. In this case the driver 
    4.16 ++	called without holding netif_tx_lock. In this case the driver
    4.17 + 	has to lock by itself when needed. It is recommended to use a try lock
    4.18 + 	for this and return -1 when the spin lock fails. 
    4.19 + 	The locking there should also properly protect against 
    4.20 +@@ -62,12 +62,12 @@ dev->hard_start_xmit:
    4.21 + 	  Only valid when NETIF_F_LLTX is set.
    4.22 + 
    4.23 + dev->tx_timeout:
    4.24 +-	Synchronization: dev->xmit_lock spinlock.
    4.25 ++	Synchronization: netif_tx_lock spinlock.
    4.26 + 	Context: BHs disabled
    4.27 + 	Notes: netif_queue_stopped() is guaranteed true
    4.28 + 
    4.29 + dev->set_multicast_list:
    4.30 +-	Synchronization: dev->xmit_lock spinlock.
    4.31 ++	Synchronization: netif_tx_lock spinlock.
    4.32 + 	Context: BHs disabled
    4.33 + 
    4.34 + dev->poll:
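
In driver terms, the rule above reduces to replacing direct xmit_lock manipulation with the helpers. A minimal sketch, assuming the netif_tx_lock_bh()/netif_tx_unlock_bh() definitions this patch adds to netdevice.h (example_reprogram_filters is hypothetical):

	static void example_set_multicast_list(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);	/* was: spin_lock_bh(&dev->xmit_lock) */
		example_reprogram_filters(dev);
		netif_tx_unlock_bh(dev);
	}
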
    4.35 +diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
    4.36 +index 4be9769..2e7cac7 100644
    4.37 +--- a/drivers/block/aoe/aoenet.c
    4.38 ++++ b/drivers/block/aoe/aoenet.c
    4.39 +@@ -95,9 +95,8 @@ mac_addr(char addr[6])
    4.40 + static struct sk_buff *
    4.41 + skb_check(struct sk_buff *skb)
    4.42 + {
    4.43 +-	if (skb_is_nonlinear(skb))
    4.44 + 	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
    4.45 +-	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
    4.46 ++	if (skb_linearize(skb)) {
    4.47 + 		dev_kfree_skb(skb);
    4.48 + 		return NULL;
    4.49 + 	}
    4.50 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    4.51 +index a2408d7..c90e620 100644
    4.52 +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    4.53 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
    4.54 +@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
    4.55 + 
    4.56 + 	ipoib_mcast_stop_thread(dev, 0);
    4.57 + 
    4.58 +-	spin_lock_irqsave(&dev->xmit_lock, flags);
    4.59 ++	local_irq_save(flags);
    4.60 ++	netif_tx_lock(dev);
    4.61 + 	spin_lock(&priv->lock);
    4.62 + 
    4.63 + 	/*
    4.64 +@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
    4.65 + 	}
    4.66 + 
    4.67 + 	spin_unlock(&priv->lock);
    4.68 +-	spin_unlock_irqrestore(&dev->xmit_lock, flags);
    4.69 ++	netif_tx_unlock(dev);
    4.70 ++	local_irq_restore(flags);
    4.71 + 
    4.72 + 	/* We have to cancel outside of the spinlock */
    4.73 + 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
    4.74 +diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
    4.75 +index 6711eb6..8d2351f 100644
    4.76 +--- a/drivers/media/dvb/dvb-core/dvb_net.c
    4.77 ++++ b/drivers/media/dvb/dvb-core/dvb_net.c
    4.78 +@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 
    4.79 + 
    4.80 + 	dvb_net_feed_stop(dev);
    4.81 + 	priv->rx_mode = RX_MODE_UNI;
    4.82 +-	spin_lock_bh(&dev->xmit_lock);
    4.83 ++	netif_tx_lock_bh(dev);
    4.84 + 
    4.85 + 	if (dev->flags & IFF_PROMISC) {
    4.86 + 		dprintk("%s: promiscuous mode\n", dev->name);
    4.87 +@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void 
    4.88 + 		}
    4.89 + 	}
    4.90 + 
    4.91 +-	spin_unlock_bh(&dev->xmit_lock);
    4.92 ++	netif_tx_unlock_bh(dev);
    4.93 + 	dvb_net_feed_start(dev);
    4.94 + }
    4.95 + 
    4.96 +diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
    4.97 +index dd41049..6615583 100644
    4.98 +--- a/drivers/net/8139cp.c
    4.99 ++++ b/drivers/net/8139cp.c
   4.100 +@@ -794,7 +794,7 @@ #endif
   4.101 + 	entry = cp->tx_head;
   4.102 + 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
   4.103 + 	if (dev->features & NETIF_F_TSO)
   4.104 +-		mss = skb_shinfo(skb)->tso_size;
   4.105 ++		mss = skb_shinfo(skb)->gso_size;
   4.106 + 
   4.107 + 	if (skb_shinfo(skb)->nr_frags == 0) {
   4.108 + 		struct cp_desc *txd = &cp->tx_ring[entry];
   4.109 +diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
   4.110 +index a24200d..b5e39a1 100644
   4.111 +--- a/drivers/net/bnx2.c
   4.112 ++++ b/drivers/net/bnx2.c
   4.113 +@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
   4.114 + 		skb = tx_buf->skb;
   4.115 + #ifdef BCM_TSO 
   4.116 + 		/* partial BD completions possible with TSO packets */
   4.117 +-		if (skb_shinfo(skb)->tso_size) {
   4.118 ++		if (skb_shinfo(skb)->gso_size) {
   4.119 + 			u16 last_idx, last_ring_idx;
   4.120 + 
   4.121 + 			last_idx = sw_cons +
   4.122 +@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
   4.123 + 	return 1;
   4.124 + }
   4.125 + 
   4.126 +-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
   4.127 ++/* Called with rtnl_lock from vlan functions and also netif_tx_lock
   4.128 +  * from set_multicast.
   4.129 +  */
   4.130 + static void
   4.131 +@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 
   4.132 + }
   4.133 + #endif
   4.134 + 
   4.135 +-/* Called with dev->xmit_lock.
   4.136 ++/* Called with netif_tx_lock.
   4.137 +  * hard_start_xmit is pseudo-lockless - a lock is only required when
   4.138 +  * the tx queue is full. This way, we get the benefit of lockless
   4.139 +  * operations most of the time without the complexities to handle
   4.140 +@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
   4.141 + 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
   4.142 + 	}
   4.143 + #ifdef BCM_TSO 
   4.144 +-	if ((mss = skb_shinfo(skb)->tso_size) &&
   4.145 ++	if ((mss = skb_shinfo(skb)->gso_size) &&
   4.146 + 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
   4.147 + 		u32 tcp_opt_len, ip_tcp_len;
   4.148 + 
   4.149 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
   4.150 +index bcf9f17..e970921 100644
   4.151 +--- a/drivers/net/bonding/bond_main.c
   4.152 ++++ b/drivers/net/bonding/bond_main.c
   4.153 +@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
   4.154 + }
   4.155 + 
   4.156 + #define BOND_INTERSECT_FEATURES \
   4.157 +-	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
   4.158 +-	NETIF_F_TSO|NETIF_F_UFO)
   4.159 ++	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
   4.160 + 
   4.161 + /* 
   4.162 +  * Compute the common dev->feature set available to all slaves.  Some
   4.163 +@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 
   4.164 + 		features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
   4.165 + 
   4.166 + 	if ((features & NETIF_F_SG) && 
   4.167 +-	    !(features & (NETIF_F_IP_CSUM |
   4.168 +-			  NETIF_F_NO_CSUM |
   4.169 +-			  NETIF_F_HW_CSUM)))
   4.170 ++	    !(features & NETIF_F_ALL_CSUM))
   4.171 + 		features &= ~NETIF_F_SG;
   4.172 + 
   4.173 + 	/* 
   4.174 +@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
   4.175 + 	 */
   4.176 + 	bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
   4.177 + 
   4.178 +-	/* don't acquire bond device's xmit_lock when 
   4.179 ++	/* don't acquire bond device's netif_tx_lock when
   4.180 + 	 * transmitting */
   4.181 + 	bond_dev->features |= NETIF_F_LLTX;
   4.182 + 
   4.183 +diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
   4.184 +index 30ff8ea..7b7d360 100644
   4.185 +--- a/drivers/net/chelsio/sge.c
   4.186 ++++ b/drivers/net/chelsio/sge.c
   4.187 +@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
   4.188 + 	struct cpl_tx_pkt *cpl;
   4.189 + 
   4.190 + #ifdef NETIF_F_TSO
   4.191 +-	if (skb_shinfo(skb)->tso_size) {
   4.192 ++	if (skb_shinfo(skb)->gso_size) {
   4.193 + 		int eth_type;
   4.194 + 		struct cpl_tx_pkt_lso *hdr;
   4.195 + 
   4.196 +@@ -1434,7 +1434,7 @@ #ifdef NETIF_F_TSO
   4.197 + 		hdr->ip_hdr_words = skb->nh.iph->ihl;
   4.198 + 		hdr->tcp_hdr_words = skb->h.th->doff;
   4.199 + 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
   4.200 +-						skb_shinfo(skb)->tso_size));
   4.201 ++						skb_shinfo(skb)->gso_size));
   4.202 + 		hdr->len = htonl(skb->len - sizeof(*hdr));
   4.203 + 		cpl = (struct cpl_tx_pkt *)hdr;
   4.204 + 		sge->stats.tx_lso_pkts++;
   4.205 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
   4.206 +index fa29402..681d284 100644
   4.207 +--- a/drivers/net/e1000/e1000_main.c
   4.208 ++++ b/drivers/net/e1000/e1000_main.c
   4.209 +@@ -2526,7 +2526,7 @@ #ifdef NETIF_F_TSO
   4.210 + 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
   4.211 + 	int err;
   4.212 + 
   4.213 +-	if (skb_shinfo(skb)->tso_size) {
   4.214 ++	if (skb_shinfo(skb)->gso_size) {
   4.215 + 		if (skb_header_cloned(skb)) {
   4.216 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   4.217 + 			if (err)
   4.218 +@@ -2534,7 +2534,7 @@ #ifdef NETIF_F_TSO
   4.219 + 		}
   4.220 + 
   4.221 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   4.222 +-		mss = skb_shinfo(skb)->tso_size;
   4.223 ++		mss = skb_shinfo(skb)->gso_size;
   4.224 + 		if (skb->protocol == ntohs(ETH_P_IP)) {
   4.225 + 			skb->nh.iph->tot_len = 0;
   4.226 + 			skb->nh.iph->check = 0;
   4.227 +@@ -2651,7 +2651,7 @@ #ifdef NETIF_F_TSO
   4.228 + 		 * tso gets written back prematurely before the data is fully
   4.229 + 		 * DMAd to the controller */
   4.230 + 		if (!skb->data_len && tx_ring->last_tx_tso &&
   4.231 +-				!skb_shinfo(skb)->tso_size) {
   4.232 ++				!skb_shinfo(skb)->gso_size) {
   4.233 + 			tx_ring->last_tx_tso = 0;
   4.234 + 			size -= 4;
   4.235 + 		}
   4.236 +@@ -2893,7 +2893,7 @@ #endif
   4.237 + 	}
   4.238 + 
   4.239 + #ifdef NETIF_F_TSO
   4.240 +-	mss = skb_shinfo(skb)->tso_size;
   4.241 ++	mss = skb_shinfo(skb)->gso_size;
   4.242 + 	/* The controller does a simple calculation to 
   4.243 + 	 * make sure there is enough room in the FIFO before
   4.244 + 	 * initiating the DMA for each buffer.  The calc is:
   4.245 +@@ -2935,7 +2935,7 @@ #endif
   4.246 + #ifdef NETIF_F_TSO
   4.247 + 	/* Controller Erratum workaround */
   4.248 + 	if (!skb->data_len && tx_ring->last_tx_tso &&
   4.249 +-		!skb_shinfo(skb)->tso_size)
   4.250 ++		!skb_shinfo(skb)->gso_size)
   4.251 + 		count++;
   4.252 + #endif
   4.253 + 
   4.254 +diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
   4.255 +index 3682ec6..c35f16e 100644
   4.256 +--- a/drivers/net/forcedeth.c
   4.257 ++++ b/drivers/net/forcedeth.c
   4.258 +@@ -482,9 +482,9 @@ #define LPA_1000HALF	0x0400
   4.259 +  * critical parts:
   4.260 +  * - rx is (pseudo-) lockless: it relies on the single-threading provided
   4.261 +  *	by the arch code for interrupts.
   4.262 +- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
   4.263 ++ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
   4.264 +  *	needs dev->priv->lock :-(
   4.265 +- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
   4.266 ++ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
   4.267 +  */
   4.268 + 
   4.269 + /* in dev: base, irq */
   4.270 +@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
   4.271 + 
   4.272 + /*
   4.273 +  * nv_start_xmit: dev->hard_start_xmit function
   4.274 +- * Called with dev->xmit_lock held.
   4.275 ++ * Called with netif_tx_lock held.
   4.276 +  */
   4.277 + static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
   4.278 + {
   4.279 +@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 
   4.280 + 	np->tx_skbuff[nr] = skb;
   4.281 + 
   4.282 + #ifdef NETIF_F_TSO
   4.283 +-	if (skb_shinfo(skb)->tso_size)
   4.284 +-		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
   4.285 ++	if (skb_shinfo(skb)->gso_size)
   4.286 ++		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
   4.287 + 	else
   4.288 + #endif
   4.289 + 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
   4.290 +@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
   4.291 + 
   4.292 + /*
   4.293 +  * nv_tx_timeout: dev->tx_timeout function
   4.294 +- * Called with dev->xmit_lock held.
   4.295 ++ * Called with netif_tx_lock held.
   4.296 +  */
   4.297 + static void nv_tx_timeout(struct net_device *dev)
   4.298 + {
   4.299 +@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
   4.300 + 		 * Changing the MTU is a rare event, it shouldn't matter.
   4.301 + 		 */
   4.302 + 		disable_irq(dev->irq);
   4.303 +-		spin_lock_bh(&dev->xmit_lock);
   4.304 ++		netif_tx_lock_bh(dev);
   4.305 + 		spin_lock(&np->lock);
   4.306 + 		/* stop engines */
   4.307 + 		nv_stop_rx(dev);
   4.308 +@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
   4.309 + 		nv_start_rx(dev);
   4.310 + 		nv_start_tx(dev);
   4.311 + 		spin_unlock(&np->lock);
   4.312 +-		spin_unlock_bh(&dev->xmit_lock);
   4.313 ++		netif_tx_unlock_bh(dev);
   4.314 + 		enable_irq(dev->irq);
   4.315 + 	}
   4.316 + 	return 0;
   4.317 +@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
   4.318 + 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
   4.319 + 
   4.320 + 	if (netif_running(dev)) {
   4.321 +-		spin_lock_bh(&dev->xmit_lock);
   4.322 ++		netif_tx_lock_bh(dev);
   4.323 + 		spin_lock_irq(&np->lock);
   4.324 + 
   4.325 + 		/* stop rx engine */
   4.326 +@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
   4.327 + 		/* restart rx engine */
   4.328 + 		nv_start_rx(dev);
   4.329 + 		spin_unlock_irq(&np->lock);
   4.330 +-		spin_unlock_bh(&dev->xmit_lock);
   4.331 ++		netif_tx_unlock_bh(dev);
   4.332 + 	} else {
   4.333 + 		nv_copy_mac_to_hw(dev);
   4.334 + 	}
   4.335 +@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
   4.336 + 
   4.337 + /*
   4.338 +  * nv_set_multicast: dev->set_multicast function
   4.339 +- * Called with dev->xmit_lock held.
   4.340 ++ * Called with netif_tx_lock held.
   4.341 +  */
   4.342 + static void nv_set_multicast(struct net_device *dev)
   4.343 + {
   4.344 +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
   4.345 +index 102c1f0..d12605f 100644
   4.346 +--- a/drivers/net/hamradio/6pack.c
   4.347 ++++ b/drivers/net/hamradio/6pack.c
   4.348 +@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
   4.349 + {
   4.350 + 	struct sockaddr_ax25 *sa = addr;
   4.351 + 
   4.352 +-	spin_lock_irq(&dev->xmit_lock);
   4.353 ++	netif_tx_lock_bh(dev);
   4.354 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   4.355 +-	spin_unlock_irq(&dev->xmit_lock);
   4.356 ++	netif_tx_unlock_bh(dev);
   4.357 + 
   4.358 + 	return 0;
   4.359 + }
   4.360 +@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
   4.361 + 			break;
   4.362 + 		}
   4.363 + 
   4.364 +-		spin_lock_irq(&dev->xmit_lock);
   4.365 ++		netif_tx_lock_bh(dev);
   4.366 + 		memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
   4.367 +-		spin_unlock_irq(&dev->xmit_lock);
   4.368 ++		netif_tx_unlock_bh(dev);
   4.369 + 
   4.370 + 		err = 0;
   4.371 + 		break;
   4.372 +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
   4.373 +index dc5e9d5..5c66f5a 100644
   4.374 +--- a/drivers/net/hamradio/mkiss.c
   4.375 ++++ b/drivers/net/hamradio/mkiss.c
   4.376 +@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
   4.377 + {
   4.378 + 	struct sockaddr_ax25 *sa = addr;
   4.379 + 
   4.380 +-	spin_lock_irq(&dev->xmit_lock);
   4.381 ++	netif_tx_lock_bh(dev);
   4.382 + 	memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
   4.383 +-	spin_unlock_irq(&dev->xmit_lock);
   4.384 ++	netif_tx_unlock_bh(dev);
   4.385 + 
   4.386 + 	return 0;
   4.387 + }
   4.388 +@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
   4.389 + 			break;
   4.390 + 		}
   4.391 + 
   4.392 +-		spin_lock_irq(&dev->xmit_lock);
   4.393 ++		netif_tx_lock_bh(dev);
   4.394 + 		memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
   4.395 +-		spin_unlock_irq(&dev->xmit_lock);
   4.396 ++		netif_tx_unlock_bh(dev);
   4.397 + 
   4.398 + 		err = 0;
   4.399 + 		break;
   4.400 +diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
   4.401 +index 31fb2d7..2e222ef 100644
   4.402 +--- a/drivers/net/ifb.c
   4.403 ++++ b/drivers/net/ifb.c
   4.404 +@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
   4.405 + 	dp->st_task_enter++;
   4.406 + 	if ((skb = skb_peek(&dp->tq)) == NULL) {
   4.407 + 		dp->st_txq_refl_try++;
   4.408 +-		if (spin_trylock(&_dev->xmit_lock)) {
   4.409 ++		if (netif_tx_trylock(_dev)) {
   4.410 + 			dp->st_rxq_enter++;
   4.411 + 			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
   4.412 + 				skb_queue_tail(&dp->tq, skb);
   4.413 + 				dp->st_rx2tx_tran++;
   4.414 + 			}
   4.415 +-			spin_unlock(&_dev->xmit_lock);
   4.416 ++			netif_tx_unlock(_dev);
   4.417 + 		} else {
   4.418 + 			/* reschedule */
   4.419 + 			dp->st_rxq_notenter++;
   4.420 +@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
   4.421 + 		}
   4.422 + 	}
   4.423 + 
   4.424 +-	if (spin_trylock(&_dev->xmit_lock)) {
   4.425 ++	if (netif_tx_trylock(_dev)) {
   4.426 + 		dp->st_rxq_check++;
   4.427 + 		if ((skb = skb_peek(&dp->rq)) == NULL) {
   4.428 + 			dp->tasklet_pending = 0;
   4.429 +@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
   4.430 + 				netif_wake_queue(_dev);
   4.431 + 		} else {
   4.432 + 			dp->st_rxq_rsch++;
   4.433 +-			spin_unlock(&_dev->xmit_lock);
   4.434 ++			netif_tx_unlock(_dev);
   4.435 + 			goto resched;
   4.436 + 		}
   4.437 +-		spin_unlock(&_dev->xmit_lock);
   4.438 ++		netif_tx_unlock(_dev);
   4.439 + 	} else {
   4.440 + resched:
   4.441 + 		dp->tasklet_pending = 1;
   4.442 +diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
   4.443 +index a9f49f0..339d4a7 100644
   4.444 +--- a/drivers/net/irda/vlsi_ir.c
   4.445 ++++ b/drivers/net/irda/vlsi_ir.c
   4.446 +@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
   4.447 + 			    ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
   4.448 + 			    	break;
   4.449 + 			udelay(100);
   4.450 +-			/* must not sleep here - we are called under xmit_lock! */
   4.451 ++			/* must not sleep here - called under netif_tx_lock! */
   4.452 + 		}
   4.453 + 	}
   4.454 + 
   4.455 +diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
   4.456 +index f9f77e4..bdab369 100644
   4.457 +--- a/drivers/net/ixgb/ixgb_main.c
   4.458 ++++ b/drivers/net/ixgb/ixgb_main.c
   4.459 +@@ -1163,7 +1163,7 @@ #ifdef NETIF_F_TSO
   4.460 + 	uint16_t ipcse, tucse, mss;
   4.461 + 	int err;
   4.462 + 
   4.463 +-	if(likely(skb_shinfo(skb)->tso_size)) {
   4.464 ++	if(likely(skb_shinfo(skb)->gso_size)) {
   4.465 + 		if (skb_header_cloned(skb)) {
   4.466 + 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   4.467 + 			if (err)
   4.468 +@@ -1171,7 +1171,7 @@ #ifdef NETIF_F_TSO
   4.469 + 		}
   4.470 + 
   4.471 + 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
   4.472 +-		mss = skb_shinfo(skb)->tso_size;
   4.473 ++		mss = skb_shinfo(skb)->gso_size;
   4.474 + 		skb->nh.iph->tot_len = 0;
   4.475 + 		skb->nh.iph->check = 0;
   4.476 + 		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
   4.477 +diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
   4.478 +index 690a1aa..9bcaa80 100644
   4.479 +--- a/drivers/net/loopback.c
   4.480 ++++ b/drivers/net/loopback.c
   4.481 +@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
   4.482 + 	struct iphdr *iph = skb->nh.iph;
   4.483 + 	struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
   4.484 + 	unsigned int doffset = (iph->ihl + th->doff) * 4;
   4.485 +-	unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
   4.486 ++	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
   4.487 + 	unsigned int offset = 0;
   4.488 + 	u32 seq = ntohl(th->seq);
   4.489 + 	u16 id  = ntohs(iph->id);
   4.490 +@@ -139,7 +139,7 @@ #ifndef LOOPBACK_MUST_CHECKSUM
   4.491 + #endif
   4.492 + 
   4.493 + #ifdef LOOPBACK_TSO
   4.494 +-	if (skb_shinfo(skb)->tso_size) {
   4.495 ++	if (skb_shinfo(skb)->gso_size) {
   4.496 + 		BUG_ON(skb->protocol != htons(ETH_P_IP));
   4.497 + 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
   4.498 + 
   4.499 +diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
   4.500 +index c0998ef..0fac9d5 100644
   4.501 +--- a/drivers/net/mv643xx_eth.c
   4.502 ++++ b/drivers/net/mv643xx_eth.c
   4.503 +@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
   4.504 + 
   4.505 + #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
   4.506 + 	if (has_tiny_unaligned_frags(skb)) {
   4.507 +-		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
   4.508 ++		if (__skb_linearize(skb)) {
   4.509 + 			stats->tx_dropped++;
   4.510 + 			printk(KERN_DEBUG "%s: failed to linearize tiny "
   4.511 + 					"unaligned fragment\n", dev->name);
   4.512 +diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
   4.513 +index 9d6d254..c9ed624 100644
   4.514 +--- a/drivers/net/natsemi.c
   4.515 ++++ b/drivers/net/natsemi.c
   4.516 +@@ -323,12 +323,12 @@ performance critical codepaths:
   4.517 + The rx process only runs in the interrupt handler. Access from outside
   4.518 + the interrupt handler is only permitted after disable_irq().
   4.519 + 
   4.520 +-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
   4.521 ++The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
   4.522 + is set, then access is permitted under spin_lock_irq(&np->lock).
   4.523 + 
   4.524 + Thus configuration functions that want to access everything must call
   4.525 + 	disable_irq(dev->irq);
   4.526 +-	spin_lock_bh(dev->xmit_lock);
   4.527 ++	netif_tx_lock_bh(dev);
   4.528 + 	spin_lock_irq(&np->lock);
   4.529 + 
   4.530 + IV. Notes
   4.531 +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
   4.532 +index 8cc0d0b..e53b313 100644
   4.533 +--- a/drivers/net/r8169.c
   4.534 ++++ b/drivers/net/r8169.c
   4.535 +@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
   4.536 + static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
   4.537 + {
   4.538 + 	if (dev->features & NETIF_F_TSO) {
   4.539 +-		u32 mss = skb_shinfo(skb)->tso_size;
   4.540 ++		u32 mss = skb_shinfo(skb)->gso_size;
   4.541 + 
   4.542 + 		if (mss)
   4.543 + 			return LargeSend | ((mss & MSSMask) << MSSShift);
   4.544 +diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
   4.545 +index b7f00d6..439f45f 100644
   4.546 +--- a/drivers/net/s2io.c
   4.547 ++++ b/drivers/net/s2io.c
   4.548 +@@ -3522,8 +3522,8 @@ #endif
   4.549 + 	txdp->Control_1 = 0;
   4.550 + 	txdp->Control_2 = 0;
   4.551 + #ifdef NETIF_F_TSO
   4.552 +-	mss = skb_shinfo(skb)->tso_size;
   4.553 +-	if (mss) {
   4.554 ++	mss = skb_shinfo(skb)->gso_size;
   4.555 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
   4.556 + 		txdp->Control_1 |= TXD_TCP_LSO_EN;
   4.557 + 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
   4.558 + 	}
   4.559 +@@ -3543,10 +3543,10 @@ #endif
   4.560 + 	}
   4.561 + 
   4.562 + 	frg_len = skb->len - skb->data_len;
   4.563 +-	if (skb_shinfo(skb)->ufo_size) {
   4.564 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
   4.565 + 		int ufo_size;
   4.566 + 
   4.567 +-		ufo_size = skb_shinfo(skb)->ufo_size;
   4.568 ++		ufo_size = skb_shinfo(skb)->gso_size;
   4.569 + 		ufo_size &= ~7;
   4.570 + 		txdp->Control_1 |= TXD_UFO_EN;
   4.571 + 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
   4.572 +@@ -3572,7 +3572,7 @@ #endif
   4.573 + 	txdp->Host_Control = (unsigned long) skb;
   4.574 + 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
   4.575 + 
   4.576 +-	if (skb_shinfo(skb)->ufo_size)
   4.577 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   4.578 + 		txdp->Control_1 |= TXD_UFO_EN;
   4.579 + 
   4.580 + 	frg_cnt = skb_shinfo(skb)->nr_frags;
   4.581 +@@ -3587,12 +3587,12 @@ #endif
   4.582 + 		    (sp->pdev, frag->page, frag->page_offset,
   4.583 + 		     frag->size, PCI_DMA_TODEVICE);
   4.584 + 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
   4.585 +-		if (skb_shinfo(skb)->ufo_size)
   4.586 ++		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   4.587 + 			txdp->Control_1 |= TXD_UFO_EN;
   4.588 + 	}
   4.589 + 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
   4.590 + 
   4.591 +-	if (skb_shinfo(skb)->ufo_size)
   4.592 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   4.593 + 		frg_cnt++; /* as Txd0 was used for inband header */
   4.594 + 
   4.595 + 	tx_fifo = mac_control->tx_FIFO_start[queue];
   4.596 +@@ -3606,7 +3606,7 @@ #ifdef NETIF_F_TSO
   4.597 + 	if (mss)
   4.598 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
   4.599 + #endif
   4.600 +-	if (skb_shinfo(skb)->ufo_size)
   4.601 ++	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
   4.602 + 		val64 |= TX_FIFO_SPECIAL_FUNC;
   4.603 + 	writeq(val64, &tx_fifo->List_Control);
   4.604 + 
   4.605 +diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
   4.606 +index 0618cd5..2a55eb3 100644
   4.607 +--- a/drivers/net/sky2.c
   4.608 ++++ b/drivers/net/sky2.c
   4.609 +@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
   4.610 + 	count = sizeof(dma_addr_t) / sizeof(u32);
   4.611 + 	count += skb_shinfo(skb)->nr_frags * count;
   4.612 + 
   4.613 +-	if (skb_shinfo(skb)->tso_size)
   4.614 ++	if (skb_shinfo(skb)->gso_size)
   4.615 + 		++count;
   4.616 + 
   4.617 + 	if (skb->ip_summed == CHECKSUM_HW)
   4.618 +@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
   4.619 + 	}
   4.620 + 
   4.621 + 	/* Check for TCP Segmentation Offload */
   4.622 +-	mss = skb_shinfo(skb)->tso_size;
   4.623 ++	mss = skb_shinfo(skb)->gso_size;
   4.624 + 	if (mss != 0) {
   4.625 + 		/* just drop the packet if non-linear expansion fails */
   4.626 + 		if (skb_header_cloned(skb) &&
   4.627 +diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
   4.628 +index caf4102..fc9164a 100644
   4.629 +--- a/drivers/net/tg3.c
   4.630 ++++ b/drivers/net/tg3.c
   4.631 +@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
   4.632 + #if TG3_TSO_SUPPORT != 0
   4.633 + 	mss = 0;
   4.634 + 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
   4.635 +-	    (mss = skb_shinfo(skb)->tso_size) != 0) {
   4.636 ++	    (mss = skb_shinfo(skb)->gso_size) != 0) {
   4.637 + 		int tcp_opt_len, ip_tcp_len;
   4.638 + 
   4.639 + 		if (skb_header_cloned(skb) &&
   4.640 +diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
   4.641 +index 5b1af39..11de5af 100644
   4.642 +--- a/drivers/net/tulip/winbond-840.c
   4.643 ++++ b/drivers/net/tulip/winbond-840.c
   4.644 +@@ -1605,11 +1605,11 @@ #ifdef CONFIG_PM
   4.645 +  * - get_stats:
   4.646 +  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
   4.647 +  * - hard_start_xmit:
   4.648 +- * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
   4.649 ++ * 	synchronize_irq + netif_tx_disable;
   4.650 +  * - tx_timeout:
   4.651 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   4.652 ++ * 	netif_device_detach + netif_tx_disable;
   4.653 +  * - set_multicast_list
   4.654 +- * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
   4.655 ++ * 	netif_device_detach + netif_tx_disable;
   4.656 +  * - interrupt handler
   4.657 +  * 	doesn't touch hw if not present, synchronize_irq waits for
   4.658 +  * 	running instances of the interrupt handler.
   4.659 +@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 
   4.660 + 		netif_device_detach(dev);
   4.661 + 		update_csr6(dev, 0);
   4.662 + 		iowrite32(0, ioaddr + IntrEnable);
   4.663 +-		netif_stop_queue(dev);
   4.664 + 		spin_unlock_irq(&np->lock);
   4.665 + 
   4.666 +-		spin_unlock_wait(&dev->xmit_lock);
   4.667 + 		synchronize_irq(dev->irq);
   4.668 ++		netif_tx_disable(dev);
   4.669 + 	
   4.670 + 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
   4.671 + 
   4.672 +diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
   4.673 +index 4c76cb7..30c48c9 100644
   4.674 +--- a/drivers/net/typhoon.c
   4.675 ++++ b/drivers/net/typhoon.c
   4.676 +@@ -340,7 +340,7 @@ #define typhoon_synchronize_irq(x) synch
   4.677 + #endif
   4.678 + 
   4.679 + #if defined(NETIF_F_TSO)
   4.680 +-#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
   4.681 ++#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
   4.682 + #define TSO_NUM_DESCRIPTORS	2
   4.683 + #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
   4.684 + #else
   4.685 +diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
   4.686 +index ed1f837..2eb6b5f 100644
   4.687 +--- a/drivers/net/via-velocity.c
   4.688 ++++ b/drivers/net/via-velocity.c
   4.689 +@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 
   4.690 + 
   4.691 + 	int pktlen = skb->len;
   4.692 + 
   4.693 ++#ifdef VELOCITY_ZERO_COPY_SUPPORT
   4.694 ++	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
   4.695 ++		kfree_skb(skb);
   4.696 ++		return 0;
   4.697 ++	}
   4.698 ++#endif
   4.699 ++
   4.700 + 	spin_lock_irqsave(&vptr->lock, flags);
   4.701 + 
   4.702 + 	index = vptr->td_curr[qnum];
   4.703 +@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 
   4.704 + 	 */
   4.705 + 	if (pktlen < ETH_ZLEN) {
   4.706 + 		/* Cannot occur until ZC support */
   4.707 +-		if(skb_linearize(skb, GFP_ATOMIC))
   4.708 +-			return 0; 
   4.709 + 		pktlen = ETH_ZLEN;
   4.710 + 		memcpy(tdinfo->buf, skb->data, skb->len);
   4.711 + 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
   4.712 +@@ -1933,7 +1938,6 @@ #ifdef VELOCITY_ZERO_COPY_SUPPORT
   4.713 + 		int nfrags = skb_shinfo(skb)->nr_frags;
   4.714 + 		tdinfo->skb = skb;
   4.715 + 		if (nfrags > 6) {
   4.716 +-			skb_linearize(skb, GFP_ATOMIC);
   4.717 + 			memcpy(tdinfo->buf, skb->data, skb->len);
   4.718 + 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
   4.719 + 			td_ptr->tdesc0.pktsize = 
   4.720 +diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
   4.721 +index 6fd0bf7..75237c1 100644
   4.722 +--- a/drivers/net/wireless/orinoco.c
   4.723 ++++ b/drivers/net/wireless/orinoco.c
   4.724 +@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
   4.725 + 	/* Set promiscuity / multicast*/
   4.726 + 	priv->promiscuous = 0;
   4.727 + 	priv->mc_count = 0;
   4.728 +-	__orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
   4.729 ++
   4.730 ++	/* FIXME: what about netif_tx_lock */
   4.731 ++	__orinoco_set_multicast_list(dev);
   4.732 + 
   4.733 + 	return 0;
   4.734 + }
   4.735 +diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
   4.736 +index 82cb4af..57cec40 100644
   4.737 +--- a/drivers/s390/net/qeth_eddp.c
   4.738 ++++ b/drivers/s390/net/qeth_eddp.c
   4.739 +@@ -421,7 +421,7 @@ #endif /* CONFIG_QETH_VLAN */
   4.740 +        }
   4.741 + 	tcph = eddp->skb->h.th;
   4.742 + 	while (eddp->skb_offset < eddp->skb->len) {
   4.743 +-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
   4.744 ++		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
   4.745 + 			       (int)(eddp->skb->len - eddp->skb_offset));
   4.746 + 		/* prepare qdio hdr */
   4.747 + 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
   4.748 +@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
   4.749 + 	
   4.750 + 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
   4.751 + 	/* can we put multiple skbs in one page? */
   4.752 +-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
   4.753 ++	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
   4.754 + 	if (skbs_per_page > 1){
   4.755 +-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
   4.756 ++		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
   4.757 + 				 skbs_per_page + 1;
   4.758 + 		ctx->elements_per_skb = 1;
   4.759 + 	} else {
   4.760 + 		/* no -> how many elements per skb? */
   4.761 +-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
   4.762 ++		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
   4.763 + 				     PAGE_SIZE) >> PAGE_SHIFT;
   4.764 + 		ctx->num_pages = ctx->elements_per_skb *
   4.765 +-				 (skb_shinfo(skb)->tso_segs + 1);
   4.766 ++				 (skb_shinfo(skb)->gso_segs + 1);
   4.767 + 	}
   4.768 + 	ctx->num_elements = ctx->elements_per_skb *
   4.769 +-			    (skb_shinfo(skb)->tso_segs + 1);
   4.770 ++			    (skb_shinfo(skb)->gso_segs + 1);
   4.771 + }
   4.772 + 
   4.773 + static inline struct qeth_eddp_context *
   4.774 +diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
   4.775 +index dba7f7f..d9cc997 100644
   4.776 +--- a/drivers/s390/net/qeth_main.c
   4.777 ++++ b/drivers/s390/net/qeth_main.c
   4.778 +@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
   4.779 + 	queue = card->qdio.out_qs
   4.780 + 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
   4.781 + 
   4.782 +-	if (skb_shinfo(skb)->tso_size)
   4.783 ++	if (skb_shinfo(skb)->gso_size)
   4.784 + 		large_send = card->options.large_send;
   4.785 + 
   4.786 + 	/*are we able to do TSO ? If so ,prepare and send it from here */
   4.787 +@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
   4.788 + 		card->stats.tx_packets++;
   4.789 + 		card->stats.tx_bytes += skb->len;
   4.790 + #ifdef CONFIG_QETH_PERF_STATS
   4.791 +-		if (skb_shinfo(skb)->tso_size &&
   4.792 ++		if (skb_shinfo(skb)->gso_size &&
   4.793 + 		   !(large_send == QETH_LARGE_SEND_NO)) {
   4.794 + 			card->perf_stats.large_send_bytes += skb->len;
   4.795 + 			card->perf_stats.large_send_cnt++;
   4.796 +diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
   4.797 +index 1286dde..89cbf34 100644
   4.798 +--- a/drivers/s390/net/qeth_tso.h
   4.799 ++++ b/drivers/s390/net/qeth_tso.h
   4.800 +@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
   4.801 + 	hdr->ext.hdr_version = 1;
   4.802 + 	hdr->ext.hdr_len     = 28;
   4.803 + 	/*insert non-fix values */
   4.804 +-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
   4.805 ++	hdr->ext.mss = skb_shinfo(skb)->gso_size;
   4.806 + 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
   4.807 + 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
   4.808 + 				       sizeof(struct qeth_hdr_tso));
   4.809 +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
   4.810 +index 93535f0..9269df7 100644
   4.811 +--- a/include/linux/ethtool.h
   4.812 ++++ b/include/linux/ethtool.h
   4.813 +@@ -408,6 +408,8 @@ #define ETHTOOL_STSO		0x0000001f /* Set 
   4.814 + #define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
   4.815 + #define ETHTOOL_GUFO		0x00000021 /* Get UFO enable (ethtool_value) */
   4.816 + #define ETHTOOL_SUFO		0x00000022 /* Set UFO enable (ethtool_value) */
   4.817 ++#define ETHTOOL_GGSO		0x00000023 /* Get GSO enable (ethtool_value) */
   4.818 ++#define ETHTOOL_SGSO		0x00000024 /* Set GSO enable (ethtool_value) */
   4.819 + 
   4.820 + /* compatibility with older code */
   4.821 + #define SPARC_ETH_GSET		ETHTOOL_GSET
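
The two new commands are ordinary ethtool_value get/set pairs, so GSO can be flipped from userspace through the usual SIOCETHTOOL ioctl. A minimal sketch, assuming an interface named "eth0" and omitting most error handling:

	/* Toggle GSO with ETHTOOL_SGSO; "eth0" is a placeholder name. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_value eval = { .cmd = ETHTOOL_SGSO, .data = 1 };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&eval;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SGSO");
		close(fd);
		return 0;
	}
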
   4.822 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
   4.823 +index 7fda03d..47b0965 100644
   4.824 +--- a/include/linux/netdevice.h
   4.825 ++++ b/include/linux/netdevice.h
   4.826 +@@ -230,7 +230,8 @@ enum netdev_state_t
   4.827 + 	__LINK_STATE_SCHED,
   4.828 + 	__LINK_STATE_NOCARRIER,
   4.829 + 	__LINK_STATE_RX_SCHED,
   4.830 +-	__LINK_STATE_LINKWATCH_PENDING
   4.831 ++	__LINK_STATE_LINKWATCH_PENDING,
   4.832 ++	__LINK_STATE_QDISC_RUNNING,
   4.833 + };
   4.834 + 
   4.835 + 
   4.836 +@@ -306,9 +307,17 @@ #define NETIF_F_HW_VLAN_TX	128	/* Transm
   4.837 + #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
   4.838 + #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
   4.839 + #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
   4.840 +-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
   4.841 ++#define NETIF_F_GSO		2048	/* Enable software GSO. */
   4.842 + #define NETIF_F_LLTX		4096	/* LockLess TX */
   4.843 +-#define NETIF_F_UFO             8192    /* Can offload UDP Large Send*/
   4.844 ++
   4.845 ++	/* Segmentation offload features */
   4.846 ++#define NETIF_F_GSO_SHIFT	16
   4.847 ++#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
   4.848 ++#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
   4.849 ++#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
   4.850 ++
   4.851 ++#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
   4.852 ++#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
   4.853 + 
   4.854 + 	struct net_device	*next_sched;
   4.855 + 
   4.856 +@@ -394,6 +403,9 @@ #define NETIF_F_UFO             8192    
   4.857 + 	struct list_head	qdisc_list;
   4.858 + 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
   4.859 + 
   4.860 ++	/* Partially transmitted GSO packet. */
   4.861 ++	struct sk_buff		*gso_skb;
   4.862 ++
   4.863 + 	/* ingress path synchronizer */
   4.864 + 	spinlock_t		ingress_lock;
   4.865 + 	struct Qdisc		*qdisc_ingress;
   4.866 +@@ -402,7 +414,7 @@ #define NETIF_F_UFO             8192    
   4.867 +  * One part is mostly used on xmit path (device)
   4.868 +  */
   4.869 + 	/* hard_start_xmit synchronizer */
   4.870 +-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
   4.871 ++	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
   4.872 + 	/* cpu id of processor entered to hard_start_xmit or -1,
   4.873 + 	   if nobody entered there.
   4.874 + 	 */
   4.875 +@@ -527,6 +539,8 @@ struct packet_type {
   4.876 + 					 struct net_device *,
   4.877 + 					 struct packet_type *,
   4.878 + 					 struct net_device *);
   4.879 ++	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
   4.880 ++						int features);
   4.881 + 	void			*af_packet_priv;
   4.882 + 	struct list_head	list;
   4.883 + };
   4.884 +@@ -693,7 +707,8 @@ extern int		dev_change_name(struct net_d
   4.885 + extern int		dev_set_mtu(struct net_device *, int);
   4.886 + extern int		dev_set_mac_address(struct net_device *,
   4.887 + 					    struct sockaddr *);
   4.888 +-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
   4.889 ++extern int		dev_hard_start_xmit(struct sk_buff *skb,
   4.890 ++					    struct net_device *dev);
   4.891 + 
   4.892 + extern void		dev_init(void);
   4.893 + 
   4.894 +@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
   4.895 + 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
   4.896 + }
   4.897 + 
   4.898 ++static inline void netif_tx_lock(struct net_device *dev)
   4.899 ++{
   4.900 ++	spin_lock(&dev->_xmit_lock);
   4.901 ++	dev->xmit_lock_owner = smp_processor_id();
   4.902 ++}
   4.903 ++
   4.904 ++static inline void netif_tx_lock_bh(struct net_device *dev)
   4.905 ++{
   4.906 ++	spin_lock_bh(&dev->_xmit_lock);
   4.907 ++	dev->xmit_lock_owner = smp_processor_id();
   4.908 ++}
   4.909 ++
   4.910 ++static inline int netif_tx_trylock(struct net_device *dev)
   4.911 ++{
   4.912 ++	int err = spin_trylock(&dev->_xmit_lock);
   4.913 ++	if (!err)
   4.914 ++		dev->xmit_lock_owner = smp_processor_id();
   4.915 ++	return err;
   4.916 ++}
   4.917 ++
   4.918 ++static inline void netif_tx_unlock(struct net_device *dev)
   4.919 ++{
   4.920 ++	dev->xmit_lock_owner = -1;
   4.921 ++	spin_unlock(&dev->_xmit_lock);
   4.922 ++}
   4.923 ++
   4.924 ++static inline void netif_tx_unlock_bh(struct net_device *dev)
   4.925 ++{
   4.926 ++	dev->xmit_lock_owner = -1;
   4.927 ++	spin_unlock_bh(&dev->_xmit_lock);
   4.928 ++}
   4.929 ++
   4.930 + static inline void netif_tx_disable(struct net_device *dev)
   4.931 + {
   4.932 +-	spin_lock_bh(&dev->xmit_lock);
   4.933 ++	netif_tx_lock_bh(dev);
   4.934 + 	netif_stop_queue(dev);
   4.935 +-	spin_unlock_bh(&dev->xmit_lock);
   4.936 ++	netif_tx_unlock_bh(dev);
   4.937 + }
   4.938 + 
   4.939 + /* These functions live elsewhere (drivers/net/net_init.c, but related) */
   4.940 +@@ -932,6 +979,7 @@ extern int		netdev_max_backlog;
   4.941 + extern int		weight_p;
   4.942 + extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
   4.943 + extern int skb_checksum_help(struct sk_buff *skb, int inward);
   4.944 ++extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
   4.945 + #ifdef CONFIG_BUG
   4.946 + extern void netdev_rx_csum_fault(struct net_device *dev);
   4.947 + #else
   4.948 +@@ -951,6 +999,18 @@ #endif
   4.949 + 
   4.950 + extern void linkwatch_run_queue(void);
   4.951 + 
   4.952 ++static inline int skb_gso_ok(struct sk_buff *skb, int features)
   4.953 ++{
   4.954 ++	int feature = skb_shinfo(skb)->gso_size ?
   4.955 ++		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
   4.956 ++	return (features & feature) == feature;
   4.957 ++}
   4.958 ++
   4.959 ++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
   4.960 ++{
   4.961 ++	return !skb_gso_ok(skb, dev->features);
   4.962 ++}
   4.963 ++
   4.964 + #endif /* __KERNEL__ */
   4.965 + 
   4.966 + #endif	/* _LINUX_DEV_H */
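
The bit layout introduced here carries most of the design: every SKB_GSO_* value, shifted left by NETIF_F_GSO_SHIFT, lands exactly on the NETIF_F_* bit that advertises the matching hardware offload. That reduces skb_gso_ok() to a single mask-and-compare, with netif_needs_gso() as its negation. A self-contained sketch of the check, with the constants copied from the definitions above:

	/* Userspace illustration of the gso_type/feature-bit alignment. */
	#include <stdio.h>

	#define SKB_GSO_TCPV4		(1 << 0)
	#define NETIF_F_GSO_SHIFT	16
	#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)

	static int skb_gso_ok(int gso_size, int gso_type, int features)
	{
		int feature = gso_size ? gso_type << NETIF_F_GSO_SHIFT : 0;
		return (features & feature) == feature;
	}

	int main(void)
	{
		/* TSO hardware takes the super-packet unmodified. */
		printf("%d\n", skb_gso_ok(1460, SKB_GSO_TCPV4, NETIF_F_TSO));
		/* No TSO: netif_needs_gso() fires and software segments. */
		printf("%d\n", skb_gso_ok(1460, SKB_GSO_TCPV4, 0));
		return 0;
	}
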
   4.967 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
   4.968 +index ad7cc22..b19d45d 100644
   4.969 +--- a/include/linux/skbuff.h
   4.970 ++++ b/include/linux/skbuff.h
   4.971 +@@ -134,9 +134,10 @@ struct skb_frag_struct {
   4.972 + struct skb_shared_info {
   4.973 + 	atomic_t	dataref;
   4.974 + 	unsigned short	nr_frags;
   4.975 +-	unsigned short	tso_size;
   4.976 +-	unsigned short	tso_segs;
   4.977 +-	unsigned short  ufo_size;
   4.978 ++	unsigned short	gso_size;
   4.979 ++	/* Warning: this field is not always filled in (UFO)! */
   4.980 ++	unsigned short	gso_segs;
   4.981 ++	unsigned short  gso_type;
   4.982 + 	unsigned int    ip6_frag_id;
   4.983 + 	struct sk_buff	*frag_list;
   4.984 + 	skb_frag_t	frags[MAX_SKB_FRAGS];
   4.985 +@@ -168,6 +169,14 @@ enum {
   4.986 + 	SKB_FCLONE_CLONE,
   4.987 + };
   4.988 + 
   4.989 ++enum {
   4.990 ++	SKB_GSO_TCPV4 = 1 << 0,
   4.991 ++	SKB_GSO_UDPV4 = 1 << 1,
   4.992 ++
   4.993 ++	/* This indicates the skb is from an untrusted source. */
   4.994 ++	SKB_GSO_DODGY = 1 << 2,
   4.995 ++};
   4.996 ++
   4.997 + /** 
   4.998 +  *	struct sk_buff - socket buffer
   4.999 +  *	@next: Next buffer in list
  4.1000 +@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
  4.1001 + 	return 0;
  4.1002 + }
  4.1003 + 
  4.1004 ++static inline int __skb_linearize(struct sk_buff *skb)
  4.1005 ++{
  4.1006 ++	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
  4.1007 ++}
  4.1008 ++
  4.1009 + /**
  4.1010 +  *	skb_linearize - convert paged skb to linear one
  4.1011 +  *	@skb: buffer to linarize
  4.1012 +- *	@gfp: allocation mode
  4.1013 +  *
  4.1014 +  *	If there is no free memory -ENOMEM is returned, otherwise zero
  4.1015 +  *	is returned and the old skb data released.
  4.1016 +  */
  4.1017 +-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
  4.1018 +-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
  4.1019 ++static inline int skb_linearize(struct sk_buff *skb)
  4.1020 ++{
  4.1021 ++	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
  4.1022 ++}
  4.1023 ++
  4.1024 ++/**
  4.1025 ++ *	skb_linearize_cow - make sure skb is linear and writable
  4.1026 ++ *	@skb: buffer to process
  4.1027 ++ *
  4.1028 ++ *	If there is no free memory -ENOMEM is returned, otherwise zero
  4.1029 ++ *	is returned and the old skb data released.
  4.1030 ++ */
  4.1031 ++static inline int skb_linearize_cow(struct sk_buff *skb)
  4.1032 + {
  4.1033 +-	return __skb_linearize(skb, gfp);
  4.1034 ++	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
  4.1035 ++	       __skb_linearize(skb) : 0;
  4.1036 + }
  4.1037 + 
  4.1038 + /**
  4.1039 +@@ -1254,6 +1279,7 @@ extern void	       skb_split(struct sk_b
  4.1040 + 				 struct sk_buff *skb1, const u32 len);
  4.1041 + 
  4.1042 + extern void	       skb_release_data(struct sk_buff *skb);
  4.1043 ++extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
  4.1044 + 
  4.1045 + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
  4.1046 + 				       int len, void *buffer)
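
Both new inline helpers funnel into __pskb_pull_tail(skb, skb->data_len), which pulls every paged byte into the linear area; on a cloned skb the header is reallocated first, which is what makes skb_linearize_cow() safe for callers about to write to the buffer. The expected call pattern, mirroring the ipcomp.c conversion later in this patch (example_transform is a made-up name):

	/* Hypothetical caller: linearize and unshare before in-place edits. */
	static int example_transform(struct sk_buff *skb)
	{
		if (skb_linearize_cow(skb))
			return -ENOMEM;	/* could not linearize/unshare */

		/* skb->data now covers all skb->len bytes and is writable */
		return 0;
	}
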
  4.1047 +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
  4.1048 +index b94d1ad..75b5b93 100644
  4.1049 +--- a/include/net/pkt_sched.h
  4.1050 ++++ b/include/net/pkt_sched.h
  4.1051 +@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
  4.1052 + 		struct rtattr *tab);
  4.1053 + extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
  4.1054 + 
  4.1055 +-extern int qdisc_restart(struct net_device *dev);
  4.1056 ++extern void __qdisc_run(struct net_device *dev);
  4.1057 + 
  4.1058 + static inline void qdisc_run(struct net_device *dev)
  4.1059 + {
  4.1060 +-	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
  4.1061 +-		/* NOTHING */;
  4.1062 ++	if (!netif_queue_stopped(dev) &&
  4.1063 ++	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  4.1064 ++		__qdisc_run(dev);
  4.1065 + }
  4.1066 + 
  4.1067 + extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
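
The reworked qdisc_run() uses __LINK_STATE_QDISC_RUNNING (added to netdevice.h above) as a per-device trylock: test_and_set_bit() admits exactly one CPU to drain the queue while concurrent senders merely enqueue and return. The sketch below is the assumed shape of __qdisc_run(), which lives in sch_generic.c outside this section; the loop body is a simplification:

	/* Assumed structure of __qdisc_run(); simplified, not the real code. */
	static void __qdisc_run_sketch(struct net_device *dev)
	{
		while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
			/* keep dequeueing packets */;

		clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
	}
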
  4.1068 +diff --git a/include/net/protocol.h b/include/net/protocol.h
  4.1069 +index 6dc5970..0d2dcdb 100644
  4.1070 +--- a/include/net/protocol.h
  4.1071 ++++ b/include/net/protocol.h
  4.1072 +@@ -37,6 +37,8 @@ #define MAX_INET_PROTOS	256		/* Must be 
  4.1073 + struct net_protocol {
  4.1074 + 	int			(*handler)(struct sk_buff *skb);
  4.1075 + 	void			(*err_handler)(struct sk_buff *skb, u32 info);
  4.1076 ++	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
  4.1077 ++					       int features);
  4.1078 + 	int			no_policy;
  4.1079 + };
  4.1080 + 
  4.1081 +diff --git a/include/net/sock.h b/include/net/sock.h
  4.1082 +index f63d0d5..a8e8d21 100644
  4.1083 +--- a/include/net/sock.h
  4.1084 ++++ b/include/net/sock.h
  4.1085 +@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 
  4.1086 + {
  4.1087 + 	__sk_dst_set(sk, dst);
  4.1088 + 	sk->sk_route_caps = dst->dev->features;
  4.1089 ++	if (sk->sk_route_caps & NETIF_F_GSO)
  4.1090 ++		sk->sk_route_caps |= NETIF_F_TSO;
  4.1091 + 	if (sk->sk_route_caps & NETIF_F_TSO) {
  4.1092 + 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
  4.1093 + 			sk->sk_route_caps &= ~NETIF_F_TSO;
  4.1094 ++		else
  4.1095 ++			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
  4.1096 + 	}
  4.1097 + }
  4.1098 + 
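
The sk_setup_caps() change lets a software-GSO device masquerade as TSO-capable, and, because GSO finishes the checksum after segmenting, also implies scatter-gather and hardware checksumming for the socket. A condensed restatement, with caps, no_largesend and header_len standing in for the sk and dst fields involved:

	/* Restatement of the capability fix-up above; names are stand-ins. */
	static unsigned long sk_caps_sketch(unsigned long caps,
					    int no_largesend,
					    unsigned int header_len)
	{
		if (caps & NETIF_F_GSO)
			caps |= NETIF_F_TSO;	/* software GSO emulates TSO */
		if (caps & NETIF_F_TSO) {
			if (no_largesend || header_len)
				caps &= ~NETIF_F_TSO;
			else	/* checksum is finalized after segmentation */
				caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
		}
		return caps;
	}
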
  4.1099 +diff --git a/include/net/tcp.h b/include/net/tcp.h
  4.1100 +index 77f21c6..70e1d5f 100644
  4.1101 +--- a/include/net/tcp.h
  4.1102 ++++ b/include/net/tcp.h
  4.1103 +@@ -552,13 +552,13 @@ #include <net/tcp_ecn.h>
  4.1104 +  */
  4.1105 + static inline int tcp_skb_pcount(const struct sk_buff *skb)
  4.1106 + {
  4.1107 +-	return skb_shinfo(skb)->tso_segs;
  4.1108 ++	return skb_shinfo(skb)->gso_segs;
  4.1109 + }
  4.1110 + 
  4.1111 + /* This is valid iff tcp_skb_pcount() > 1. */
  4.1112 + static inline int tcp_skb_mss(const struct sk_buff *skb)
  4.1113 + {
  4.1114 +-	return skb_shinfo(skb)->tso_size;
  4.1115 ++	return skb_shinfo(skb)->gso_size;
  4.1116 + }
  4.1117 + 
  4.1118 + static inline void tcp_dec_pcount_approx(__u32 *count,
  4.1119 +@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
  4.1120 + 
  4.1121 + extern int tcp_v4_destroy_sock(struct sock *sk);
  4.1122 + 
  4.1123 ++extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
  4.1124 ++
  4.1125 + #ifdef CONFIG_PROC_FS
  4.1126 + extern int  tcp4_proc_init(void);
  4.1127 + extern void tcp4_proc_exit(void);
  4.1128 +diff --git a/net/atm/clip.c b/net/atm/clip.c
  4.1129 +index 1842a4e..6dc21a7 100644
  4.1130 +--- a/net/atm/clip.c
  4.1131 ++++ b/net/atm/clip.c
  4.1132 +@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
  4.1133 + 		printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
  4.1134 + 		return;
  4.1135 + 	}
  4.1136 +-	spin_lock_bh(&entry->neigh->dev->xmit_lock);	/* block clip_start_xmit() */
  4.1137 ++	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
  4.1138 + 	entry->neigh->used = jiffies;
  4.1139 + 	for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
  4.1140 + 		if (*walk == clip_vcc) {
  4.1141 +@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
  4.1142 + 	printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
  4.1143 + 	  "0x%p)\n",entry,clip_vcc);
  4.1144 + out:
  4.1145 +-	spin_unlock_bh(&entry->neigh->dev->xmit_lock);
  4.1146 ++	netif_tx_unlock_bh(entry->neigh->dev);
  4.1147 + }
  4.1148 + 
  4.1149 + /* The neighbour entry n->lock is held. */
  4.1150 +diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
  4.1151 +index 0b33a7b..180e79b 100644
  4.1152 +--- a/net/bridge/br_device.c
  4.1153 ++++ b/net/bridge/br_device.c
  4.1154 +@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
  4.1155 + 	struct net_bridge *br = netdev_priv(dev);
  4.1156 + 
  4.1157 + 	if (data)
  4.1158 +-		br->feature_mask |= NETIF_F_IP_CSUM;
  4.1159 ++		br->feature_mask |= NETIF_F_NO_CSUM;
  4.1160 + 	else
  4.1161 +-		br->feature_mask &= ~NETIF_F_IP_CSUM;
  4.1162 ++		br->feature_mask &= ~NETIF_F_ALL_CSUM;
  4.1163 + 
  4.1164 + 	br_features_recompute(br);
  4.1165 + 	return 0;
  4.1166 +@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
  4.1167 + 	dev->set_mac_address = br_set_mac_address;
  4.1168 + 	dev->priv_flags = IFF_EBRIDGE;
  4.1169 + 
  4.1170 +- 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
  4.1171 +- 		| NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
  4.1172 ++ 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
  4.1173 ++ 			NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
  4.1174 + }
  4.1175 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
  4.1176 +index 2d24fb4..00b1128 100644
  4.1177 +--- a/net/bridge/br_forward.c
  4.1178 ++++ b/net/bridge/br_forward.c
  4.1179 +@@ -32,7 +32,7 @@ static inline int should_deliver(const s
  4.1180 + int br_dev_queue_push_xmit(struct sk_buff *skb)
  4.1181 + {
  4.1182 + 	/* drop mtu oversized packets except tso */
  4.1183 +-	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
  4.1184 ++	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
  4.1185 + 		kfree_skb(skb);
  4.1186 + 	else {
  4.1187 + #ifdef CONFIG_BRIDGE_NETFILTER
  4.1188 +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
  4.1189 +index f36b35e..0617146 100644
  4.1190 +--- a/net/bridge/br_if.c
  4.1191 ++++ b/net/bridge/br_if.c
  4.1192 +@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
  4.1193 + 	struct net_bridge_port *p;
  4.1194 + 	unsigned long features, checksum;
  4.1195 + 
  4.1196 +-	features = br->feature_mask &~ NETIF_F_IP_CSUM;
  4.1197 +-	checksum = br->feature_mask & NETIF_F_IP_CSUM;
  4.1198 ++	checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
  4.1199 ++	features = br->feature_mask & ~NETIF_F_ALL_CSUM;
  4.1200 + 
  4.1201 + 	list_for_each_entry(p, &br->port_list, list) {
  4.1202 +-		if (!(p->dev->features 
  4.1203 +-		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
  4.1204 ++		unsigned long feature = p->dev->features;
  4.1205 ++
  4.1206 ++		if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
  4.1207 ++			checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
  4.1208 ++		if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
  4.1209 ++			checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
  4.1210 ++		if (!(feature & NETIF_F_IP_CSUM))
  4.1211 + 			checksum = 0;
  4.1212 +-		features &= p->dev->features;
  4.1213 ++
  4.1214 ++		if (feature & NETIF_F_GSO)
  4.1215 ++			feature |= NETIF_F_TSO;
  4.1216 ++		feature |= NETIF_F_GSO;
  4.1217 ++
  4.1218 ++		features &= feature;
  4.1219 + 	}
  4.1220 + 
  4.1221 +-	br->dev->features = features | checksum | NETIF_F_LLTX;
  4.1222 ++	br->dev->features = features | checksum | NETIF_F_LLTX |
  4.1223 ++			    NETIF_F_GSO_ROBUST;
  4.1224 + }
  4.1225 + 
  4.1226 + /* called with RTNL */
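
br_features_recompute() now walks a downgrade ladder instead of an all-or-nothing test: assume NETIF_F_NO_CSUM, weaken to NETIF_F_HW_CSUM and then NETIF_F_IP_CSUM as ports fall short, and drop to nothing when even IP checksumming is missing. A runnable sketch; the NETIF_F_* values match this kernel's netdevice.h, the port sets are invented:

	/* Bridge checksum downgrade ladder, extracted for illustration. */
	#include <stdio.h>

	#define NETIF_F_IP_CSUM	2UL
	#define NETIF_F_NO_CSUM	4UL
	#define NETIF_F_HW_CSUM	8UL

	static unsigned long br_csum(const unsigned long *port, int n)
	{
		unsigned long checksum = NETIF_F_NO_CSUM; /* assume the best */
		int i;

		for (i = 0; i < n; i++) {
			unsigned long f = port[i];

			if (checksum & NETIF_F_NO_CSUM &&
			    !(f & NETIF_F_NO_CSUM))
				checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
			if (checksum & NETIF_F_HW_CSUM &&
			    !(f & NETIF_F_HW_CSUM))
				checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
			if (!(f & NETIF_F_IP_CSUM))
				checksum = 0;
		}
		return checksum;
	}

	int main(void)
	{
		unsigned long ports[] = {
			NETIF_F_HW_CSUM | NETIF_F_IP_CSUM,
			NETIF_F_IP_CSUM,
		};

		/* weakest port only does IPv4 csums -> bridge claims 0x2 */
		printf("%#lx\n", br_csum(ports, 2));
		return 0;
	}
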
  4.1227 +diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
  4.1228 +index 9e27373..588207f 100644
  4.1229 +--- a/net/bridge/br_netfilter.c
  4.1230 ++++ b/net/bridge/br_netfilter.c
  4.1231 +@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
  4.1232 + {
  4.1233 + 	if (skb->protocol == htons(ETH_P_IP) &&
  4.1234 + 	    skb->len > skb->dev->mtu &&
  4.1235 +-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  4.1236 ++	    !skb_shinfo(skb)->gso_size)
  4.1237 + 		return ip_fragment(skb, br_dev_queue_push_xmit);
  4.1238 + 	else
  4.1239 + 		return br_dev_queue_push_xmit(skb);
  4.1240 +diff --git a/net/core/dev.c b/net/core/dev.c
  4.1241 +index 12a214c..32e1056 100644
  4.1242 +--- a/net/core/dev.c
  4.1243 ++++ b/net/core/dev.c
  4.1244 +@@ -115,6 +115,7 @@ #include <linux/wireless.h>		/* Note : w
  4.1245 + #include <net/iw_handler.h>
  4.1246 + #endif	/* CONFIG_NET_RADIO */
  4.1247 + #include <asm/current.h>
  4.1248 ++#include <linux/err.h>
  4.1249 + 
  4.1250 + /*
  4.1251 +  *	The list of packet types we will receive (as opposed to discard)
  4.1252 +@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 
  4.1253 +  *	taps currently in use.
  4.1254 +  */
  4.1255 + 
  4.1256 +-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  4.1257 ++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  4.1258 + {
  4.1259 + 	struct packet_type *ptype;
  4.1260 + 
  4.1261 +@@ -1106,6 +1107,45 @@ out:	
  4.1262 + 	return ret;
  4.1263 + }
  4.1264 + 
  4.1265 ++/**
  4.1266 ++ *	skb_gso_segment - Perform segmentation on skb.
  4.1267 ++ *	@skb: buffer to segment
  4.1268 ++ *	@features: features for the output path (see dev->features)
  4.1269 ++ *
  4.1270 ++ *	This function segments the given skb and returns a list of segments.
  4.1271 ++ *
  4.1272 ++ *	It may return NULL if the skb requires no segmentation.  This is
  4.1273 ++ *	only possible when GSO is used for verifying header integrity.
  4.1274 ++ */
  4.1275 ++struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
  4.1276 ++{
  4.1277 ++	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  4.1278 ++	struct packet_type *ptype;
  4.1279 ++	int type = skb->protocol;
  4.1280 ++
  4.1281 ++	BUG_ON(skb_shinfo(skb)->frag_list);
  4.1282 ++	BUG_ON(skb->ip_summed != CHECKSUM_HW);
  4.1283 ++
  4.1284 ++	skb->mac.raw = skb->data;
  4.1285 ++	skb->mac_len = skb->nh.raw - skb->data;
  4.1286 ++	__skb_pull(skb, skb->mac_len);
  4.1287 ++
  4.1288 ++	rcu_read_lock();
  4.1289 ++	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
  4.1290 ++		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
  4.1291 ++			segs = ptype->gso_segment(skb, features);
  4.1292 ++			break;
  4.1293 ++		}
  4.1294 ++	}
  4.1295 ++	rcu_read_unlock();
  4.1296 ++
  4.1297 ++	__skb_push(skb, skb->data - skb->mac.raw);
  4.1298 ++
  4.1299 ++	return segs;
  4.1300 ++}
  4.1301 ++
  4.1302 ++EXPORT_SYMBOL(skb_gso_segment);
  4.1303 ++
  4.1304 + /* Take action when hardware reception checksum errors are detected. */
  4.1305 + #ifdef CONFIG_BUG
  4.1306 + void netdev_rx_csum_fault(struct net_device *dev)
  4.1307 +@@ -1142,75 +1182,108 @@ #else
  4.1308 + #define illegal_highdma(dev, skb)	(0)
  4.1309 + #endif
  4.1310 + 
  4.1311 +-/* Keep head the same: replace data */
  4.1312 +-int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
  4.1313 +-{
  4.1314 +-	unsigned int size;
  4.1315 +-	u8 *data;
  4.1316 +-	long offset;
  4.1317 +-	struct skb_shared_info *ninfo;
  4.1318 +-	int headerlen = skb->data - skb->head;
  4.1319 +-	int expand = (skb->tail + skb->data_len) - skb->end;
  4.1320 +-
  4.1321 +-	if (skb_shared(skb))
  4.1322 +-		BUG();
  4.1323 +-
  4.1324 +-	if (expand <= 0)
  4.1325 +-		expand = 0;
  4.1326 +-
  4.1327 +-	size = skb->end - skb->head + expand;
  4.1328 +-	size = SKB_DATA_ALIGN(size);
  4.1329 +-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
  4.1330 +-	if (!data)
  4.1331 +-		return -ENOMEM;
  4.1332 +-
  4.1333 +-	/* Copy entire thing */
  4.1334 +-	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
  4.1335 +-		BUG();
  4.1336 +-
  4.1337 +-	/* Set up shinfo */
  4.1338 +-	ninfo = (struct skb_shared_info*)(data + size);
  4.1339 +-	atomic_set(&ninfo->dataref, 1);
  4.1340 +-	ninfo->tso_size = skb_shinfo(skb)->tso_size;
  4.1341 +-	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
  4.1342 +-	ninfo->nr_frags = 0;
  4.1343 +-	ninfo->frag_list = NULL;
  4.1344 +-
  4.1345 +-	/* Offset between the two in bytes */
  4.1346 +-	offset = data - skb->head;
  4.1347 +-
  4.1348 +-	/* Free old data. */
  4.1349 +-	skb_release_data(skb);
  4.1350 +-
  4.1351 +-	skb->head = data;
  4.1352 +-	skb->end  = data + size;
  4.1353 +-
  4.1354 +-	/* Set up new pointers */
  4.1355 +-	skb->h.raw   += offset;
  4.1356 +-	skb->nh.raw  += offset;
  4.1357 +-	skb->mac.raw += offset;
  4.1358 +-	skb->tail    += offset;
  4.1359 +-	skb->data    += offset;
  4.1360 +-
  4.1361 +-	/* We are no longer a clone, even if we were. */
  4.1362 +-	skb->cloned    = 0;
  4.1363 +-
  4.1364 +-	skb->tail     += skb->data_len;
  4.1365 +-	skb->data_len  = 0;
  4.1366 ++struct dev_gso_cb {
  4.1367 ++	void (*destructor)(struct sk_buff *skb);
  4.1368 ++};
  4.1369 ++
  4.1370 ++#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
  4.1371 ++
  4.1372 ++static void dev_gso_skb_destructor(struct sk_buff *skb)
  4.1373 ++{
  4.1374 ++	struct dev_gso_cb *cb;
  4.1375 ++
  4.1376 ++	do {
  4.1377 ++		struct sk_buff *nskb = skb->next;
  4.1378 ++
  4.1379 ++		skb->next = nskb->next;
  4.1380 ++		nskb->next = NULL;
  4.1381 ++		kfree_skb(nskb);
  4.1382 ++	} while (skb->next);
  4.1383 ++
  4.1384 ++	cb = DEV_GSO_CB(skb);
  4.1385 ++	if (cb->destructor)
  4.1386 ++		cb->destructor(skb);
  4.1387 ++}
  4.1388 ++
  4.1389 ++/**
  4.1390 ++ *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  4.1391 ++ *	@skb: buffer to segment
  4.1392 ++ *
  4.1393 ++ *	This function segments the given skb and stores the list of segments
  4.1394 ++ *	in skb->next.
  4.1395 ++ */
  4.1396 ++static int dev_gso_segment(struct sk_buff *skb)
  4.1397 ++{
  4.1398 ++	struct net_device *dev = skb->dev;
  4.1399 ++	struct sk_buff *segs;
  4.1400 ++	int features = dev->features & ~(illegal_highdma(dev, skb) ?
  4.1401 ++					 NETIF_F_SG : 0);
  4.1402 ++
  4.1403 ++	segs = skb_gso_segment(skb, features);
  4.1404 ++
  4.1405 ++	/* Verifying header integrity only. */
  4.1406 ++	if (!segs)
  4.1407 ++		return 0;
  4.1408 ++
  4.1409 ++	if (unlikely(IS_ERR(segs)))
  4.1410 ++		return PTR_ERR(segs);
  4.1411 ++
  4.1412 ++	skb->next = segs;
  4.1413 ++	DEV_GSO_CB(skb)->destructor = skb->destructor;
  4.1414 ++	skb->destructor = dev_gso_skb_destructor;
  4.1415 ++
  4.1416 ++	return 0;
  4.1417 ++}
  4.1418 ++
  4.1419 ++int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
  4.1420 ++{
  4.1421 ++	if (likely(!skb->next)) {
  4.1422 ++		if (netdev_nit)
  4.1423 ++			dev_queue_xmit_nit(skb, dev);
  4.1424 ++
  4.1425 ++		if (netif_needs_gso(dev, skb)) {
  4.1426 ++			if (unlikely(dev_gso_segment(skb)))
  4.1427 ++				goto out_kfree_skb;
  4.1428 ++			if (skb->next)
  4.1429 ++				goto gso;
  4.1430 ++		}
  4.1431 ++
  4.1432 ++		return dev->hard_start_xmit(skb, dev);
  4.1433 ++	}
  4.1434 ++
  4.1435 ++gso:
  4.1436 ++	do {
  4.1437 ++		struct sk_buff *nskb = skb->next;
  4.1438 ++		int rc;
  4.1439 ++
  4.1440 ++		skb->next = nskb->next;
  4.1441 ++		nskb->next = NULL;
  4.1442 ++		rc = dev->hard_start_xmit(nskb, dev);
  4.1443 ++		if (unlikely(rc)) {
  4.1444 ++			nskb->next = skb->next;
  4.1445 ++			skb->next = nskb;
  4.1446 ++			return rc;
  4.1447 ++		}
  4.1448 ++		if (unlikely(netif_queue_stopped(dev) && skb->next))
  4.1449 ++			return NETDEV_TX_BUSY;
  4.1450 ++	} while (skb->next);
  4.1451 ++	
  4.1452 ++	skb->destructor = DEV_GSO_CB(skb)->destructor;
  4.1453 ++
  4.1454 ++out_kfree_skb:
  4.1455 ++	kfree_skb(skb);
  4.1456 + 	return 0;
  4.1457 + }
  4.1458 + 
  4.1459 + #define HARD_TX_LOCK(dev, cpu) {			\
  4.1460 + 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  4.1461 +-		spin_lock(&dev->xmit_lock);		\
  4.1462 +-		dev->xmit_lock_owner = cpu;		\
  4.1463 ++		netif_tx_lock(dev);			\
  4.1464 + 	}						\
  4.1465 + }
  4.1466 + 
  4.1467 + #define HARD_TX_UNLOCK(dev) {				\
  4.1468 + 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
  4.1469 +-		dev->xmit_lock_owner = -1;		\
  4.1470 +-		spin_unlock(&dev->xmit_lock);		\
  4.1471 ++		netif_tx_unlock(dev);			\
  4.1472 + 	}						\
  4.1473 + }
  4.1474 + 
  4.1475 +@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
  4.1476 + 	struct Qdisc *q;
  4.1477 + 	int rc = -ENOMEM;
  4.1478 + 
  4.1479 ++	/* GSO will handle the following emulations directly. */
  4.1480 ++	if (netif_needs_gso(dev, skb))
  4.1481 ++		goto gso;
  4.1482 ++
  4.1483 + 	if (skb_shinfo(skb)->frag_list &&
  4.1484 + 	    !(dev->features & NETIF_F_FRAGLIST) &&
  4.1485 +-	    __skb_linearize(skb, GFP_ATOMIC))
  4.1486 ++	    __skb_linearize(skb))
  4.1487 + 		goto out_kfree_skb;
  4.1488 + 
  4.1489 + 	/* Fragmented skb is linearized if device does not support SG,
  4.1490 +@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
  4.1491 + 	 */
  4.1492 + 	if (skb_shinfo(skb)->nr_frags &&
  4.1493 + 	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
  4.1494 +-	    __skb_linearize(skb, GFP_ATOMIC))
  4.1495 ++	    __skb_linearize(skb))
  4.1496 + 		goto out_kfree_skb;
  4.1497 + 
  4.1498 + 	/* If packet is not checksummed and device does not support
  4.1499 + 	 * checksumming for this protocol, complete checksumming here.
  4.1500 + 	 */
  4.1501 + 	if (skb->ip_summed == CHECKSUM_HW &&
  4.1502 +-	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
  4.1503 ++	    (!(dev->features & NETIF_F_GEN_CSUM) &&
  4.1504 + 	     (!(dev->features & NETIF_F_IP_CSUM) ||
  4.1505 + 	      skb->protocol != htons(ETH_P_IP))))
  4.1506 + 	      	if (skb_checksum_help(skb, 0))
  4.1507 + 	      		goto out_kfree_skb;
  4.1508 + 
  4.1509 ++gso:
  4.1510 + 	spin_lock_prefetch(&dev->queue_lock);
  4.1511 + 
  4.1512 + 	/* Disable soft irqs for various locks below. Also 
  4.1513 + 	 * stops preemption for RCU. 
  4.1514 + 	 */
  4.1515 +-	local_bh_disable(); 
  4.1516 ++	rcu_read_lock_bh(); 
  4.1517 + 
  4.1518 + 	/* Updates of qdisc are serialized by queue_lock. 
  4.1519 + 	 * The struct Qdisc which is pointed to by qdisc is now a 
  4.1520 +@@ -1309,8 +1387,8 @@ #endif
  4.1521 + 	/* The device has no queue. Common case for software devices:
  4.1522 + 	   loopback, all the sorts of tunnels...
  4.1523 + 
  4.1524 +-	   Really, it is unlikely that xmit_lock protection is necessary here.
  4.1525 +-	   (f.e. loopback and IP tunnels are clean ignoring statistics
  4.1526 ++	   Really, it is unlikely that netif_tx_lock protection is necessary
  4.1527 ++	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
  4.1528 + 	   counters.)
  4.1529 + 	   However, it is possible, that they rely on protection
  4.1530 + 	   made by us here.
  4.1531 +@@ -1326,11 +1404,8 @@ #endif
  4.1532 + 			HARD_TX_LOCK(dev, cpu);
  4.1533 + 
  4.1534 + 			if (!netif_queue_stopped(dev)) {
  4.1535 +-				if (netdev_nit)
  4.1536 +-					dev_queue_xmit_nit(skb, dev);
  4.1537 +-
  4.1538 + 				rc = 0;
  4.1539 +-				if (!dev->hard_start_xmit(skb, dev)) {
  4.1540 ++				if (!dev_hard_start_xmit(skb, dev)) {
  4.1541 + 					HARD_TX_UNLOCK(dev);
  4.1542 + 					goto out;
  4.1543 + 				}
  4.1544 +@@ -1349,13 +1424,13 @@ #endif
  4.1545 + 	}
  4.1546 + 
  4.1547 + 	rc = -ENETDOWN;
  4.1548 +-	local_bh_enable();
  4.1549 ++	rcu_read_unlock_bh();
  4.1550 + 
  4.1551 + out_kfree_skb:
  4.1552 + 	kfree_skb(skb);
  4.1553 + 	return rc;
  4.1554 + out:
  4.1555 +-	local_bh_enable();
  4.1556 ++	rcu_read_unlock_bh();
  4.1557 + 	return rc;
  4.1558 + }
  4.1559 + 
  4.1560 +@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
  4.1561 + 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  4.1562 + 
  4.1563 + 	spin_lock_init(&dev->queue_lock);
  4.1564 +-	spin_lock_init(&dev->xmit_lock);
  4.1565 ++	spin_lock_init(&dev->_xmit_lock);
  4.1566 + 	dev->xmit_lock_owner = -1;
  4.1567 + #ifdef CONFIG_NET_CLS_ACT
  4.1568 + 	spin_lock_init(&dev->ingress_lock);
  4.1569 +@@ -2714,9 +2789,7 @@ #endif
  4.1570 + 
  4.1571 + 	/* Fix illegal SG+CSUM combinations. */
  4.1572 + 	if ((dev->features & NETIF_F_SG) &&
  4.1573 +-	    !(dev->features & (NETIF_F_IP_CSUM |
  4.1574 +-			       NETIF_F_NO_CSUM |
  4.1575 +-			       NETIF_F_HW_CSUM))) {
  4.1576 ++	    !(dev->features & NETIF_F_ALL_CSUM)) {
  4.1577 + 		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
  4.1578 + 		       dev->name);
  4.1579 + 		dev->features &= ~NETIF_F_SG;
  4.1580 +@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
  4.1581 + EXPORT_SYMBOL(__dev_get_by_index);
  4.1582 + EXPORT_SYMBOL(__dev_get_by_name);
  4.1583 + EXPORT_SYMBOL(__dev_remove_pack);
  4.1584 +-EXPORT_SYMBOL(__skb_linearize);
  4.1585 + EXPORT_SYMBOL(dev_valid_name);
  4.1586 + EXPORT_SYMBOL(dev_add_pack);
  4.1587 + EXPORT_SYMBOL(dev_alloc_name);
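
Two details of dev_hard_start_xmit() deserve a note. First, the parent skb's destructor is stashed in skb->cb via DEV_GSO_CB before being replaced, so freeing a partially sent parent also frees its unsent segments; after a fully successful walk the destructor is restored and the fall-through to kfree_skb() releases the now-empty parent. Second, when the driver rejects a segment, that segment is pushed back onto the chain and the error is propagated so the caller can park the parent in the new dev->gso_skb field (the requeue itself lives in sch_generic.c, outside this section). A condensed restatement of the chain walk, with simplified return handling:

	/* Restatement of the segment walk above; rc handling simplified. */
	static int xmit_gso_chain(struct sk_buff *skb, struct net_device *dev)
	{
		do {
			struct sk_buff *nskb = skb->next;
			int rc;

			skb->next = nskb->next;		/* pop one segment */
			nskb->next = NULL;
			rc = dev->hard_start_xmit(nskb, dev);
			if (rc) {
				nskb->next = skb->next;	/* push it back so */
				skb->next = nskb;	/* the tail requeues */
				return rc;
			}
		} while (skb->next);

		return 0;	/* caller restores destructor, frees parent */
	}
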
  4.1588 +diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
  4.1589 +index 05d6085..c57d887 100644
  4.1590 +--- a/net/core/dev_mcast.c
  4.1591 ++++ b/net/core/dev_mcast.c
  4.1592 +@@ -62,7 +62,7 @@ #include <net/arp.h>
  4.1593 +  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  4.1594 +  *	so that it must be bh protected.
  4.1595 +  *
  4.1596 +- *	We block accesses to device mc filters with dev->xmit_lock.
  4.1597 ++ *	We block accesses to device mc filters with netif_tx_lock.
  4.1598 +  */
  4.1599 + 
  4.1600 + /*
  4.1601 +@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
  4.1602 + 
  4.1603 + void dev_mc_upload(struct net_device *dev)
  4.1604 + {
  4.1605 +-	spin_lock_bh(&dev->xmit_lock);
  4.1606 ++	netif_tx_lock_bh(dev);
  4.1607 + 	__dev_mc_upload(dev);
  4.1608 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1609 ++	netif_tx_unlock_bh(dev);
  4.1610 + }
  4.1611 + 
  4.1612 + /*
  4.1613 +@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
  4.1614 + 	int err = 0;
  4.1615 + 	struct dev_mc_list *dmi, **dmip;
  4.1616 + 
  4.1617 +-	spin_lock_bh(&dev->xmit_lock);
  4.1618 ++	netif_tx_lock_bh(dev);
  4.1619 + 
  4.1620 + 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
  4.1621 + 		/*
  4.1622 +@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
  4.1623 + 			 */
  4.1624 + 			__dev_mc_upload(dev);
  4.1625 + 			
  4.1626 +-			spin_unlock_bh(&dev->xmit_lock);
  4.1627 ++			netif_tx_unlock_bh(dev);
  4.1628 + 			return 0;
  4.1629 + 		}
  4.1630 + 	}
  4.1631 + 	err = -ENOENT;
  4.1632 + done:
  4.1633 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1634 ++	netif_tx_unlock_bh(dev);
  4.1635 + 	return err;
  4.1636 + }
  4.1637 + 
  4.1638 +@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
  4.1639 + 
  4.1640 + 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
  4.1641 + 
  4.1642 +-	spin_lock_bh(&dev->xmit_lock);
  4.1643 ++	netif_tx_lock_bh(dev);
  4.1644 + 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
  4.1645 + 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
  4.1646 + 		    dmi->dmi_addrlen == alen) {
  4.1647 +@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
  4.1648 + 	}
  4.1649 + 
  4.1650 + 	if ((dmi = dmi1) == NULL) {
  4.1651 +-		spin_unlock_bh(&dev->xmit_lock);
  4.1652 ++		netif_tx_unlock_bh(dev);
  4.1653 + 		return -ENOMEM;
  4.1654 + 	}
  4.1655 + 	memcpy(dmi->dmi_addr, addr, alen);
  4.1656 +@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
  4.1657 + 
  4.1658 + 	__dev_mc_upload(dev);
  4.1659 + 	
  4.1660 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1661 ++	netif_tx_unlock_bh(dev);
  4.1662 + 	return 0;
  4.1663 + 
  4.1664 + done:
  4.1665 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1666 ++	netif_tx_unlock_bh(dev);
  4.1667 + 	kfree(dmi1);
  4.1668 + 	return err;
  4.1669 + }
  4.1670 +@@ -204,7 +204,7 @@ done:
  4.1671 + 
  4.1672 + void dev_mc_discard(struct net_device *dev)
  4.1673 + {
  4.1674 +-	spin_lock_bh(&dev->xmit_lock);
  4.1675 ++	netif_tx_lock_bh(dev);
  4.1676 + 	
  4.1677 + 	while (dev->mc_list != NULL) {
  4.1678 + 		struct dev_mc_list *tmp = dev->mc_list;
  4.1679 +@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
  4.1680 + 	}
  4.1681 + 	dev->mc_count = 0;
  4.1682 + 
  4.1683 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1684 ++	netif_tx_unlock_bh(dev);
  4.1685 + }
  4.1686 + 
  4.1687 + #ifdef CONFIG_PROC_FS
  4.1688 +@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
  4.1689 + 	struct dev_mc_list *m;
  4.1690 + 	struct net_device *dev = v;
  4.1691 + 
  4.1692 +-	spin_lock_bh(&dev->xmit_lock);
  4.1693 ++	netif_tx_lock_bh(dev);
  4.1694 + 	for (m = dev->mc_list; m; m = m->next) {
  4.1695 + 		int i;
  4.1696 + 
  4.1697 +@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
  4.1698 + 
  4.1699 + 		seq_putc(seq, '\n');
  4.1700 + 	}
  4.1701 +-	spin_unlock_bh(&dev->xmit_lock);
  4.1702 ++	netif_tx_unlock_bh(dev);
  4.1703 + 	return 0;
  4.1704 + }
  4.1705 + 
  4.1706 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
  4.1707 +index e6f7610..27ce168 100644
  4.1708 +--- a/net/core/ethtool.c
  4.1709 ++++ b/net/core/ethtool.c
  4.1710 +@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
  4.1711 + 
  4.1712 + u32 ethtool_op_get_tx_csum(struct net_device *dev)
  4.1713 + {
  4.1714 +-	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
  4.1715 ++	return (dev->features & NETIF_F_ALL_CSUM) != 0;
  4.1716 + }
  4.1717 + 
  4.1718 + int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
  4.1719 +@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev
  4.1720 + 		return -EFAULT;
  4.1721 + 
  4.1722 + 	if (edata.data && 
  4.1723 +-	    !(dev->features & (NETIF_F_IP_CSUM |
  4.1724 +-			       NETIF_F_NO_CSUM |
  4.1725 +-			       NETIF_F_HW_CSUM)))
  4.1726 ++	    !(dev->features & NETIF_F_ALL_CSUM))
  4.1727 + 		return -EINVAL;
  4.1728 + 
  4.1729 + 	return __ethtool_set_sg(dev, edata.data);
  4.1730 +@@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_de
  4.1731 + 
  4.1732 + static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
  4.1733 + {
  4.1734 +-	struct ethtool_value edata = { ETHTOOL_GTSO };
  4.1735 ++	struct ethtool_value edata = { ETHTOOL_GUFO };
  4.1736 + 
  4.1737 + 	if (!dev->ethtool_ops->get_ufo)
  4.1738 + 		return -EOPNOTSUPP;
  4.1739 +@@ -600,6 +598,7 @@ static int ethtool_get_ufo(struct net_de
  4.1740 + 		 return -EFAULT;
  4.1741 + 	return 0;
  4.1742 + }
  4.1743 ++
  4.1744 + static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
  4.1745 + {
  4.1746 + 	struct ethtool_value edata;
  4.1747 +@@ -615,6 +614,29 @@ static int ethtool_set_ufo(struct net_de
  4.1748 + 	return dev->ethtool_ops->set_ufo(dev, edata.data);
  4.1749 + }
  4.1750 + 
  4.1751 ++static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
  4.1752 ++{
  4.1753 ++	struct ethtool_value edata = { ETHTOOL_GGSO };
  4.1754 ++
  4.1755 ++	edata.data = dev->features & NETIF_F_GSO;
  4.1756 ++	if (copy_to_user(useraddr, &edata, sizeof(edata)))
  4.1757 ++		 return -EFAULT;
  4.1758 ++	return 0;
  4.1759 ++}
  4.1760 ++
  4.1761 ++static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
  4.1762 ++{
  4.1763 ++	struct ethtool_value edata;
  4.1764 ++
  4.1765 ++	if (copy_from_user(&edata, useraddr, sizeof(edata)))
  4.1766 ++		return -EFAULT;
  4.1767 ++	if (edata.data)
  4.1768 ++		dev->features |= NETIF_F_GSO;
  4.1769 ++	else
  4.1770 ++		dev->features &= ~NETIF_F_GSO;
  4.1771 ++	return 0;
  4.1772 ++}
  4.1773 ++
  4.1774 + static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
  4.1775 + {
  4.1776 + 	struct ethtool_test test;
  4.1777 +@@ -906,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
  4.1778 + 	case ETHTOOL_SUFO:
  4.1779 + 		rc = ethtool_set_ufo(dev, useraddr);
  4.1780 + 		break;
  4.1781 ++	case ETHTOOL_GGSO:
  4.1782 ++		rc = ethtool_get_gso(dev, useraddr);
  4.1783 ++		break;
  4.1784 ++	case ETHTOOL_SGSO:
  4.1785 ++		rc = ethtool_set_gso(dev, useraddr);
  4.1786 ++		break;
  4.1787 + 	default:
  4.1788 + 		rc =  -EOPNOTSUPP;
  4.1789 + 	}
  4.1790 +diff --git a/net/core/netpoll.c b/net/core/netpoll.c
  4.1791 +index ea51f8d..ec28d3b 100644
  4.1792 +--- a/net/core/netpoll.c
  4.1793 ++++ b/net/core/netpoll.c
  4.1794 +@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp
  4.1795 + 
  4.1796 + 	do {
  4.1797 + 		npinfo->tries--;
  4.1798 +-		spin_lock(&np->dev->xmit_lock);
  4.1799 +-		np->dev->xmit_lock_owner = smp_processor_id();
  4.1800 ++		netif_tx_lock(np->dev);
  4.1801 + 
  4.1802 + 		/*
  4.1803 + 		 * network drivers do not expect to be called if the queue is
  4.1804 + 		 * stopped.
  4.1805 + 		 */
  4.1806 + 		if (netif_queue_stopped(np->dev)) {
  4.1807 +-			np->dev->xmit_lock_owner = -1;
  4.1808 +-			spin_unlock(&np->dev->xmit_lock);
  4.1809 ++			netif_tx_unlock(np->dev);
  4.1810 + 			netpoll_poll(np);
  4.1811 + 			udelay(50);
  4.1812 + 			continue;
  4.1813 + 		}
  4.1814 + 
  4.1815 + 		status = np->dev->hard_start_xmit(skb, np->dev);
  4.1816 +-		np->dev->xmit_lock_owner = -1;
  4.1817 +-		spin_unlock(&np->dev->xmit_lock);
  4.1818 ++		netif_tx_unlock(np->dev);
  4.1819 + 
  4.1820 + 		/* success */
  4.1821 + 		if(!status) {
  4.1822 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c
  4.1823 +index da16f8f..2380347 100644
  4.1824 +--- a/net/core/pktgen.c
  4.1825 ++++ b/net/core/pktgen.c
  4.1826 +@@ -2582,7 +2582,7 @@ static __inline__ void pktgen_xmit(struc
  4.1827 + 		}
  4.1828 + 	}
  4.1829 + 	
  4.1830 +-	spin_lock_bh(&odev->xmit_lock);
  4.1831 ++	netif_tx_lock_bh(odev);
  4.1832 + 	if (!netif_queue_stopped(odev)) {
  4.1833 + 
  4.1834 + 		atomic_inc(&(pkt_dev->skb->users));
  4.1835 +@@ -2627,7 +2627,7 @@ retry_now:
  4.1836 + 		pkt_dev->next_tx_ns = 0;
  4.1837 +         }
  4.1838 + 
  4.1839 +-	spin_unlock_bh(&odev->xmit_lock);
  4.1840 ++	netif_tx_unlock_bh(odev);
  4.1841 + 	
  4.1842 + 	/* If pkt_dev->count is zero, then run forever */
  4.1843 + 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
  4.1844 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
  4.1845 +index 2144952..46f56af 100644
  4.1846 +--- a/net/core/skbuff.c
  4.1847 ++++ b/net/core/skbuff.c
  4.1848 +@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int
  4.1849 + 	shinfo = skb_shinfo(skb);
  4.1850 + 	atomic_set(&shinfo->dataref, 1);
  4.1851 + 	shinfo->nr_frags  = 0;
  4.1852 +-	shinfo->tso_size = 0;
  4.1853 +-	shinfo->tso_segs = 0;
  4.1854 +-	shinfo->ufo_size = 0;
  4.1855 ++	shinfo->gso_size = 0;
  4.1856 ++	shinfo->gso_segs = 0;
  4.1857 ++	shinfo->gso_type = 0;
  4.1858 + 	shinfo->ip6_frag_id = 0;
  4.1859 + 	shinfo->frag_list = NULL;
  4.1860 + 
  4.1861 +@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme
  4.1862 + 
  4.1863 + 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
  4.1864 + 	skb_shinfo(skb)->nr_frags  = 0;
  4.1865 +-	skb_shinfo(skb)->tso_size = 0;
  4.1866 +-	skb_shinfo(skb)->tso_segs = 0;
  4.1867 ++	skb_shinfo(skb)->gso_size = 0;
  4.1868 ++	skb_shinfo(skb)->gso_segs = 0;
  4.1869 ++	skb_shinfo(skb)->gso_type = 0;
  4.1870 + 	skb_shinfo(skb)->frag_list = NULL;
  4.1871 + out:
  4.1872 + 	return skb;
  4.1873 +@@ -501,8 +502,9 @@ #endif
  4.1874 + 	new->tc_index	= old->tc_index;
  4.1875 + #endif
  4.1876 + 	atomic_set(&new->users, 1);
  4.1877 +-	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
  4.1878 +-	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
  4.1879 ++	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
  4.1880 ++	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
  4.1881 ++	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
  4.1882 + }
  4.1883 + 
  4.1884 + /**
  4.1885 +@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 
  4.1886 + 	return 0;
  4.1887 + }
  4.1888 + 
  4.1889 ++/**
  4.1890 ++ *	skb_segment - Perform protocol segmentation on skb.
  4.1891 ++ *	@skb: buffer to segment
  4.1892 ++ *	@features: features for the output path (see dev->features)
  4.1893 ++ *
  4.1894 ++ *	This function performs segmentation on the given skb.  It
  4.1895 ++ *	returns a pointer to the first in a list of new skbs for the
  4.1896 ++ *	segments.  In case of error it returns ERR_PTR(err).
  4.1897 ++ */
  4.1898 ++struct sk_buff *skb_segment(struct sk_buff *skb, int features)
  4.1899 ++{
  4.1900 ++	struct sk_buff *segs = NULL;
  4.1901 ++	struct sk_buff *tail = NULL;
  4.1902 ++	unsigned int mss = skb_shinfo(skb)->gso_size;
  4.1903 ++	unsigned int doffset = skb->data - skb->mac.raw;
  4.1904 ++	unsigned int offset = doffset;
  4.1905 ++	unsigned int headroom;
  4.1906 ++	unsigned int len;
  4.1907 ++	int sg = features & NETIF_F_SG;
  4.1908 ++	int nfrags = skb_shinfo(skb)->nr_frags;
  4.1909 ++	int err = -ENOMEM;
  4.1910 ++	int i = 0;
  4.1911 ++	int pos;
  4.1912 ++
  4.1913 ++	__skb_push(skb, doffset);
  4.1914 ++	headroom = skb_headroom(skb);
  4.1915 ++	pos = skb_headlen(skb);
  4.1916 ++
  4.1917 ++	do {
  4.1918 ++		struct sk_buff *nskb;
  4.1919 ++		skb_frag_t *frag;
  4.1920 ++		int hsize, nsize;
  4.1921 ++		int k;
  4.1922 ++		int size;
  4.1923 ++
  4.1924 ++		len = skb->len - offset;
  4.1925 ++		if (len > mss)
  4.1926 ++			len = mss;
  4.1927 ++
  4.1928 ++		hsize = skb_headlen(skb) - offset;
  4.1929 ++		if (hsize < 0)
  4.1930 ++			hsize = 0;
  4.1931 ++		nsize = hsize + doffset;
  4.1932 ++		if (nsize > len + doffset || !sg)
  4.1933 ++			nsize = len + doffset;
  4.1934 ++
  4.1935 ++		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
  4.1936 ++		if (unlikely(!nskb))
  4.1937 ++			goto err;
  4.1938 ++
  4.1939 ++		if (segs)
  4.1940 ++			tail->next = nskb;
  4.1941 ++		else
  4.1942 ++			segs = nskb;
  4.1943 ++		tail = nskb;
  4.1944 ++
  4.1945 ++		nskb->dev = skb->dev;
  4.1946 ++		nskb->priority = skb->priority;
  4.1947 ++		nskb->protocol = skb->protocol;
  4.1948 ++		nskb->dst = dst_clone(skb->dst);
  4.1949 ++		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
  4.1950 ++		nskb->pkt_type = skb->pkt_type;
  4.1951 ++		nskb->mac_len = skb->mac_len;
  4.1952 ++
  4.1953 ++		skb_reserve(nskb, headroom);
  4.1954 ++		nskb->mac.raw = nskb->data;
  4.1955 ++		nskb->nh.raw = nskb->data + skb->mac_len;
  4.1956 ++		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
  4.1957 ++		memcpy(skb_put(nskb, doffset), skb->data, doffset);
  4.1958 ++
  4.1959 ++		if (!sg) {
  4.1960 ++			nskb->csum = skb_copy_and_csum_bits(skb, offset,
  4.1961 ++							    skb_put(nskb, len),
  4.1962 ++							    len, 0);
  4.1963 ++			continue;
  4.1964 ++		}
  4.1965 ++
  4.1966 ++		frag = skb_shinfo(nskb)->frags;
  4.1967 ++		k = 0;
  4.1968 ++
  4.1969 ++		nskb->ip_summed = CHECKSUM_HW;
  4.1970 ++		nskb->csum = skb->csum;
  4.1971 ++		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
  4.1972 ++
  4.1973 ++		while (pos < offset + len) {
  4.1974 ++			BUG_ON(i >= nfrags);
  4.1975 ++
  4.1976 ++			*frag = skb_shinfo(skb)->frags[i];
  4.1977 ++			get_page(frag->page);
  4.1978 ++			size = frag->size;
  4.1979 ++
  4.1980 ++			if (pos < offset) {
  4.1981 ++				frag->page_offset += offset - pos;
  4.1982 ++				frag->size -= offset - pos;
  4.1983 ++			}
  4.1984 ++
  4.1985 ++			k++;
  4.1986 ++
  4.1987 ++			if (pos + size <= offset + len) {
  4.1988 ++				i++;
  4.1989 ++				pos += size;
  4.1990 ++			} else {
  4.1991 ++				frag->size -= pos + size - (offset + len);
  4.1992 ++				break;
  4.1993 ++			}
  4.1994 ++
  4.1995 ++			frag++;
  4.1996 ++		}
  4.1997 ++
  4.1998 ++		skb_shinfo(nskb)->nr_frags = k;
  4.1999 ++		nskb->data_len = len - hsize;
  4.2000 ++		nskb->len += nskb->data_len;
  4.2001 ++		nskb->truesize += nskb->data_len;
  4.2002 ++	} while ((offset += len) < skb->len);
  4.2003 ++
  4.2004 ++	return segs;
  4.2005 ++
  4.2006 ++err:
  4.2007 ++	while ((skb = segs)) {
  4.2008 ++		segs = skb->next;
  4.2009 ++		kfree_skb(skb);
  4.2010 ++	}
  4.2011 ++	return ERR_PTR(err);
  4.2012 ++}
  4.2013 ++
  4.2014 ++EXPORT_SYMBOL_GPL(skb_segment);
  4.2015 ++
  4.2016 + void __init skb_init(void)
  4.2017 + {
  4.2018 + 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
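
In skb_segment(), offset walks the super-packet payload in gso_size steps, hsize is how much of the current chunk still lives in the linear head, and everything beyond that is shared rather than copied: the frag loop takes a page reference with get_page() and only adjusts page_offset/size, so the SG path is zero-copy for paged data. A worked example of the split, assuming a 4000-byte packet with 200 linear bytes and an MSS of 1460 (invented numbers):

	/* Worked example of the offset/hsize arithmetic in skb_segment(). */
	#include <stdio.h>

	int main(void)
	{
		unsigned int skb_len = 4000, headlen = 200, mss = 1460;
		unsigned int offset = 0;

		while (offset < skb_len) {
			unsigned int len = skb_len - offset;
			int hsize;

			if (len > mss)
				len = mss;
			hsize = (int)headlen - (int)offset;
			if (hsize < 0)
				hsize = 0;	/* rest comes from frag pages */
			printf("segment: %u bytes (%d linear, %u paged)\n",
			       len, hsize, len - hsize);
			offset += len;
		}
		return 0;
	}
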
  4.2019 +diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
  4.2020 +index 44bda85..2e3323a 100644
  4.2021 +--- a/net/decnet/dn_nsp_in.c
  4.2022 ++++ b/net/decnet/dn_nsp_in.c
  4.2023 +@@ -801,8 +801,7 @@ got_it:
  4.2024 + 		 * We linearize everything except data segments here.
  4.2025 + 		 */
  4.2026 + 		if (cb->nsp_flags & ~0x60) {
  4.2027 +-			if (unlikely(skb_is_nonlinear(skb)) &&
  4.2028 +-			    skb_linearize(skb, GFP_ATOMIC) != 0)
  4.2029 ++			if (unlikely(skb_linearize(skb)))
  4.2030 + 				goto free_out;
  4.2031 + 		}
  4.2032 + 
  4.2033 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
  4.2034 +index 3407f19..a0a25e0 100644
  4.2035 +--- a/net/decnet/dn_route.c
  4.2036 ++++ b/net/decnet/dn_route.c
  4.2037 +@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st
  4.2038 + 			padlen);
  4.2039 + 
  4.2040 +         if (flags & DN_RT_PKT_CNTL) {
  4.2041 +-		if (unlikely(skb_is_nonlinear(skb)) &&
  4.2042 +-		    skb_linearize(skb, GFP_ATOMIC) != 0)
  4.2043 ++		if (unlikely(skb_linearize(skb)))
  4.2044 + 			goto dump_it;
  4.2045 + 
  4.2046 +                 switch(flags & DN_RT_CNTL_MSK) {
  4.2047 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
  4.2048 +index 97c276f..5ba719e 100644
  4.2049 +--- a/net/ipv4/af_inet.c
  4.2050 ++++ b/net/ipv4/af_inet.c
  4.2051 +@@ -68,6 +68,7 @@
  4.2052 +  */
  4.2053 + 
  4.2054 + #include <linux/config.h>
  4.2055 ++#include <linux/err.h>
  4.2056 + #include <linux/errno.h>
  4.2057 + #include <linux/types.h>
  4.2058 + #include <linux/socket.h>
  4.2059 +@@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock *
  4.2060 + 
  4.2061 + EXPORT_SYMBOL(inet_sk_rebuild_header);
  4.2062 + 
  4.2063 ++static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
  4.2064 ++{
  4.2065 ++	struct sk_buff *segs = ERR_PTR(-EINVAL);
  4.2066 ++	struct iphdr *iph;
  4.2067 ++	struct net_protocol *ops;
  4.2068 ++	int proto;
  4.2069 ++	int ihl;
  4.2070 ++	int id;
  4.2071 ++
  4.2072 ++	if (!pskb_may_pull(skb, sizeof(*iph)))
  4.2073 ++		goto out;
  4.2074 ++
  4.2075 ++	iph = skb->nh.iph;
  4.2076 ++	ihl = iph->ihl * 4;
  4.2077 ++	if (ihl < sizeof(*iph))
  4.2078 ++		goto out;
  4.2079 ++
  4.2080 ++	if (!pskb_may_pull(skb, ihl))
  4.2081 ++		goto out;
  4.2082 ++
  4.2083 ++	skb->h.raw = __skb_pull(skb, ihl);
  4.2084 ++	iph = skb->nh.iph;
  4.2085 ++	id = ntohs(iph->id);
  4.2086 ++	proto = iph->protocol & (MAX_INET_PROTOS - 1);
  4.2087 ++	segs = ERR_PTR(-EPROTONOSUPPORT);
  4.2088 ++
  4.2089 ++	rcu_read_lock();
  4.2090 ++	ops = rcu_dereference(inet_protos[proto]);
  4.2091 ++	if (ops && ops->gso_segment)
  4.2092 ++		segs = ops->gso_segment(skb, features);
  4.2093 ++	rcu_read_unlock();
  4.2094 ++
  4.2095 ++	if (!segs || unlikely(IS_ERR(segs)))
  4.2096 ++		goto out;
  4.2097 ++
  4.2098 ++	skb = segs;
  4.2099 ++	do {
  4.2100 ++		iph = skb->nh.iph;
  4.2101 ++		iph->id = htons(id++);
  4.2102 ++		iph->tot_len = htons(skb->len - skb->mac_len);
  4.2103 ++		iph->check = 0;
  4.2104 ++		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
  4.2105 ++	} while ((skb = skb->next));
  4.2106 ++
  4.2107 ++out:
  4.2108 ++	return segs;
  4.2109 ++}
  4.2110 ++
  4.2111 + #ifdef CONFIG_IP_MULTICAST
  4.2112 + static struct net_protocol igmp_protocol = {
  4.2113 + 	.handler =	igmp_rcv,
  4.2114 +@@ -1093,6 +1142,7 @@ #endif
  4.2115 + static struct net_protocol tcp_protocol = {
  4.2116 + 	.handler =	tcp_v4_rcv,
  4.2117 + 	.err_handler =	tcp_v4_err,
  4.2118 ++	.gso_segment =	tcp_tso_segment,
  4.2119 + 	.no_policy =	1,
  4.2120 + };
  4.2121 + 
  4.2122 +@@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void);
  4.2123 + static struct packet_type ip_packet_type = {
  4.2124 + 	.type = __constant_htons(ETH_P_IP),
  4.2125 + 	.func = ip_rcv,
  4.2126 ++	.gso_segment = inet_gso_segment,
  4.2127 + };
  4.2128 + 
  4.2129 + static int __init inet_init(void)
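
After the transport callback has split the payload, inet_gso_segment() makes each segment look like an independently sent datagram: consecutive IP IDs, a per-segment tot_len, and a header checksum recomputed with iph->check zeroed first. For reference, a portable stand-in for what ip_fast_csum() computes (the kernel uses an arch-optimised version; the toy header in main is arbitrary):

	/* 16-bit one's-complement sum over an IP header with check == 0. */
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t ip_checksum(const uint16_t *p, unsigned int ihl)
	{
		uint32_t sum = 0;
		unsigned int i;

		for (i = 0; i < ihl * 2; i++)	/* ihl is in 32-bit words */
			sum += p[i];
		while (sum >> 16)		/* end-around carry */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint16_t hdr[10] = { 0x0045 };	/* toy header, check field 0 */

		printf("%04x\n", ip_checksum(hdr, 5));
		return 0;
	}
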
  4.2130 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
  4.2131 +index 8dcba38..19c3c73 100644
  4.2132 +--- a/net/ipv4/ip_output.c
  4.2133 ++++ b/net/ipv4/ip_output.c
  4.2134 +@@ -210,8 +210,7 @@ #if defined(CONFIG_NETFILTER) && defined
  4.2135 + 		return dst_output(skb);
  4.2136 + 	}
  4.2137 + #endif
  4.2138 +-	if (skb->len > dst_mtu(skb->dst) &&
  4.2139 +-	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
  4.2140 ++	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
  4.2141 + 		return ip_fragment(skb, ip_finish_output2);
  4.2142 + 	else
  4.2143 + 		return ip_finish_output2(skb);
  4.2144 +@@ -362,7 +361,7 @@ packet_routed:
  4.2145 + 	}
  4.2146 + 
  4.2147 + 	ip_select_ident_more(iph, &rt->u.dst, sk,
  4.2148 +-			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);
  4.2149 ++			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
  4.2150 + 
  4.2151 + 	/* Add an IP checksum. */
  4.2152 + 	ip_send_check(iph);
  4.2153 +@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str
  4.2154 + 			       (length - transhdrlen));
  4.2155 + 	if (!err) {
  4.2156 + 		/* specify the length of each IP datagram fragment*/
  4.2157 +-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  4.2158 ++		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  4.2159 ++		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  4.2160 + 		__skb_queue_tail(&sk->sk_write_queue, skb);
  4.2161 + 
  4.2162 + 		return 0;
  4.2163 +@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk,
  4.2164 + 	 */
  4.2165 + 	if (transhdrlen &&
  4.2166 + 	    length + fragheaderlen <= mtu &&
  4.2167 +-	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
  4.2168 ++	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
  4.2169 + 	    !exthdrlen)
  4.2170 + 		csummode = CHECKSUM_HW;
  4.2171 + 
  4.2172 +@@ -1086,14 +1086,16 @@ ssize_t	ip_append_page(struct sock *sk, 
  4.2173 + 
  4.2174 + 	inet->cork.length += size;
  4.2175 + 	if ((sk->sk_protocol == IPPROTO_UDP) &&
  4.2176 +-	    (rt->u.dst.dev->features & NETIF_F_UFO))
  4.2177 +-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
  4.2178 ++	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
  4.2179 ++		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
  4.2180 ++		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  4.2181 ++	}
  4.2182 + 
  4.2183 + 
  4.2184 + 	while (size > 0) {
  4.2185 + 		int i;
  4.2186 + 
  4.2187 +-		if (skb_shinfo(skb)->ufo_size)
  4.2188 ++		if (skb_shinfo(skb)->gso_size)
  4.2189 + 			len = size;
  4.2190 + 		else {
  4.2191 + 
  4.2192 +diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
  4.2193 +index d64e2ec..7494823 100644
  4.2194 +--- a/net/ipv4/ipcomp.c
  4.2195 ++++ b/net/ipv4/ipcomp.c
  4.2196 +@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat
  4.2197 +                         struct xfrm_decap_state *decap, struct sk_buff *skb)
  4.2198 + {
  4.2199 + 	u8 nexthdr;
  4.2200 +-	int err = 0;
  4.2201 ++	int err = -ENOMEM;
  4.2202 + 	struct iphdr *iph;
  4.2203 + 	union {
  4.2204 + 		struct iphdr	iph;
  4.2205 +@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat
  4.2206 + 	} tmp_iph;
  4.2207 + 
  4.2208 + 
  4.2209 +-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  4.2210 +-	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  4.2211 +-	    	err = -ENOMEM;
  4.2212 ++	if (skb_linearize_cow(skb))
  4.2213 + 	    	goto out;
  4.2214 +-	}
  4.2215 + 
  4.2216 + 	skb->ip_summed = CHECKSUM_NONE;
  4.2217 + 
  4.2218 +@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta
  4.2219 + 		goto out_ok;
  4.2220 + 	}
  4.2221 + 
  4.2222 +-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  4.2223 +-	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  4.2224 ++	if (skb_linearize_cow(skb))
  4.2225 + 		goto out_ok;
  4.2226 +-	}
  4.2227 + 	
  4.2228 + 	err = ipcomp_compress(x, skb);
  4.2229 + 	iph = skb->nh.iph;
  4.2230 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
  4.2231 +index 00aa80e..84130c9 100644
  4.2232 +--- a/net/ipv4/tcp.c
  4.2233 ++++ b/net/ipv4/tcp.c
  4.2234 +@@ -257,6 +257,7 @@ #include <linux/smp_lock.h>
  4.2235 + #include <linux/fs.h>
  4.2236 + #include <linux/random.h>
  4.2237 + #include <linux/bootmem.h>
  4.2238 ++#include <linux/err.h>
  4.2239 + 
  4.2240 + #include <net/icmp.h>
  4.2241 + #include <net/tcp.h>
  4.2242 +@@ -570,7 +571,7 @@ new_segment:
  4.2243 + 		skb->ip_summed = CHECKSUM_HW;
  4.2244 + 		tp->write_seq += copy;
  4.2245 + 		TCP_SKB_CB(skb)->end_seq += copy;
  4.2246 +-		skb_shinfo(skb)->tso_segs = 0;
  4.2247 ++		skb_shinfo(skb)->gso_segs = 0;
  4.2248 + 
  4.2249 + 		if (!copied)
  4.2250 + 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
  4.2251 +@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock
  4.2252 + 	ssize_t res;
  4.2253 + 	struct sock *sk = sock->sk;
  4.2254 + 
  4.2255 +-#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  4.2256 +-
  4.2257 + 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
  4.2258 +-	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
  4.2259 ++	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
  4.2260 + 		return sock_no_sendpage(sock, page, offset, size, flags);
  4.2261 + 
  4.2262 +-#undef TCP_ZC_CSUM_FLAGS
  4.2263 +-
  4.2264 + 	lock_sock(sk);
  4.2265 + 	TCP_CHECK_TIMER(sk);
  4.2266 + 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
  4.2267 +@@ -725,9 +722,7 @@ new_segment:
  4.2268 + 				/*
  4.2269 + 				 * Check whether we can use HW checksum.
  4.2270 + 				 */
  4.2271 +-				if (sk->sk_route_caps &
  4.2272 +-				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
  4.2273 +-				     NETIF_F_HW_CSUM))
  4.2274 ++				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
  4.2275 + 					skb->ip_summed = CHECKSUM_HW;
  4.2276 + 
  4.2277 + 				skb_entail(sk, tp, skb);
  4.2278 +@@ -823,7 +818,7 @@ new_segment:
  4.2279 + 
  4.2280 + 			tp->write_seq += copy;
  4.2281 + 			TCP_SKB_CB(skb)->end_seq += copy;
  4.2282 +-			skb_shinfo(skb)->tso_segs = 0;
  4.2283 ++			skb_shinfo(skb)->gso_segs = 0;
  4.2284 + 
  4.2285 + 			from += copy;
  4.2286 + 			copied += copy;
  4.2287 +@@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int 
  4.2288 + }
  4.2289 + 
  4.2290 + 
  4.2291 ++struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
  4.2292 ++{
  4.2293 ++	struct sk_buff *segs = ERR_PTR(-EINVAL);
  4.2294 ++	struct tcphdr *th;
  4.2295 ++	unsigned thlen;
  4.2296 ++	unsigned int seq;
  4.2297 ++	unsigned int delta;
  4.2298 ++	unsigned int oldlen;
  4.2299 ++	unsigned int len;
  4.2300 ++
  4.2301 ++	if (!pskb_may_pull(skb, sizeof(*th)))
  4.2302 ++		goto out;
  4.2303 ++
  4.2304 ++	th = skb->h.th;
  4.2305 ++	thlen = th->doff * 4;
  4.2306 ++	if (thlen < sizeof(*th))
  4.2307 ++		goto out;
  4.2308 ++
  4.2309 ++	if (!pskb_may_pull(skb, thlen))
  4.2310 ++		goto out;
  4.2311 ++
  4.2312 ++	segs = NULL;
  4.2313 ++	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
  4.2314 ++		goto out;
  4.2315 ++
  4.2316 ++	oldlen = (u16)~skb->len;
  4.2317 ++	__skb_pull(skb, thlen);
  4.2318 ++
  4.2319 ++	segs = skb_segment(skb, features);
  4.2320 ++	if (IS_ERR(segs))
  4.2321 ++		goto out;
  4.2322 ++
  4.2323 ++	len = skb_shinfo(skb)->gso_size;
  4.2324 ++	delta = htonl(oldlen + (thlen + len));
  4.2325 ++
  4.2326 ++	skb = segs;
  4.2327 ++	th = skb->h.th;
  4.2328 ++	seq = ntohl(th->seq);
  4.2329 ++
  4.2330 ++	do {
  4.2331 ++		th->fin = th->psh = 0;
  4.2332 ++
  4.2333 ++		th->check = ~csum_fold(th->check + delta);
  4.2334 ++		if (skb->ip_summed != CHECKSUM_HW)
  4.2335 ++			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  4.2336 ++							   skb->csum));
  4.2337 ++
  4.2338 ++		seq += len;
  4.2339 ++		skb = skb->next;
  4.2340 ++		th = skb->h.th;
  4.2341 ++
  4.2342 ++		th->seq = htonl(seq);
  4.2343 ++		th->cwr = 0;
  4.2344 ++	} while (skb->next);
  4.2345 ++
  4.2346 ++	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
  4.2347 ++	th->check = ~csum_fold(th->check + delta);
  4.2348 ++	if (skb->ip_summed != CHECKSUM_HW)
  4.2349 ++		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
  4.2350 ++						   skb->csum));
  4.2351 ++
  4.2352 ++out:
  4.2353 ++	return segs;
  4.2354 ++}
  4.2355 ++
  4.2356 + extern void __skb_cb_too_small_for_tcp(int, int);
  4.2357 + extern struct tcp_congestion_ops tcp_reno;
  4.2358 + 
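
tcp_tso_segment() is the software fallback for TCP segmentation: it validates and pulls the TCP header (rejecting a doff shorter than the base header), bails out with NULL when skb_gso_ok() passes with NETIF_F_GSO_ROBUST (no segmentation needed), and otherwise lets skb_segment() split the payload into gso_size-sized pieces. It then walks the chain fixing up seq, clearing FIN/PSH on every segment but the last, and patching each th->check incrementally. The delta arithmetic is an RFC 1624-style one's-complement update: oldlen is ~(original TCP length) truncated to 16 bits, so adding oldlen + (thlen + len) to the folded checksum swaps the pseudo-header length field without recomputing the sum; the trailing segment, usually shorter, gets its own delta from its actual length. A portable sketch of that update (the RFC 1071/1624 helpers are written out here, not the kernel's csum_fold):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit one's-complement accumulator to 16 bits and
     * complement it, like the kernel's csum_fold(). */
    static uint16_t fold(uint32_t sum)
    {
    	sum = (sum & 0xffff) + (sum >> 16);
    	sum = (sum & 0xffff) + (sum >> 16);
    	return (uint16_t)~sum;
    }

    /* RFC 1071 checksum over a byte buffer (big-endian words). */
    static uint16_t cksum(const uint8_t *p, size_t n)
    {
    	uint32_t sum = 0;

    	for (; n > 1; p += 2, n -= 2)
    		sum += (uint32_t)(p[0] << 8 | p[1]);
    	if (n)
    		sum += (uint32_t)(p[0] << 8);
    	return fold(sum);
    }

    /* RFC 1624 incremental update: swap 16-bit field oldv for newv in
     * a block whose checksum is check -- the same trick as the
     * oldlen/delta arithmetic, oldlen being ~(old length). */
    static uint16_t cksum_update(uint16_t check, uint16_t oldv, uint16_t newv)
    {
    	uint32_t sum = (uint16_t)~check;

    	sum += (uint16_t)~oldv;		/* subtract the old value */
    	sum += newv;			/* add the new one */
    	return fold(sum);
    }

    int main(void)
    {
    	/* Toy "pseudo-header": the word at offset 2 is the length. */
    	uint8_t buf[8] = { 0x45, 0x00, 0x00, 0x28, 0x1c, 0x46, 0x40, 0x00 };
    	uint16_t check = cksum(buf, sizeof(buf));
    	uint16_t oldv = (uint16_t)(buf[2] << 8 | buf[3]);
    	uint16_t newv = 0x0554;		/* new per-segment length */

    	buf[2] = newv >> 8;
    	buf[3] = newv & 0xff;
    	printf("recomputed  %04x\n", cksum(buf, sizeof(buf)));
    	printf("incremental %04x\n", cksum_update(check, oldv, newv));
    	return 0;
    }

Both lines print the same value, which is the point: per segment, only the length difference is folded into the stored checksum.
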
  4.2359 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
  4.2360 +index e9a54ae..defe77a 100644
  4.2361 +--- a/net/ipv4/tcp_input.c
  4.2362 ++++ b/net/ipv4/tcp_input.c
  4.2363 +@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk,
  4.2364 + 				else
  4.2365 + 					pkt_len = (end_seq -
  4.2366 + 						   TCP_SKB_CB(skb)->seq);
  4.2367 +-				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
  4.2368 ++				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
  4.2369 + 					break;
  4.2370 + 				pcount = tcp_skb_pcount(skb);
  4.2371 + 			}
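
The lone tcp_input.c hunk is a mechanical rename on the SACK-tagging path: tcp_fragment() now takes its per-segment size from gso_size, the field that replaces tso_size throughout this patch.
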
  4.2372 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
  4.2373 +index 310f2e6..ee01f69 100644
  4.2374 +--- a/net/ipv4/tcp_output.c
  4.2375 ++++ b/net/ipv4/tcp_output.c
  4.2376 +@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 
  4.2377 + 		/* Avoid the costly divide in the normal
  4.2378 + 		 * non-TSO case.
  4.2379 + 		 */
  4.2380 +-		skb_shinfo(skb)->tso_segs = 1;
  4.2381 +-		skb_shinfo(skb)->tso_size = 0;
  4.2382 ++		skb_shinfo(skb)->gso_segs = 1;
  4.2383 ++		skb_shinfo(skb)->gso_size = 0;
  4.2384 ++		skb_shinfo(skb)->gso_type = 0;
  4.2385 + 	} else {
  4.2386 + 		unsigned int factor;
  4.2387 + 
  4.2388 + 		factor = skb->len + (mss_now - 1);
  4.2389 + 		factor /= mss_now;
  4.2390 +-		skb_shinfo(skb)->tso_segs = factor;
  4.2391 +-		skb_shinfo(skb)->tso_size = mss_now;
  4.2392 ++		skb_shinfo(skb)->gso_segs = factor;
  4.2393 ++		skb_shinfo(skb)->gso_size = mss_now;
  4.2394 ++		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  4.2395 + 	}
  4.2396 + }
  4.2397 + 
  4.2398 +@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock
  4.2399 + 
  4.2400 + 	if (!tso_segs ||
  4.2401 + 	    (tso_segs > 1 &&
  4.2402 +-	     skb_shinfo(skb)->tso_size != mss_now)) {
  4.2403 ++	     tcp_skb_mss(skb) != mss_now)) {
  4.2404 + 		tcp_set_skb_tso_segs(sk, skb, mss_now);
  4.2405 + 		tso_segs = tcp_skb_pcount(skb);
  4.2406 + 	}
  4.2407 +@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 
  4.2408 + 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
  4.2409 + 		if (!pskb_trim(skb, 0)) {
  4.2410 + 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
  4.2411 +-			skb_shinfo(skb)->tso_segs = 1;
  4.2412 +-			skb_shinfo(skb)->tso_size = 0;
  4.2413 ++			skb_shinfo(skb)->gso_segs = 1;
  4.2414 ++			skb_shinfo(skb)->gso_size = 0;
  4.2415 ++			skb_shinfo(skb)->gso_type = 0;
  4.2416 + 			skb->ip_summed = CHECKSUM_NONE;
  4.2417 + 			skb->csum = 0;
  4.2418 + 		}
  4.2419 +@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk)
  4.2420 + 		skb->csum = 0;
  4.2421 + 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
  4.2422 + 		TCP_SKB_CB(skb)->sacked = 0;
  4.2423 +-		skb_shinfo(skb)->tso_segs = 1;
  4.2424 +-		skb_shinfo(skb)->tso_size = 0;
  4.2425 ++		skb_shinfo(skb)->gso_segs = 1;
  4.2426 ++		skb_shinfo(skb)->gso_size = 0;
  4.2427 ++		skb_shinfo(skb)->gso_type = 0;
  4.2428 + 
  4.2429 + 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
  4.2430 + 		TCP_SKB_CB(skb)->seq = tp->write_seq;
  4.2431 +@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock *
  4.2432 + 	skb->csum = 0;
  4.2433 + 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
  4.2434 + 	TCP_SKB_CB(skb)->sacked = 0;
  4.2435 +-	skb_shinfo(skb)->tso_segs = 1;
  4.2436 +-	skb_shinfo(skb)->tso_size = 0;
  4.2437 ++	skb_shinfo(skb)->gso_segs = 1;
  4.2438 ++	skb_shinfo(skb)->gso_size = 0;
  4.2439 ++	skb_shinfo(skb)->gso_type = 0;
  4.2440 + 
  4.2441 + 	/* Send it off. */
  4.2442 + 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
  4.2443 +@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 
  4.2444 + 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
  4.2445 + 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
  4.2446 + 	TCP_SKB_CB(skb)->sacked = 0;
  4.2447 +-	skb_shinfo(skb)->tso_segs = 1;
  4.2448 +-	skb_shinfo(skb)->tso_size = 0;
  4.2449 ++	skb_shinfo(skb)->gso_segs = 1;
  4.2450 ++	skb_shinfo(skb)->gso_size = 0;
  4.2451 ++	skb_shinfo(skb)->gso_type = 0;
  4.2452 + 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
  4.2453 + 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
  4.2454 + 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
  4.2455 +@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk)
  4.2456 + 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
  4.2457 + 	TCP_ECN_send_syn(sk, tp, buff);
  4.2458 + 	TCP_SKB_CB(buff)->sacked = 0;
  4.2459 +-	skb_shinfo(buff)->tso_segs = 1;
  4.2460 +-	skb_shinfo(buff)->tso_size = 0;
  4.2461 ++	skb_shinfo(buff)->gso_segs = 1;
  4.2462 ++	skb_shinfo(buff)->gso_size = 0;
  4.2463 ++	skb_shinfo(buff)->gso_type = 0;
  4.2464 + 	buff->csum = 0;
  4.2465 + 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
  4.2466 + 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
  4.2467 +@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk)
  4.2468 + 		buff->csum = 0;
  4.2469 + 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
  4.2470 + 		TCP_SKB_CB(buff)->sacked = 0;
  4.2471 +-		skb_shinfo(buff)->tso_segs = 1;
  4.2472 +-		skb_shinfo(buff)->tso_size = 0;
  4.2473 ++		skb_shinfo(buff)->gso_segs = 1;
  4.2474 ++		skb_shinfo(buff)->gso_size = 0;
  4.2475 ++		skb_shinfo(buff)->gso_type = 0;
  4.2476 + 
  4.2477 + 		/* Send it off, this clears delayed acks for us. */
  4.2478 + 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
  4.2479 +@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc
  4.2480 + 	skb->csum = 0;
  4.2481 + 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
  4.2482 + 	TCP_SKB_CB(skb)->sacked = urgent;
  4.2483 +-	skb_shinfo(skb)->tso_segs = 1;
  4.2484 +-	skb_shinfo(skb)->tso_size = 0;
  4.2485 ++	skb_shinfo(skb)->gso_segs = 1;
  4.2486 ++	skb_shinfo(skb)->gso_size = 0;
  4.2487 ++	skb_shinfo(skb)->gso_type = 0;
  4.2488 + 
  4.2489 + 	/* Use a previous sequence.  This should cause the other
  4.2490 + 	 * end to send an ack.  Don't queue or clone SKB, just
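
tcp_set_skb_tso_segs() now fills all three GSO fields: singleton skbs get gso_segs = 1 with size and type zeroed, while multi-MSS skbs get gso_segs = ceil(len / mss), gso_size = mss and gso_type = SKB_GSO_TCPV4; tcp_init_tso_segs() reads the size back through tcp_skb_mss() instead of poking the shinfo field directly. The segment count is the usual round-up division, shown here as a self-checking sketch:

    #include <assert.h>

    /* ceil(len / mss) without floating point, matching the factor
     * computation in tcp_set_skb_tso_segs(). */
    static unsigned int tso_factor(unsigned int len, unsigned int mss)
    {
    	unsigned int factor = len + (mss - 1);

    	factor /= mss;
    	return factor;
    }

    int main(void)
    {
    	assert(tso_factor(1, 1460) == 1);
    	assert(tso_factor(1460, 1460) == 1);
    	assert(tso_factor(1461, 1460) == 2);
    	assert(tso_factor(4 * 1460, 1460) == 4);
    	return 0;
    }

Every hand-rolled control packet above (retransmit trim, FIN, RST, SYN-ACK, SYN, ACK, zero-window probe) sets the same singleton triple, which is why the gso_type = 0 line appears in so many hunks.
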
  4.2491 +diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
  4.2492 +index 32ad229..737c1db 100644
  4.2493 +--- a/net/ipv4/xfrm4_output.c
  4.2494 ++++ b/net/ipv4/xfrm4_output.c
  4.2495 +@@ -9,6 +9,8 @@
  4.2496 +  */
  4.2497 + 
  4.2498 + #include <linux/compiler.h>
  4.2499 ++#include <linux/if_ether.h>
  4.2500 ++#include <linux/kernel.h>
  4.2501 + #include <linux/skbuff.h>
  4.2502 + #include <linux/spinlock.h>
  4.2503 + #include <linux/netfilter_ipv4.h>
  4.2504 +@@ -152,16 +154,10 @@ error_nolock:
  4.2505 + 	goto out_exit;
  4.2506 + }
  4.2507 + 
  4.2508 +-static int xfrm4_output_finish(struct sk_buff *skb)
  4.2509 ++static int xfrm4_output_finish2(struct sk_buff *skb)
  4.2510 + {
  4.2511 + 	int err;
  4.2512 + 
  4.2513 +-#ifdef CONFIG_NETFILTER
  4.2514 +-	if (!skb->dst->xfrm) {
  4.2515 +-		IPCB(skb)->flags |= IPSKB_REROUTED;
  4.2516 +-		return dst_output(skb);
  4.2517 +-	}
  4.2518 +-#endif
  4.2519 + 	while (likely((err = xfrm4_output_one(skb)) == 0)) {
  4.2520 + 		nf_reset(skb);
  4.2521 + 
  4.2522 +@@ -174,7 +170,7 @@ #endif
  4.2523 + 			return dst_output(skb);
  4.2524 + 
  4.2525 + 		err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
  4.2526 +-			      skb->dst->dev, xfrm4_output_finish);
  4.2527 ++			      skb->dst->dev, xfrm4_output_finish2);
  4.2528 + 		if (unlikely(err != 1))
  4.2529 + 			break;
  4.2530 + 	}
  4.2531 +@@ -182,6 +178,48 @@ #endif
  4.2532 + 	return err;
  4.2533 + }
  4.2534 + 
  4.2535 ++static int xfrm4_output_finish(struct sk_buff *skb)
  4.2536 ++{
  4.2537 ++	struct sk_buff *segs;
  4.2538 ++
  4.2539 ++#ifdef CONFIG_NETFILTER
  4.2540 ++	if (!skb->dst->xfrm) {
  4.2541 ++		IPCB(skb)->flags |= IPSKB_REROUTED;
  4.2542 ++		return dst_output(skb);
  4.2543 ++	}
  4.2544 ++#endif
  4.2545 ++
  4.2546 ++	if (!skb_shinfo(skb)->gso_size)
  4.2547 ++		return xfrm4_output_finish2(skb);
  4.2548 ++
  4.2549 ++	skb->protocol = htons(ETH_P_IP);
  4.2550 ++	segs = skb_gso_segment(skb, 0);
  4.2551 ++	kfree_skb(skb);
  4.2552 ++	if (unlikely(IS_ERR(segs)))
  4.2553 ++		return PTR_ERR(segs);
  4.2554 ++
  4.2555 ++	do {
  4.2556 ++		struct sk_buff *nskb = segs->next;
  4.2557 ++		int err;
  4.2558 ++
  4.2559 ++		segs->next = NULL;
  4.2560 ++		err = xfrm4_output_finish2(segs);
  4.2561 ++
  4.2562 ++		if (unlikely(err)) {
  4.2563 ++			while ((segs = nskb)) {
  4.2564 ++				nskb = segs->next;
  4.2565 ++				segs->next = NULL;
  4.2566 ++				kfree_skb(segs);
  4.2567 ++			}
  4.2568 ++			return err;
  4.2569 ++		}
  4.2570 ++
  4.2571 ++		segs = nskb;
  4.2572 ++	} while (segs);
  4.2573 ++
  4.2574 ++	return 0;
  4.2575 ++}
  4.2576 ++
  4.2577 + int xfrm4_output(struct sk_buff *skb)
  4.2578 + {
  4.2579 + 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
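
xfrm4_output_finish() is split in two: the original body survives as xfrm4_output_finish2(), and the new wrapper software-segments any GSO skb before it enters the IPsec transforms, since each transform must see one packet at a time. The super-packet is freed right after skb_gso_segment() (the segments are independent), and a failure mid-chain frees every unsent segment. A userspace model of that walk-and-cleanup loop (struct pkt and xmit_one() are stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt {
    	struct pkt *next;
    	int id;
    };

    /* Stand-in for xfrm4_output_finish2(); consumes one packet and
     * fails on a chosen id so the cleanup path can be exercised. */
    static int xmit_one(struct pkt *p, int fail_id)
    {
    	int err = (p->id == fail_id) ? -1 : 0;

    	printf("xmit %d -> %d\n", p->id, err);
    	free(p);			/* consumed either way */
    	return err;
    }

    /* Mirrors the segment loop in the patch: detach each node before
     * handing it on; on error, free every not-yet-sent node. */
    static int xmit_chain(struct pkt *segs, int fail_id)
    {
    	do {
    		struct pkt *nskb = segs->next;
    		int err;

    		segs->next = NULL;
    		err = xmit_one(segs, fail_id);

    		if (err) {
    			while ((segs = nskb)) {
    				nskb = segs->next;
    				free(segs);
    			}
    			return err;
    		}

    		segs = nskb;
    	} while (segs);

    	return 0;
    }

    int main(void)
    {
    	struct pkt *head = NULL, **tail = &head;
    	int i;

    	for (i = 1; i <= 3; i++) {
    		struct pkt *p = malloc(sizeof(*p));

    		p->next = NULL;
    		p->id = i;
    		*tail = p;
    		tail = &p->next;
    	}
    	return xmit_chain(head, 2) ? 1 : 0;
    }
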
  4.2580 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
  4.2581 +index 5bf70b1..cf5d17e 100644
  4.2582 +--- a/net/ipv6/ip6_output.c
  4.2583 ++++ b/net/ipv6/ip6_output.c
  4.2584 +@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
  4.2585 + 
  4.2586 + int ip6_output(struct sk_buff *skb)
  4.2587 + {
  4.2588 +-	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
  4.2589 ++	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
  4.2590 + 				dst_allfrag(skb->dst))
  4.2591 + 		return ip6_fragment(skb, ip6_output2);
  4.2592 + 	else
  4.2593 +@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st
  4.2594 + 		struct frag_hdr fhdr;
  4.2595 + 
  4.2596 + 		/* specify the length of each IP datagram fragment*/
  4.2597 +-		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 
  4.2598 +-						sizeof(struct frag_hdr);
  4.2599 ++		skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 
  4.2600 ++					    sizeof(struct frag_hdr);
  4.2601 ++		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
  4.2602 + 		ipv6_select_ident(skb, &fhdr);
  4.2603 + 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
  4.2604 + 		__skb_queue_tail(&sk->sk_write_queue, skb);
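
On the IPv6 side, the oversize test in ip6_output() and the UFO setup in ip6_ufo_append_data() switch from ufo_size to gso_size; the type is set to SKB_GSO_UDPV4 because the enum this patch introduces has no separate IPv6 UDP flag. Each fragment of a UFO super-packet carries the MTU minus the per-fragment headers, exactly what the gso_size assignment computes; a small sketch with illustrative values:

    #include <stdio.h>

    /* IPv6 fragment header is 8 bytes (RFC 2460, section 4.5). */
    #define FRAG_HDR_LEN 8

    /* Payload carried by each fragment of a UFO super-packet.
     * fragheaderlen covers the IPv6 header plus any extension
     * headers that precede the fragment header. */
    static unsigned int ufo_frag_payload(unsigned int mtu,
    				     unsigned int fragheaderlen)
    {
    	return mtu - fragheaderlen - FRAG_HDR_LEN;
    }

    int main(void)
    {
    	/* e.g. a 1500-byte MTU with a plain 40-byte IPv6 header */
    	printf("%u\n", ufo_frag_payload(1500, 40));	/* 1452 */
    	return 0;
    }
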
  4.2605 +diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
  4.2606 +index d511a88..ef56d5d 100644
  4.2607 +--- a/net/ipv6/ipcomp6.c
  4.2608 ++++ b/net/ipv6/ipcomp6.c
  4.2609 +@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
  4.2610 + 
  4.2611 + static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
  4.2612 + {
  4.2613 +-	int err = 0;
  4.2614 ++	int err = -ENOMEM;
  4.2615 + 	u8 nexthdr = 0;
  4.2616 + 	int hdr_len = skb->h.raw - skb->nh.raw;
  4.2617 + 	unsigned char *tmp_hdr = NULL;
  4.2618 +@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta
  4.2619 + 	struct crypto_tfm *tfm;
  4.2620 + 	int cpu;
  4.2621 + 
  4.2622 +-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  4.2623 +-		skb_linearize(skb, GFP_ATOMIC) != 0) {
  4.2624 +-		err = -ENOMEM;
  4.2625 ++	if (skb_linearize_cow(skb))
  4.2626 + 		goto out;
  4.2627 +-	}
  4.2628 + 
  4.2629 + 	skb->ip_summed = CHECKSUM_NONE;
  4.2630 + 
  4.2631 +@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st
  4.2632 + 		goto out_ok;
  4.2633 + 	}
  4.2634 + 
  4.2635 +-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
  4.2636 +-		skb_linearize(skb, GFP_ATOMIC) != 0) {
  4.2637 ++	if (skb_linearize_cow(skb))
  4.2638 + 		goto out_ok;
  4.2639 +-	}
  4.2640 + 
  4.2641 + 	/* compression */
  4.2642 + 	plen = skb->len - hdr_len;
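
The ipcomp6.c hunks repeat the IPv4 ipcomp changes line for line: err pre-seeded to -ENOMEM on the input path, and both open-coded linearize checks folded into skb_linearize_cow(); the control-flow model sketched after the ipcomp.c diff applies unchanged.
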
  4.2643 +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
  4.2644 +index 8024217..39bdeec 100644
  4.2645 +--- a/net/ipv6/xfrm6_output.c
  4.2646 ++++ b/net/ipv6/xfrm6_output.c
  4.2647 +@@ -151,7 +151,7 @@ error_nolock:
  4.2648 + 	goto out_exit;
  4.2649 + }
  4.2650 + 
  4.2651 +-static int xfrm6_output_finish(struct sk_buff *skb)
  4.2652 ++static int xfrm6_output_finish2(struct sk_buff *skb)
  4.2653 + {
  4.2654 + 	int err;
  4.2655 + 
  4.2656 +@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk
  4.2657 + 			return dst_output(skb);
  4.2658 + 
  4.2659 + 		err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
  4.2660 +-			      skb->dst->dev, xfrm6_output_finish);
  4.2661 ++			      skb->dst->dev, xfrm6_output_finish2);
  4.2662 + 		if (unlikely(err != 1))
  4.2663 + 			break;
  4.2664 + 	}
  4.2665 +@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk
  4.2666 + 	return err;
  4.2667 + }
  4.2668 + 
  4.2669 ++static int xfrm6_output_finish(struct sk_buff *skb)
  4.2670 ++{
  4.2671 ++	struct sk_buff *segs;
  4.2672 ++
  4.2673 ++	if (!skb_shinfo(skb)->gso_size)
  4.2674 ++		return xfrm6_output_finish2(skb);
  4.2675 ++
  4.2676 ++	skb->protocol = htons(ETH_P_IP);
  4.2677 ++	segs = skb_gso_segment(skb, 0);
  4.2678 ++	kfree_skb(skb);
  4.2679 ++	if (unlikely(IS_ERR(segs)))
  4.2680 ++		return PTR_ERR(segs);
  4.2681 ++
  4.2682 ++	do {
  4.2683 ++		struct sk_buff *nskb = segs->next;
  4.2684 ++		int err;
  4.2685 ++
  4.2686 ++		segs->next = NULL;
  4.2687 ++		err = xfrm6_output_finish2(segs);
  4.2688 ++
  4.2689 ++		if (unlikely(err)) {
  4.2690 ++			while ((segs = nskb)) {
  4.2691 ++				nskb = segs->next;
  4.2692 ++				segs->next = NULL;
  4.2693 ++				kfree_skb(segs);
  4.2694 ++			}
  4.2695 ++			return err;
  4.2696 ++		}
  4.2697 ++
  4.2698 ++		segs = nskb;
  4.2699 ++	} while (segs);
  4.2700 ++
  4.2701 ++	return 0;
  4.2702 ++}
  4.2703 ++
  4.2704 + int xfrm6_output(struct sk_buff *skb)
  4.2705 + {
  4.2706 + 	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
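
The IPv6 wrapper mirrors the IPv4 one minus the CONFIG_NETFILTER reroute shortcut, and it too stamps skb->protocol = htons(ETH_P_IP) before segmenting. That looks carried over from the IPv4 version; a reader would expect ETH_P_IPV6 on this path, but the patch as imported reads ETH_P_IP.
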
  4.2707 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
  4.2708 +index 99ceb91..28c9efd 100644
  4.2709 +--- a/net/sched/sch_generic.c
  4.2710 ++++ b/net/sched/sch_generic.c
  4.2711 +@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device
  4.2712 +    dev->queue_lock serializes queue accesses for this device
  4.2713 +    AND dev->qdisc pointer itself.
  4.2714 + 
  4.2715 +-   dev->xmit_lock serializes accesses to device driver.
  4.2716 ++   netif_tx_lock serializes accesses to device driver.
  4.2717 + 
  4.2718 +-   dev->queue_lock and dev->xmit_lock are mutually exclusive,
  4.2719 ++   dev->queue_lock and netif_tx_lock are mutually exclusive,
  4.2720 +    if one is grabbed, another must be free.
  4.2721 +  */
  4.2722 + 
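
From here on the scheduler stops touching dev->xmit_lock directly: the netif_tx_lock family (defined in the netdevice.h portion of this patch) takes over the lock plus the xmit_lock_owner bookkeeping that every call site below used to open-code. A userspace model of what those helpers are assumed to bundle, with a pthread mutex standing in for the spinlock and the caller supplying the CPU id that smp_processor_id() would provide in the kernel:

    #include <pthread.h>
    #include <stdio.h>

    /* Toy device; the real helpers live in netdevice.h in this patch. */
    struct txdev {
    	pthread_mutex_t xmit_lock;
    	int xmit_lock_owner;		/* -1 while unlocked */
    };

    static void netif_tx_lock(struct txdev *dev, int cpu)
    {
    	pthread_mutex_lock(&dev->xmit_lock);
    	dev->xmit_lock_owner = cpu;
    }

    /* Nonzero on success, like spin_trylock(). */
    static int netif_tx_trylock(struct txdev *dev, int cpu)
    {
    	if (pthread_mutex_trylock(&dev->xmit_lock))
    		return 0;		/* contended */
    	dev->xmit_lock_owner = cpu;
    	return 1;
    }

    static void netif_tx_unlock(struct txdev *dev)
    {
    	dev->xmit_lock_owner = -1;
    	pthread_mutex_unlock(&dev->xmit_lock);
    }

    int main(void)
    {
    	struct txdev dev = { PTHREAD_MUTEX_INITIALIZER, -1 };

    	if (netif_tx_trylock(&dev, 0)) {
    		printf("owner %d\n", dev.xmit_lock_owner);
    		netif_tx_unlock(&dev);
    	}
    	netif_tx_lock(&dev, 0);
    	netif_tx_unlock(&dev);
    	return 0;
    }
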
  4.2723 +@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device
  4.2724 +    NOTE: Called under dev->queue_lock with locally disabled BH.
  4.2725 + */
  4.2726 + 
  4.2727 +-int qdisc_restart(struct net_device *dev)
  4.2728 ++static inline int qdisc_restart(struct net_device *dev)
  4.2729 + {
  4.2730 + 	struct Qdisc *q = dev->qdisc;
  4.2731 + 	struct sk_buff *skb;
  4.2732 + 
  4.2733 + 	/* Dequeue packet */
  4.2734 +-	if ((skb = q->dequeue(q)) != NULL) {
  4.2735 ++	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
  4.2736 + 		unsigned nolock = (dev->features & NETIF_F_LLTX);
  4.2737 ++
  4.2738 ++		dev->gso_skb = NULL;
  4.2739 ++
  4.2740 + 		/*
  4.2741 + 		 * When the driver has LLTX set it does its own locking
  4.2742 + 		 * in start_xmit. No need to add additional overhead by
  4.2743 +@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev
  4.2744 + 		 * will be requeued.
  4.2745 + 		 */
  4.2746 + 		if (!nolock) {
  4.2747 +-			if (!spin_trylock(&dev->xmit_lock)) {
  4.2748 ++			if (!netif_tx_trylock(dev)) {
  4.2749 + 			collision:
  4.2750 + 				/* So, someone grabbed the driver. */
  4.2751 + 				
  4.2752 +@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev
  4.2753 + 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
  4.2754 + 				goto requeue;
  4.2755 + 			}
  4.2756 +-			/* Remember that the driver is grabbed by us. */
  4.2757 +-			dev->xmit_lock_owner = smp_processor_id();
  4.2758 + 		}
  4.2759 + 		
  4.2760 + 		{
  4.2761 +@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev
  4.2762 + 
  4.2763 + 			if (!netif_queue_stopped(dev)) {
  4.2764 + 				int ret;
  4.2765 +-				if (netdev_nit)
  4.2766 +-					dev_queue_xmit_nit(skb, dev);
  4.2767 + 
  4.2768 +-				ret = dev->hard_start_xmit(skb, dev);
  4.2769 ++				ret = dev_hard_start_xmit(skb, dev);
  4.2770 + 				if (ret == NETDEV_TX_OK) { 
  4.2771 + 					if (!nolock) {
  4.2772 +-						dev->xmit_lock_owner = -1;
  4.2773 +-						spin_unlock(&dev->xmit_lock);
  4.2774 ++						netif_tx_unlock(dev);
  4.2775 + 					}
  4.2776 + 					spin_lock(&dev->queue_lock);
  4.2777 + 					return -1;
  4.2778 +@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev
  4.2779 + 			/* NETDEV_TX_BUSY - we need to requeue */
  4.2780 + 			/* Release the driver */
  4.2781 + 			if (!nolock) { 
  4.2782 +-				dev->xmit_lock_owner = -1;
  4.2783 +-				spin_unlock(&dev->xmit_lock);
  4.2784 ++				netif_tx_unlock(dev);
  4.2785 + 			} 
  4.2786 + 			spin_lock(&dev->queue_lock);
  4.2787 + 			q = dev->qdisc;
  4.2788 +@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev
  4.2789 + 		 */
  4.2790 + 
  4.2791 + requeue:
  4.2792 +-		q->ops->requeue(skb, q);
  4.2793 ++		if (skb->next)
  4.2794 ++			dev->gso_skb = skb;
  4.2795 ++		else
  4.2796 ++			q->ops->requeue(skb, q);
  4.2797 + 		netif_schedule(dev);
  4.2798 + 		return 1;
  4.2799 + 	}
  4.2800 +@@ -183,11 +183,23 @@ requeue:
  4.2801 + 	return q->q.qlen;
  4.2802 + }
  4.2803 + 
  4.2804 ++void __qdisc_run(struct net_device *dev)
  4.2805 ++{
  4.2806 ++	if (unlikely(dev->qdisc == &noop_qdisc))
  4.2807 ++		goto out;
  4.2808 ++
  4.2809 ++	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
  4.2810 ++		/* NOTHING */;
  4.2811 ++
  4.2812 ++out:
  4.2813 ++	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
  4.2814 ++}
  4.2815 ++
  4.2816 + static void dev_watchdog(unsigned long arg)
  4.2817 + {
  4.2818 + 	struct net_device *dev = (struct net_device *)arg;
  4.2819 + 
  4.2820 +-	spin_lock(&dev->xmit_lock);
  4.2821 ++	netif_tx_lock(dev);
  4.2822 + 	if (dev->qdisc != &noop_qdisc) {
  4.2823 + 		if (netif_device_present(dev) &&
  4.2824 + 		    netif_running(dev) &&
  4.2825 +@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a
  4.2826 + 				dev_hold(dev);
  4.2827 + 		}
  4.2828 + 	}
  4.2829 +-	spin_unlock(&dev->xmit_lock);
  4.2830 ++	netif_tx_unlock(dev);
  4.2831 + 
  4.2832 + 	dev_put(dev);
  4.2833 + }
  4.2834 +@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev
  4.2835 + 
  4.2836 + static void dev_watchdog_up(struct net_device *dev)
  4.2837 + {
  4.2838 +-	spin_lock_bh(&dev->xmit_lock);
  4.2839 ++	netif_tx_lock_bh(dev);
  4.2840 + 	__netdev_watchdog_up(dev);
  4.2841 +-	spin_unlock_bh(&dev->xmit_lock);
  4.2842 ++	netif_tx_unlock_bh(dev);
  4.2843 + }
  4.2844 + 
  4.2845 + static void dev_watchdog_down(struct net_device *dev)
  4.2846 + {
  4.2847 +-	spin_lock_bh(&dev->xmit_lock);
  4.2848 ++	netif_tx_lock_bh(dev);
  4.2849 + 	if (del_timer(&dev->watchdog_timer))
  4.2850 + 		__dev_put(dev);
  4.2851 +-	spin_unlock_bh(&dev->xmit_lock);
  4.2852 ++	netif_tx_unlock_bh(dev);
  4.2853 + }
  4.2854 + 
  4.2855 + void netif_carrier_on(struct net_device *dev)
  4.2856 +@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d
  4.2857 + 
  4.2858 + 	dev_watchdog_down(dev);
  4.2859 + 
  4.2860 +-	while (test_bit(__LINK_STATE_SCHED, &dev->state))
  4.2861 ++	/* Wait for outstanding dev_queue_xmit calls. */
  4.2862 ++	synchronize_rcu();
  4.2863 ++
  4.2864 ++	/* Wait for outstanding qdisc_run calls. */
  4.2865 ++	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
  4.2866 + 		yield();
  4.2867 + 
  4.2868 +-	spin_unlock_wait(&dev->xmit_lock);
  4.2869 ++	if (dev->gso_skb) {
  4.2870 ++		kfree_skb(dev->gso_skb);
  4.2871 ++		dev->gso_skb = NULL;
  4.2872 ++	}
  4.2873 + }
  4.2874 + 
  4.2875 + void dev_init_scheduler(struct net_device *dev)
  4.2876 +@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
  4.2877 + EXPORT_SYMBOL(qdisc_alloc);
  4.2878 + EXPORT_SYMBOL(qdisc_destroy);
  4.2879 + EXPORT_SYMBOL(qdisc_reset);
  4.2880 +-EXPORT_SYMBOL(qdisc_restart);
  4.2881 + EXPORT_SYMBOL(qdisc_lock_tree);
  4.2882 + EXPORT_SYMBOL(qdisc_unlock_tree);
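
The qdisc_restart() rework adds a one-slot stash, dev->gso_skb: dequeue drains the stash before asking the qdisc, and the requeue path stores partially transmitted GSO chains (skb->next != NULL) there rather than pushing them back through q->ops->requeue(), which would not keep the chain intact. The new __qdisc_run() runs the restart loop to completion and clears __LINK_STATE_QDISC_RUNNING, the bit dev_deactivate() now spins on (after synchronize_rcu()) before discarding any stashed chain; the qdisc_restart export disappears because the symbol is now static inline. A toy model of the stash discipline (single-entry queue; the names model the patch, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    struct pkt {
    	struct pkt *next;
    	int id;
    };

    /* Toy device: gso_skb is the one-slot stash the patch adds, and
     * "queue" is a single-entry stand-in for the real qdisc. */
    struct txq {
    	struct pkt *gso_skb;
    	struct pkt *queue;
    };

    /* Dequeue drains the stash before asking the qdisc, matching the
     * order of the new test in qdisc_restart(). */
    static struct pkt *dequeue(struct txq *dev)
    {
    	struct pkt *skb = dev->gso_skb;

    	if (skb) {
    		dev->gso_skb = NULL;
    		return skb;
    	}
    	skb = dev->queue;
    	dev->queue = NULL;
    	return skb;
    }

    /* Requeue keeps chains (next != NULL) in the stash so their order
     * survives; lone packets go back to the qdisc stand-in. */
    static void requeue(struct txq *dev, struct pkt *skb)
    {
    	if (skb->next)
    		dev->gso_skb = skb;
    	else
    		dev->queue = skb;
    }

    int main(void)
    {
    	struct pkt seg2 = { NULL, 2 };
    	struct pkt chain = { &seg2, 1 };	/* two-segment GSO chain */
    	struct pkt lone = { NULL, 3 };
    	struct txq dev = { NULL, &lone };

    	requeue(&dev, &chain);			/* chain -> stash */
    	printf("%d\n", dequeue(&dev)->id);	/* 1: stash wins */
    	printf("%d\n", dequeue(&dev)->id);	/* 3: then the queue */
    	return 0;
    }
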
  4.2883 +diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
  4.2884 +index 79b8ef3..4c16ad5 100644
  4.2885 +--- a/net/sched/sch_teql.c
  4.2886 ++++ b/net/sched/sch_teql.c
  4.2887 +@@ -302,20 +302,17 @@ restart:
  4.2888 + 
  4.2889 + 		switch (teql_resolve(skb, skb_res, slave)) {
  4.2890 + 		case 0:
  4.2891 +-			if (spin_trylock(&slave->xmit_lock)) {
  4.2892 +-				slave->xmit_lock_owner = smp_processor_id();
  4.2893 ++			if (netif_tx_trylock(slave)) {
  4.2894 + 				if (!netif_queue_stopped(slave) &&
  4.2895 + 				    slave->hard_start_xmit(skb, slave) == 0) {
  4.2896 +-					slave->xmit_lock_owner = -1;
  4.2897 +-					spin_unlock(&slave->xmit_lock);
  4.2898 ++					netif_tx_unlock(slave);
  4.2899 + 					master->slaves = NEXT_SLAVE(q);
  4.2900 + 					netif_wake_queue(dev);
  4.2901 + 					master->stats.tx_packets++;
  4.2902 + 					master->stats.tx_bytes += len;
  4.2903 + 					return 0;
  4.2904 + 				}
  4.2905 +-				slave->xmit_lock_owner = -1;
  4.2906 +-				spin_unlock(&slave->xmit_lock);
  4.2907 ++				netif_tx_unlock(slave);
  4.2908 + 			}
  4.2909 + 			if (netif_queue_stopped(dev))
  4.2910 + 				busy = 1;