direct-io.hg
changeset 10720:d48322cddd87
Split networking GSO patch into base portion plus additions.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Tue Jul 25 15:06:39 2006 +0100 (2006-07-25)
parents   b8b7c278ca4c
children  f021b091c559
files     patches/linux-2.6.16.13/net-gso-0-base.patch patches/linux-2.6.16.13/net-gso-1-check-dodgy.patch patches/linux-2.6.16.13/net-gso-2-checksum-fix.patch patches/linux-2.6.16.13/net-gso.patch
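For orientation: the bulk of this base patch is mechanical. dev->xmit_lock becomes the private _xmit_lock behind the new netif_tx_lock*() helpers added to include/linux/netdevice.h, and the tso_size/tso_segs fields become gso_size/gso_segs. The fragment below is a hypothetical driver hook, not part of the patch, sketching the conversion pattern applied to forcedeth, bnx2, dvb_net and the other drivers touched here:

    /* Hypothetical driver hook, shown only to illustrate the locking
     * conversion this patch performs tree-wide. */
    static void example_set_multicast(struct net_device *dev)
    {
            /* before this patch: spin_lock_bh(&dev->xmit_lock); */
            netif_tx_lock_bh(dev);  /* also records dev->xmit_lock_owner */

            /* ... reprogram the hardware multicast filter ... */

            /* before this patch: spin_unlock_bh(&dev->xmit_lock); */
            netif_tx_unlock_bh(dev);
    }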
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/linux-2.6.16.13/net-gso-0-base.patch Tue Jul 25 15:06:39 2006 +0100
@@ -0,0 +1,2907 @@
+diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
+index 3c0a5ba..847cedb 100644
+--- a/Documentation/networking/netdevices.txt
++++ b/Documentation/networking/netdevices.txt
+@@ -42,9 +42,9 @@ dev->get_stats:
+ Context: nominally process, but don't sleep inside an rwlock
+
+ dev->hard_start_xmit:
+- Synchronization: dev->xmit_lock spinlock.
++ Synchronization: netif_tx_lock spinlock.
+ When the driver sets NETIF_F_LLTX in dev->features this will be
+- called without holding xmit_lock. In this case the driver
++ called without holding netif_tx_lock. In this case the driver
+ has to lock by itself when needed. It is recommended to use a try lock
+ for this and return -1 when the spin lock fails.
+ The locking there should also properly protect against
+@@ -62,12 +62,12 @@ dev->hard_start_xmit:
+ Only valid when NETIF_F_LLTX is set.
+
+ dev->tx_timeout:
+- Synchronization: dev->xmit_lock spinlock.
++ Synchronization: netif_tx_lock spinlock.
+ Context: BHs disabled
+ Notes: netif_queue_stopped() is guaranteed true
+
+ dev->set_multicast_list:
+- Synchronization: dev->xmit_lock spinlock.
++ Synchronization: netif_tx_lock spinlock.
+ Context: BHs disabled
+
+ dev->poll:
+diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
+index 4be9769..2e7cac7 100644
+--- a/drivers/block/aoe/aoenet.c
++++ b/drivers/block/aoe/aoenet.c
+@@ -95,9 +95,8 @@ mac_addr(char addr[6])
+ static struct sk_buff *
+ skb_check(struct sk_buff *skb)
+ {
+- if (skb_is_nonlinear(skb))
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)))
+- if (skb_linearize(skb, GFP_ATOMIC) < 0) {
++ if (skb_linearize(skb)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index a2408d7..c90e620 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
+
+ ipoib_mcast_stop_thread(dev, 0);
+
+- spin_lock_irqsave(&dev->xmit_lock, flags);
++ local_irq_save(flags);
++ netif_tx_lock(dev);
+ spin_lock(&priv->lock);
+
+ /*
+@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
+ }
+
+ spin_unlock(&priv->lock);
+- spin_unlock_irqrestore(&dev->xmit_lock, flags);
++ netif_tx_unlock(dev);
++ local_irq_restore(flags);
+
+ /* We have to cancel outside of the spinlock */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
+index 6711eb6..8d2351f 100644
+--- a/drivers/media/dvb/dvb-core/dvb_net.c
++++ b/drivers/media/dvb/dvb-core/dvb_net.c
+@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void
+
+ dvb_net_feed_stop(dev);
+ priv->rx_mode = RX_MODE_UNI;
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ dprintk("%s: promiscuous mode\n", dev->name);
+@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void
+ }
+ }
+
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ dvb_net_feed_start(dev);
+ }
+
+diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
+index dd41049..6615583 100644
+--- a/drivers/net/8139cp.c
++++ b/drivers/net/8139cp.c
+@@ -794,7 +794,7 @@ #endif
+ entry = cp->tx_head;
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+ if (dev->features & NETIF_F_TSO)
+- mss = skb_shinfo(skb)->tso_size;
++ mss = skb_shinfo(skb)->gso_size;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ struct cp_desc *txd = &cp->tx_ring[entry];
+diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
+index a24200d..b5e39a1 100644
+--- a/drivers/net/bnx2.c
++++ b/drivers/net/bnx2.c
+@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
+ skb = tx_buf->skb;
+ #ifdef BCM_TSO
+ /* partial BD completions possible with TSO packets */
+- if (skb_shinfo(skb)->tso_size) {
++ if (skb_shinfo(skb)->gso_size) {
+ u16 last_idx, last_ring_idx;
+
+ last_idx = sw_cons +
+@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
+ return 1;
+ }
+
+-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
++/* Called with rtnl_lock from vlan functions and also netif_tx_lock
+ * from set_multicast.
+ */
+ static void
+@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device
+ }
+ #endif
+
+-/* Called with dev->xmit_lock.
++/* Called with netif_tx_lock.
+ * hard_start_xmit is pseudo-lockless - a lock is only required when
+ * the tx queue is full. This way, we get the benefit of lockless
+ * operations most of the time without the complexities to handle
+@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
+ (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+ }
+ #ifdef BCM_TSO
+- if ((mss = skb_shinfo(skb)->tso_size) &&
++ if ((mss = skb_shinfo(skb)->gso_size) &&
+ (skb->len > (bp->dev->mtu + ETH_HLEN))) {
+ u32 tcp_opt_len, ip_tcp_len;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index bcf9f17..e970921 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
+ }
+
+ #define BOND_INTERSECT_FEATURES \
+- (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
+- NETIF_F_TSO|NETIF_F_UFO)
++ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
+
+ /*
+ * Compute the common dev->feature set available to all slaves. Some
+@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct
+ features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
+
+ if ((features & NETIF_F_SG) &&
+- !(features & (NETIF_F_IP_CSUM |
+- NETIF_F_NO_CSUM |
+- NETIF_F_HW_CSUM)))
++ !(features & NETIF_F_ALL_CSUM))
+ features &= ~NETIF_F_SG;
+
+ /*
+@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
+ */
+ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+- /* don't acquire bond device's xmit_lock when
++ /* don't acquire bond device's netif_tx_lock when
+ * transmitting */
+ bond_dev->features |= NETIF_F_LLTX;
+
+diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
+index 30ff8ea..7b7d360 100644
+--- a/drivers/net/chelsio/sge.c
++++ b/drivers/net/chelsio/sge.c
+@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
+ struct cpl_tx_pkt *cpl;
+
+ #ifdef NETIF_F_TSO
+- if (skb_shinfo(skb)->tso_size) {
++ if (skb_shinfo(skb)->gso_size) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr;
+
+@@ -1434,7 +1434,7 @@ #ifdef NETIF_F_TSO
+ hdr->ip_hdr_words = skb->nh.iph->ihl;
+ hdr->tcp_hdr_words = skb->h.th->doff;
+ hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+- skb_shinfo(skb)->tso_size));
++ skb_shinfo(skb)->gso_size));
+ hdr->len = htonl(skb->len - sizeof(*hdr));
+ cpl = (struct cpl_tx_pkt *)hdr;
+ sge->stats.tx_lso_pkts++;
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+index fa29402..681d284 100644
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2526,7 +2526,7 @@ #ifdef NETIF_F_TSO
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+- if (skb_shinfo(skb)->tso_size) {
++ if (skb_shinfo(skb)->gso_size) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+@@ -2534,7 +2534,7 @@ #ifdef NETIF_F_TSO
+ }
+
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+- mss = skb_shinfo(skb)->tso_size;
++ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == ntohs(ETH_P_IP)) {
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+@@ -2651,7 +2651,7 @@ #ifdef NETIF_F_TSO
+ * tso gets written back prematurely before the data is fully
+ * DMAd to the controller */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+- !skb_shinfo(skb)->tso_size) {
++ !skb_shinfo(skb)->gso_size) {
+ tx_ring->last_tx_tso = 0;
+ size -= 4;
+ }
+@@ -2893,7 +2893,7 @@ #endif
+ }
+
+ #ifdef NETIF_F_TSO
+- mss = skb_shinfo(skb)->tso_size;
++ mss = skb_shinfo(skb)->gso_size;
+ /* The controller does a simple calculation to
+ * make sure there is enough room in the FIFO before
+ * initiating the DMA for each buffer. The calc is:
+@@ -2935,7 +2935,7 @@ #endif
+ #ifdef NETIF_F_TSO
+ /* Controller Erratum workaround */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+- !skb_shinfo(skb)->tso_size)
++ !skb_shinfo(skb)->gso_size)
+ count++;
+ #endif
+
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index 3682ec6..c35f16e 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -482,9 +482,9 @@ #define LPA_1000HALF 0x0400
+ * critical parts:
+ * - rx is (pseudo-) lockless: it relies on the single-threading provided
+ * by the arch code for interrupts.
+- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
++ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
+ * needs dev->priv->lock :-(
+- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
++ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ */
+
+ /* in dev: base, irq */
+@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
+
+ /*
+ * nv_start_xmit: dev->hard_start_xmit function
+- * Called with dev->xmit_lock held.
++ * Called with netif_tx_lock held.
+ */
+ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff
+ np->tx_skbuff[nr] = skb;
+
+ #ifdef NETIF_F_TSO
+- if (skb_shinfo(skb)->tso_size)
+- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
++ if (skb_shinfo(skb)->gso_size)
++ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+ else
+ #endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
+
+ /*
+ * nv_tx_timeout: dev->tx_timeout function
+- * Called with dev->xmit_lock held.
++ * Called with netif_tx_lock held.
+ */
+ static void nv_tx_timeout(struct net_device *dev)
+ {
+@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
+ * Changing the MTU is a rare event, it shouldn't matter.
+ */
+ disable_irq(dev->irq);
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ enable_irq(dev->irq);
+ }
+ return 0;
+@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
+ memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+
+ if (netif_running(dev)) {
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ spin_lock_irq(&np->lock);
+
+ /* stop rx engine */
+@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
+ /* restart rx engine */
+ nv_start_rx(dev);
+ spin_unlock_irq(&np->lock);
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ } else {
+ nv_copy_mac_to_hw(dev);
+ }
+@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
+
+ /*
+ * nv_set_multicast: dev->set_multicast function
+- * Called with dev->xmit_lock held.
++ * Called with netif_tx_lock held.
+ */
+ static void nv_set_multicast(struct net_device *dev)
+ {
+diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
+index 102c1f0..d12605f 100644
+--- a/drivers/net/hamradio/6pack.c
++++ b/drivers/net/hamradio/6pack.c
+@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
+ {
+ struct sockaddr_ax25 *sa = addr;
+
+- spin_lock_irq(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+- spin_unlock_irq(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+
+ return 0;
+ }
+@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
+ break;
+ }
+
+- spin_lock_irq(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
+- spin_unlock_irq(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+
+ err = 0;
+ break;
+diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
+index dc5e9d5..5c66f5a 100644
+--- a/drivers/net/hamradio/mkiss.c
++++ b/drivers/net/hamradio/mkiss.c
+@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
+ {
+ struct sockaddr_ax25 *sa = addr;
+
+- spin_lock_irq(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+- spin_unlock_irq(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+
+ return 0;
+ }
+@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
+ break;
+ }
+
+- spin_lock_irq(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
+- spin_unlock_irq(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+
+ err = 0;
+ break;
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 31fb2d7..2e222ef 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
+ dp->st_task_enter++;
+ if ((skb = skb_peek(&dp->tq)) == NULL) {
+ dp->st_txq_refl_try++;
+- if (spin_trylock(&_dev->xmit_lock)) {
++ if (netif_tx_trylock(_dev)) {
+ dp->st_rxq_enter++;
+ while ((skb = skb_dequeue(&dp->rq)) != NULL) {
+ skb_queue_tail(&dp->tq, skb);
+ dp->st_rx2tx_tran++;
+ }
+- spin_unlock(&_dev->xmit_lock);
++ netif_tx_unlock(_dev);
+ } else {
+ /* reschedule */
+ dp->st_rxq_notenter++;
+@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
+ }
+ }
+
+- if (spin_trylock(&_dev->xmit_lock)) {
++ if (netif_tx_trylock(_dev)) {
+ dp->st_rxq_check++;
+ if ((skb = skb_peek(&dp->rq)) == NULL) {
+ dp->tasklet_pending = 0;
+@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
+ netif_wake_queue(_dev);
+ } else {
+ dp->st_rxq_rsch++;
+- spin_unlock(&_dev->xmit_lock);
++ netif_tx_unlock(_dev);
+ goto resched;
+ }
+- spin_unlock(&_dev->xmit_lock);
++ netif_tx_unlock(_dev);
+ } else {
+ resched:
+ dp->tasklet_pending = 1;
+diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
+index a9f49f0..339d4a7 100644
+--- a/drivers/net/irda/vlsi_ir.c
++++ b/drivers/net/irda/vlsi_ir.c
+@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
+ || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
+ break;
+ udelay(100);
+- /* must not sleep here - we are called under xmit_lock! */
++ /* must not sleep here - called under netif_tx_lock! */
+ }
+ }
+
+diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
+index f9f77e4..bdab369 100644
+--- a/drivers/net/ixgb/ixgb_main.c
++++ b/drivers/net/ixgb/ixgb_main.c
+@@ -1163,7 +1163,7 @@ #ifdef NETIF_F_TSO
+ uint16_t ipcse, tucse, mss;
+ int err;
+
+- if(likely(skb_shinfo(skb)->tso_size)) {
++ if(likely(skb_shinfo(skb)->gso_size)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+@@ -1171,7 +1171,7 @@ #ifdef NETIF_F_TSO
+ }
+
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+- mss = skb_shinfo(skb)->tso_size;
++ mss = skb_shinfo(skb)->gso_size;
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+ skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 690a1aa..9bcaa80 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
+ struct iphdr *iph = skb->nh.iph;
+ struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
+ unsigned int doffset = (iph->ihl + th->doff) * 4;
+- unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
++ unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
+ unsigned int offset = 0;
+ u32 seq = ntohl(th->seq);
+ u16 id = ntohs(iph->id);
+@@ -139,7 +139,7 @@ #ifndef LOOPBACK_MUST_CHECKSUM
+ #endif
+
+ #ifdef LOOPBACK_TSO
+- if (skb_shinfo(skb)->tso_size) {
++ if (skb_shinfo(skb)->gso_size) {
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
+ BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
+
+diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
+index c0998ef..0fac9d5 100644
+--- a/drivers/net/mv643xx_eth.c
++++ b/drivers/net/mv643xx_eth.c
+@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
+
+ #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ if (has_tiny_unaligned_frags(skb)) {
+- if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
++ if (__skb_linearize(skb)) {
+ stats->tx_dropped++;
+ printk(KERN_DEBUG "%s: failed to linearize tiny "
+ "unaligned fragment\n", dev->name);
+diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
+index 9d6d254..c9ed624 100644
+--- a/drivers/net/natsemi.c
++++ b/drivers/net/natsemi.c
+@@ -323,12 +323,12 @@ performance critical codepaths:
+ The rx process only runs in the interrupt handler. Access from outside
+ the interrupt handler is only permitted after disable_irq().
+
+-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
++The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
+ is set, then access is permitted under spin_lock_irq(&np->lock).
+
+ Thus configuration functions that want to access everything must call
+ disable_irq(dev->irq);
+- spin_lock_bh(dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ spin_lock_irq(&np->lock);
+
+ IV. Notes
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 8cc0d0b..e53b313 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
+ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
+ {
+ if (dev->features & NETIF_F_TSO) {
+- u32 mss = skb_shinfo(skb)->tso_size;
++ u32 mss = skb_shinfo(skb)->gso_size;
+
+ if (mss)
+ return LargeSend | ((mss & MSSMask) << MSSShift);
+diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
+index b7f00d6..439f45f 100644
+--- a/drivers/net/s2io.c
++++ b/drivers/net/s2io.c
+@@ -3522,8 +3522,8 @@ #endif
+ txdp->Control_1 = 0;
+ txdp->Control_2 = 0;
+ #ifdef NETIF_F_TSO
+- mss = skb_shinfo(skb)->tso_size;
+- if (mss) {
++ mss = skb_shinfo(skb)->gso_size;
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
+ txdp->Control_1 |= TXD_TCP_LSO_EN;
+ txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+ }
+@@ -3543,10 +3543,10 @@ #endif
+ }
+
+ frg_len = skb->len - skb->data_len;
+- if (skb_shinfo(skb)->ufo_size) {
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
+ int ufo_size;
+
+- ufo_size = skb_shinfo(skb)->ufo_size;
++ ufo_size = skb_shinfo(skb)->gso_size;
+ ufo_size &= ~7;
+ txdp->Control_1 |= TXD_UFO_EN;
+ txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
+@@ -3572,7 +3572,7 @@ #endif
+ txdp->Host_Control = (unsigned long) skb;
+ txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
+
+- if (skb_shinfo(skb)->ufo_size)
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+ txdp->Control_1 |= TXD_UFO_EN;
+
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+@@ -3587,12 +3587,12 @@ #endif
+ (sp->pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
+- if (skb_shinfo(skb)->ufo_size)
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+ txdp->Control_1 |= TXD_UFO_EN;
+ }
+ txdp->Control_1 |= TXD_GATHER_CODE_LAST;
+
+- if (skb_shinfo(skb)->ufo_size)
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+ frg_cnt++; /* as Txd0 was used for inband header */
+
+ tx_fifo = mac_control->tx_FIFO_start[queue];
+@@ -3606,7 +3606,7 @@ #ifdef NETIF_F_TSO
+ if (mss)
+ val64 |= TX_FIFO_SPECIAL_FUNC;
+ #endif
+- if (skb_shinfo(skb)->ufo_size)
++ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+ val64 |= TX_FIFO_SPECIAL_FUNC;
+ writeq(val64, &tx_fifo->List_Control);
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 0618cd5..2a55eb3 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
+ count = sizeof(dma_addr_t) / sizeof(u32);
+ count += skb_shinfo(skb)->nr_frags * count;
+
+- if (skb_shinfo(skb)->tso_size)
++ if (skb_shinfo(skb)->gso_size)
+ ++count;
+
+ if (skb->ip_summed == CHECKSUM_HW)
+@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
+ }
+
+ /* Check for TCP Segmentation Offload */
+- mss = skb_shinfo(skb)->tso_size;
++ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
+ /* just drop the packet if non-linear expansion fails */
+ if (skb_header_cloned(skb) &&
+diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
+index caf4102..fc9164a 100644
+--- a/drivers/net/tg3.c
++++ b/drivers/net/tg3.c
+@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
+ #if TG3_TSO_SUPPORT != 0
+ mss = 0;
+ if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
+- (mss = skb_shinfo(skb)->tso_size) != 0) {
++ (mss = skb_shinfo(skb)->gso_size) != 0) {
+ int tcp_opt_len, ip_tcp_len;
+
+ if (skb_header_cloned(skb) &&
+diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
+index 5b1af39..11de5af 100644
+--- a/drivers/net/tulip/winbond-840.c
++++ b/drivers/net/tulip/winbond-840.c
+@@ -1605,11 +1605,11 @@ #ifdef CONFIG_PM
+ * - get_stats:
+ * spin_lock_irq(np->lock), doesn't touch hw if not present
+ * - hard_start_xmit:
+- * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
++ * synchronize_irq + netif_tx_disable;
+ * - tx_timeout:
+- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
++ * netif_device_detach + netif_tx_disable;
+ * - set_multicast_list
+- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
++ * netif_device_detach + netif_tx_disable;
+ * - interrupt handler
+ * doesn't touch hw if not present, synchronize_irq waits for
+ * running instances of the interrupt handler.
+@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev
+ netif_device_detach(dev);
+ update_csr6(dev, 0);
+ iowrite32(0, ioaddr + IntrEnable);
+- netif_stop_queue(dev);
+ spin_unlock_irq(&np->lock);
+
+- spin_unlock_wait(&dev->xmit_lock);
+ synchronize_irq(dev->irq);
++ netif_tx_disable(dev);
+
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+
+diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
+index 4c76cb7..30c48c9 100644
+--- a/drivers/net/typhoon.c
++++ b/drivers/net/typhoon.c
+@@ -340,7 +340,7 @@ #define typhoon_synchronize_irq(x) synch
+ #endif
+
+ #if defined(NETIF_F_TSO)
+-#define skb_tso_size(x) (skb_shinfo(x)->tso_size)
++#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
+ #define TSO_NUM_DESCRIPTORS 2
+ #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
+ #else
+diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
+index ed1f837..2eb6b5f 100644
+--- a/drivers/net/via-velocity.c
++++ b/drivers/net/via-velocity.c
+@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff
+
+ int pktlen = skb->len;
+
++#ifdef VELOCITY_ZERO_COPY_SUPPORT
++ if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
++ kfree_skb(skb);
++ return 0;
++ }
++#endif
++
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ index = vptr->td_curr[qnum];
+@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff
+ */
+ if (pktlen < ETH_ZLEN) {
+ /* Cannot occur until ZC support */
+- if(skb_linearize(skb, GFP_ATOMIC))
+- return 0;
+ pktlen = ETH_ZLEN;
+ memcpy(tdinfo->buf, skb->data, skb->len);
+ memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
+@@ -1933,7 +1938,6 @@ #ifdef VELOCITY_ZERO_COPY_SUPPORT
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ tdinfo->skb = skb;
+ if (nfrags > 6) {
+- skb_linearize(skb, GFP_ATOMIC);
+ memcpy(tdinfo->buf, skb->data, skb->len);
+ tdinfo->skb_dma[0] = tdinfo->buf_dma;
+ td_ptr->tdesc0.pktsize =
+diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
+index 6fd0bf7..75237c1 100644
+--- a/drivers/net/wireless/orinoco.c
++++ b/drivers/net/wireless/orinoco.c
+@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
+ /* Set promiscuity / multicast*/
+ priv->promiscuous = 0;
+ priv->mc_count = 0;
+- __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
++
++ /* FIXME: what about netif_tx_lock */
++ __orinoco_set_multicast_list(dev);
+
+ return 0;
+ }
+diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
+index 82cb4af..57cec40 100644
+--- a/drivers/s390/net/qeth_eddp.c
++++ b/drivers/s390/net/qeth_eddp.c
+@@ -421,7 +421,7 @@ #endif /* CONFIG_QETH_VLAN */
+ }
+ tcph = eddp->skb->h.th;
+ while (eddp->skb_offset < eddp->skb->len) {
+- data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
++ data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
+ (int)(eddp->skb->len - eddp->skb_offset));
+ /* prepare qdio hdr */
+ if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
+@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
+
+ QETH_DBF_TEXT(trace, 5, "eddpcanp");
+ /* can we put multiple skbs in one page? */
+- skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
++ skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
+ if (skbs_per_page > 1){
+- ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
++ ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
+ skbs_per_page + 1;
+ ctx->elements_per_skb = 1;
+ } else {
+ /* no -> how many elements per skb? */
+- ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
++ ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
+ PAGE_SIZE) >> PAGE_SHIFT;
+ ctx->num_pages = ctx->elements_per_skb *
+- (skb_shinfo(skb)->tso_segs + 1);
++ (skb_shinfo(skb)->gso_segs + 1);
+ }
+ ctx->num_elements = ctx->elements_per_skb *
+- (skb_shinfo(skb)->tso_segs + 1);
++ (skb_shinfo(skb)->gso_segs + 1);
+ }
+
+ static inline struct qeth_eddp_context *
+diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
+index dba7f7f..d9cc997 100644
+--- a/drivers/s390/net/qeth_main.c
++++ b/drivers/s390/net/qeth_main.c
+@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
+ queue = card->qdio.out_qs
+ [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+
+- if (skb_shinfo(skb)->tso_size)
++ if (skb_shinfo(skb)->gso_size)
+ large_send = card->options.large_send;
+
+ /*are we able to do TSO ? If so ,prepare and send it from here */
+@@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
+ card->stats.tx_packets++;
+ card->stats.tx_bytes += skb->len;
+ #ifdef CONFIG_QETH_PERF_STATS
+- if (skb_shinfo(skb)->tso_size &&
++ if (skb_shinfo(skb)->gso_size &&
+ !(large_send == QETH_LARGE_SEND_NO)) {
+ card->perf_stats.large_send_bytes += skb->len;
+ card->perf_stats.large_send_cnt++;
+diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
+index 1286dde..89cbf34 100644
+--- a/drivers/s390/net/qeth_tso.h
++++ b/drivers/s390/net/qeth_tso.h
+@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
+ hdr->ext.hdr_version = 1;
+ hdr->ext.hdr_len = 28;
+ /*insert non-fix values */
+- hdr->ext.mss = skb_shinfo(skb)->tso_size;
++ hdr->ext.mss = skb_shinfo(skb)->gso_size;
+ hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+ hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+ sizeof(struct qeth_hdr_tso));
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 93535f0..9269df7 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -408,6 +408,8 @@ #define ETHTOOL_STSO 0x0000001f /* Set
+ #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */
+ #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */
+ #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */
++#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */
++#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
+
+ /* compatibility with older code */
+ #define SPARC_ETH_GSET ETHTOOL_GSET
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 7fda03d..47b0965 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -230,7 +230,8 @@ enum netdev_state_t
+ __LINK_STATE_SCHED,
+ __LINK_STATE_NOCARRIER,
+ __LINK_STATE_RX_SCHED,
+- __LINK_STATE_LINKWATCH_PENDING
++ __LINK_STATE_LINKWATCH_PENDING,
++ __LINK_STATE_QDISC_RUNNING,
+ };
+
+
+@@ -306,9 +307,17 @@ #define NETIF_F_HW_VLAN_TX 128 /* Transm
+ #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
+ #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
+ #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
+-#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
++#define NETIF_F_GSO 2048 /* Enable software GSO. */
+ #define NETIF_F_LLTX 4096 /* LockLess TX */
+-#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
++
++ /* Segmentation offload features */
++#define NETIF_F_GSO_SHIFT 16
++#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
++#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
++#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
++
++#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
++#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+
+ struct net_device *next_sched;
+
+@@ -394,6 +403,9 @@ #define NETIF_F_UFO 8192
+ struct list_head qdisc_list;
+ unsigned long tx_queue_len; /* Max frames per queue allowed */
+
++ /* Partially transmitted GSO packet. */
++ struct sk_buff *gso_skb;
++
+ /* ingress path synchronizer */
+ spinlock_t ingress_lock;
+ struct Qdisc *qdisc_ingress;
+@@ -402,7 +414,7 @@ #define NETIF_F_UFO 8192
+ * One part is mostly used on xmit path (device)
+ */
+ /* hard_start_xmit synchronizer */
+- spinlock_t xmit_lock ____cacheline_aligned_in_smp;
++ spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+ /* cpu id of processor entered to hard_start_xmit or -1,
+ if nobody entered there.
+ */
+@@ -527,6 +539,8 @@ struct packet_type {
+ struct net_device *,
+ struct packet_type *,
+ struct net_device *);
++ struct sk_buff *(*gso_segment)(struct sk_buff *skb,
++ int features);
+ void *af_packet_priv;
+ struct list_head list;
+ };
+@@ -693,7 +707,8 @@ extern int dev_change_name(struct net_d
+ extern int dev_set_mtu(struct net_device *, int);
+ extern int dev_set_mac_address(struct net_device *,
+ struct sockaddr *);
+-extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
++extern int dev_hard_start_xmit(struct sk_buff *skb,
++ struct net_device *dev);
+
+ extern void dev_init(void);
+
+@@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
+ clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+ }
+
++static inline void netif_tx_lock(struct net_device *dev)
++{
++ spin_lock(&dev->_xmit_lock);
++ dev->xmit_lock_owner = smp_processor_id();
++}
++
++static inline void netif_tx_lock_bh(struct net_device *dev)
++{
++ spin_lock_bh(&dev->_xmit_lock);
++ dev->xmit_lock_owner = smp_processor_id();
++}
++
++static inline int netif_tx_trylock(struct net_device *dev)
++{
++ int err = spin_trylock(&dev->_xmit_lock);
++ if (!err)
++ dev->xmit_lock_owner = smp_processor_id();
++ return err;
++}
++
++static inline void netif_tx_unlock(struct net_device *dev)
++{
++ dev->xmit_lock_owner = -1;
++ spin_unlock(&dev->_xmit_lock);
++}
++
++static inline void netif_tx_unlock_bh(struct net_device *dev)
++{
++ dev->xmit_lock_owner = -1;
++ spin_unlock_bh(&dev->_xmit_lock);
++}
++
+ static inline void netif_tx_disable(struct net_device *dev)
+ {
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ netif_stop_queue(dev);
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ }
+
+ /* These functions live elsewhere (drivers/net/net_init.c, but related) */
+@@ -932,6 +979,7 @@ extern int netdev_max_backlog;
+ extern int weight_p;
+ extern int netdev_set_master(struct net_device *dev, struct net_device *master);
+ extern int skb_checksum_help(struct sk_buff *skb, int inward);
++extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+ #ifdef CONFIG_BUG
+ extern void netdev_rx_csum_fault(struct net_device *dev);
+ #else
+@@ -951,6 +999,18 @@ #endif
+
+ extern void linkwatch_run_queue(void);
+
++static inline int skb_gso_ok(struct sk_buff *skb, int features)
++{
++ int feature = skb_shinfo(skb)->gso_size ?
++ skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
++ return (features & feature) == feature;
++}
++
++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++{
++ return !skb_gso_ok(skb, dev->features);
++}
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _LINUX_DEV_H */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ad7cc22..b19d45d 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -134,9 +134,10 @@ struct skb_frag_struct {
+ struct skb_shared_info {
+ atomic_t dataref;
+ unsigned short nr_frags;
+- unsigned short tso_size;
+- unsigned short tso_segs;
+- unsigned short ufo_size;
++ unsigned short gso_size;
++ /* Warning: this field is not always filled in (UFO)! */
++ unsigned short gso_segs;
++ unsigned short gso_type;
+ unsigned int ip6_frag_id;
+ struct sk_buff *frag_list;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+@@ -168,6 +169,14 @@ enum {
+ SKB_FCLONE_CLONE,
+ };
+
++enum {
++ SKB_GSO_TCPV4 = 1 << 0,
++ SKB_GSO_UDPV4 = 1 << 1,
++
++ /* This indicates the skb is from an untrusted source. */
++ SKB_GSO_DODGY = 1 << 2,
++};
++
+ /**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
+ return 0;
+ }
+
++static inline int __skb_linearize(struct sk_buff *skb)
++{
++ return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
++}
++
+ /**
+ * skb_linearize - convert paged skb to linear one
+ * @skb: buffer to linarize
+- * @gfp: allocation mode
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released.
+ */
+-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
+-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
++static inline int skb_linearize(struct sk_buff *skb)
++{
++ return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
++}
++
++/**
++ * skb_linearize_cow - make sure skb is linear and writable
++ * @skb: buffer to process
++ *
++ * If there is no free memory -ENOMEM is returned, otherwise zero
++ * is returned and the old skb data released.
++ */
++static inline int skb_linearize_cow(struct sk_buff *skb)
+ {
+- return __skb_linearize(skb, gfp);
++ return skb_is_nonlinear(skb) || skb_cloned(skb) ?
++ __skb_linearize(skb) : 0;
+ }
+
+ /**
+@@ -1254,6 +1279,7 @@ extern void skb_split(struct sk_b
+ struct sk_buff *skb1, const u32 len);
+
+ extern void skb_release_data(struct sk_buff *skb);
++extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+
+ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index b94d1ad..75b5b93 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
+ struct rtattr *tab);
+ extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
+
+-extern int qdisc_restart(struct net_device *dev);
++extern void __qdisc_run(struct net_device *dev);
+
+ static inline void qdisc_run(struct net_device *dev)
+ {
+- while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
+- /* NOTHING */;
++ if (!netif_queue_stopped(dev) &&
++ !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
++ __qdisc_run(dev);
+ }
+
+ extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 6dc5970..0d2dcdb 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -37,6 +37,8 @@ #define MAX_INET_PROTOS 256 /* Must be
+ struct net_protocol {
+ int (*handler)(struct sk_buff *skb);
+ void (*err_handler)(struct sk_buff *skb, u32 info);
++ struct sk_buff *(*gso_segment)(struct sk_buff *skb,
++ int features);
+ int no_policy;
+ };
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f63d0d5..a8e8d21 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct
+ {
+ __sk_dst_set(sk, dst);
+ sk->sk_route_caps = dst->dev->features;
++ if (sk->sk_route_caps & NETIF_F_GSO)
++ sk->sk_route_caps |= NETIF_F_TSO;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_TSO;
++ else
++ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ }
+ }
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 77f21c6..70e1d5f 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -552,13 +552,13 @@ #include <net/tcp_ecn.h>
+ */
+ static inline int tcp_skb_pcount(const struct sk_buff *skb)
+ {
+- return skb_shinfo(skb)->tso_segs;
++ return skb_shinfo(skb)->gso_segs;
+ }
+
+ /* This is valid iff tcp_skb_pcount() > 1. */
+ static inline int tcp_skb_mss(const struct sk_buff *skb)
+ {
+- return skb_shinfo(skb)->tso_size;
++ return skb_shinfo(skb)->gso_size;
+ }
+
+ static inline void tcp_dec_pcount_approx(__u32 *count,
+@@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
+
+ extern int tcp_v4_destroy_sock(struct sock *sk);
+
++extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
++
+ #ifdef CONFIG_PROC_FS
+ extern int tcp4_proc_init(void);
+ extern void tcp4_proc_exit(void);
+diff --git a/net/atm/clip.c b/net/atm/clip.c
+index 1842a4e..6dc21a7 100644
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
+ printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
+ return;
+ }
+- spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
++ netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
+ entry->neigh->used = jiffies;
+ for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
+ if (*walk == clip_vcc) {
+@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
+ printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
+ "0x%p)\n",entry,clip_vcc);
+ out:
+- spin_unlock_bh(&entry->neigh->dev->xmit_lock);
++ netif_tx_unlock_bh(entry->neigh->dev);
+ }
+
+ /* The neighbour entry n->lock is held. */
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 0b33a7b..180e79b 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
+ struct net_bridge *br = netdev_priv(dev);
+
+ if (data)
+- br->feature_mask |= NETIF_F_IP_CSUM;
++ br->feature_mask |= NETIF_F_NO_CSUM;
+ else
+- br->feature_mask &= ~NETIF_F_IP_CSUM;
++ br->feature_mask &= ~NETIF_F_ALL_CSUM;
+
+ br_features_recompute(br);
+ return 0;
+@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
+ dev->set_mac_address = br_set_mac_address;
+ dev->priv_flags = IFF_EBRIDGE;
+
+- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
+- | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
++ NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
+ }
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 2d24fb4..00b1128 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -32,7 +32,7 @@ static inline int should_deliver(const s
+ int br_dev_queue_push_xmit(struct sk_buff *skb)
+ {
+ /* drop mtu oversized packets except tso */
+- if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
++ if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+ kfree_skb(skb);
+ else {
+ #ifdef CONFIG_BRIDGE_NETFILTER
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f36b35e..0617146 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
+ struct net_bridge_port *p;
+ unsigned long features, checksum;
+
+- features = br->feature_mask &~ NETIF_F_IP_CSUM;
+- checksum = br->feature_mask & NETIF_F_IP_CSUM;
++ checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
++ features = br->feature_mask & ~NETIF_F_ALL_CSUM;
+
+ list_for_each_entry(p, &br->port_list, list) {
+- if (!(p->dev->features
+- & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
++ unsigned long feature = p->dev->features;
++
++ if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
++ checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
++ if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
++ checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
++ if (!(feature & NETIF_F_IP_CSUM))
+ checksum = 0;
+- features &= p->dev->features;
++
++ if (feature & NETIF_F_GSO)
++ feature |= NETIF_F_TSO;
++ feature |= NETIF_F_GSO;
++
++ features &= feature;
+ }
+
+- br->dev->features = features | checksum | NETIF_F_LLTX;
++ br->dev->features = features | checksum | NETIF_F_LLTX |
++ NETIF_F_GSO_ROBUST;
+ }
+
+ /* called with RTNL */
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 9e27373..588207f 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
+ {
+ if (skb->protocol == htons(ETH_P_IP) &&
+ skb->len > skb->dev->mtu &&
+- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
++ !skb_shinfo(skb)->gso_size)
+ return ip_fragment(skb, br_dev_queue_push_xmit);
+ else
+ return br_dev_queue_push_xmit(skb);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 12a214c..32e1056 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -115,6 +115,7 @@ #include <linux/wireless.h> /* Note : w
+ #include <net/iw_handler.h>
+ #endif /* CONFIG_NET_RADIO */
+ #include <asm/current.h>
++#include <linux/err.h>
+
+ /*
+ * The list of packet types we will receive (as opposed to discard)
+@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct
+ * taps currently in use.
+ */
+
+-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct packet_type *ptype;
+
+@@ -1106,6 +1107,45 @@ out:
+ return ret;
+ }
+
++/**
++ * skb_gso_segment - Perform segmentation on skb.
++ * @skb: buffer to segment
++ * @features: features for the output path (see dev->features)
++ *
++ * This function segments the given skb and returns a list of segments.
++ *
++ * It may return NULL if the skb requires no segmentation. This is
++ * only possible when GSO is used for verifying header integrity.
++ */
++struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
++{
++ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
++ struct packet_type *ptype;
++ int type = skb->protocol;
++
++ BUG_ON(skb_shinfo(skb)->frag_list);
++ BUG_ON(skb->ip_summed != CHECKSUM_HW);
++
++ skb->mac.raw = skb->data;
++ skb->mac_len = skb->nh.raw - skb->data;
++ __skb_pull(skb, skb->mac_len);
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
++ if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
++ segs = ptype->gso_segment(skb, features);
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ __skb_push(skb, skb->data - skb->mac.raw);
++
++ return segs;
++}
++
++EXPORT_SYMBOL(skb_gso_segment);
++
+ /* Take action when hardware reception checksum errors are detected. */
+ #ifdef CONFIG_BUG
+ void netdev_rx_csum_fault(struct net_device *dev)
+@@ -1142,75 +1182,108 @@ #else
+ #define illegal_highdma(dev, skb) (0)
+ #endif
+
+-/* Keep head the same: replace data */
+-int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
+-{
+- unsigned int size;
+- u8 *data;
+- long offset;
+- struct skb_shared_info *ninfo;
+- int headerlen = skb->data - skb->head;
+- int expand = (skb->tail + skb->data_len) - skb->end;
+-
+- if (skb_shared(skb))
+- BUG();
+-
+- if (expand <= 0)
+- expand = 0;
+-
+- size = skb->end - skb->head + expand;
+- size = SKB_DATA_ALIGN(size);
+- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+- if (!data)
+- return -ENOMEM;
+-
+- /* Copy entire thing */
+- if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
+- BUG();
+-
+- /* Set up shinfo */
+- ninfo = (struct skb_shared_info*)(data + size);
+- atomic_set(&ninfo->dataref, 1);
+- ninfo->tso_size = skb_shinfo(skb)->tso_size;
+- ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
+- ninfo->nr_frags = 0;
+- ninfo->frag_list = NULL;
+-
+- /* Offset between the two in bytes */
+- offset = data - skb->head;
+-
+- /* Free old data. */
+- skb_release_data(skb);
+-
+- skb->head = data;
+- skb->end = data + size;
+-
+- /* Set up new pointers */
+- skb->h.raw += offset;
+- skb->nh.raw += offset;
+- skb->mac.raw += offset;
+- skb->tail += offset;
+- skb->data += offset;
+-
+- /* We are no longer a clone, even if we were. */
+- skb->cloned = 0;
+-
+- skb->tail += skb->data_len;
+- skb->data_len = 0;
++struct dev_gso_cb {
++ void (*destructor)(struct sk_buff *skb);
++};
++
++#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
++
++static void dev_gso_skb_destructor(struct sk_buff *skb)
++{
++ struct dev_gso_cb *cb;
++
++ do {
++ struct sk_buff *nskb = skb->next;
++
++ skb->next = nskb->next;
++ nskb->next = NULL;
++ kfree_skb(nskb);
++ } while (skb->next);
++
++ cb = DEV_GSO_CB(skb);
++ if (cb->destructor)
++ cb->destructor(skb);
++}
++
++/**
++ * dev_gso_segment - Perform emulated hardware segmentation on skb.
++ * @skb: buffer to segment
++ *
++ * This function segments the given skb and stores the list of segments
++ * in skb->next.
++ */
++static int dev_gso_segment(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ struct sk_buff *segs;
++ int features = dev->features & ~(illegal_highdma(dev, skb) ?
++ NETIF_F_SG : 0);
++
++ segs = skb_gso_segment(skb, features);
++
++ /* Verifying header integrity only. */
++ if (!segs)
++ return 0;
++
++ if (unlikely(IS_ERR(segs)))
++ return PTR_ERR(segs);
++
++ skb->next = segs;
++ DEV_GSO_CB(skb)->destructor = skb->destructor;
++ skb->destructor = dev_gso_skb_destructor;
++
++ return 0;
++}
++
++int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ if (likely(!skb->next)) {
++ if (netdev_nit)
++ dev_queue_xmit_nit(skb, dev);
++
++ if (netif_needs_gso(dev, skb)) {
++ if (unlikely(dev_gso_segment(skb)))
++ goto out_kfree_skb;
++ if (skb->next)
++ goto gso;
++ }
++
++ return dev->hard_start_xmit(skb, dev);
++ }
++
++gso:
++ do {
++ struct sk_buff *nskb = skb->next;
++ int rc;
++
++ skb->next = nskb->next;
++ nskb->next = NULL;
++ rc = dev->hard_start_xmit(nskb, dev);
++ if (unlikely(rc)) {
++ nskb->next = skb->next;
++ skb->next = nskb;
++ return rc;
++ }
++ if (unlikely(netif_queue_stopped(dev) && skb->next))
++ return NETDEV_TX_BUSY;
++ } while (skb->next);
++
++ skb->destructor = DEV_GSO_CB(skb)->destructor;
++
++out_kfree_skb:
++ kfree_skb(skb);
+ return 0;
+ }
+
+ #define HARD_TX_LOCK(dev, cpu) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+- spin_lock(&dev->xmit_lock); \
+- dev->xmit_lock_owner = cpu; \
++ netif_tx_lock(dev); \
+ } \
+ }
+
+ #define HARD_TX_UNLOCK(dev) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+- dev->xmit_lock_owner = -1; \
+- spin_unlock(&dev->xmit_lock); \
++ netif_tx_unlock(dev); \
+ } \
+ }
+
+@@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
++ /* GSO will handle the following emulations directly. */
++ if (netif_needs_gso(dev, skb))
++ goto gso;
++
+ if (skb_shinfo(skb)->frag_list &&
+ !(dev->features & NETIF_F_FRAGLIST) &&
+- __skb_linearize(skb, GFP_ATOMIC))
++ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* Fragmented skb is linearized if device does not support SG,
+@@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
+ */
+ if (skb_shinfo(skb)->nr_frags &&
+ (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
+- __skb_linearize(skb, GFP_ATOMIC))
++ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not support
+ * checksumming for this protocol, complete checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_HW &&
+- (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
++ (!(dev->features & NETIF_F_GEN_CSUM) &&
+ (!(dev->features & NETIF_F_IP_CSUM) ||
+ skb->protocol != htons(ETH_P_IP))))
+ if (skb_checksum_help(skb, 0))
+ goto out_kfree_skb;
+
++gso:
+ spin_lock_prefetch(&dev->queue_lock);
+
+ /* Disable soft irqs for various locks below. Also
+ * stops preemption for RCU.
+ */
+- local_bh_disable();
++ rcu_read_lock_bh();
+
+ /* Updates of qdisc are serialized by queue_lock.
+ * The struct Qdisc which is pointed to by qdisc is now a
+@@ -1309,8 +1387,8 @@ #endif
+ /* The device has no queue. Common case for software devices:
+ loopback, all the sorts of tunnels...
+
+- Really, it is unlikely that xmit_lock protection is necessary here.
+- (f.e. loopback and IP tunnels are clean ignoring statistics
++ Really, it is unlikely that netif_tx_lock protection is necessary
++ here. (f.e. loopback and IP tunnels are clean ignoring statistics
+ counters.)
+ However, it is possible, that they rely on protection
+ made by us here.
+@@ -1326,11 +1404,8 @@ #endif
+ HARD_TX_LOCK(dev, cpu);
+
+ if (!netif_queue_stopped(dev)) {
+- if (netdev_nit)
+- dev_queue_xmit_nit(skb, dev);
+-
+ rc = 0;
+- if (!dev->hard_start_xmit(skb, dev)) {
++ if (!dev_hard_start_xmit(skb, dev)) {
+ HARD_TX_UNLOCK(dev);
+ goto out;
+ }
+@@ -1349,13 +1424,13 @@ #endif
+ }
+
+ rc = -ENETDOWN;
+- local_bh_enable();
++ rcu_read_unlock_bh();
+
+ out_kfree_skb:
+ kfree_skb(skb);
+ return rc;
+ out:
+- local_bh_enable();
++ rcu_read_unlock_bh();
+ return rc;
+ }
+
+@@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
+ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
+
+ spin_lock_init(&dev->queue_lock);
+- spin_lock_init(&dev->xmit_lock);
++ spin_lock_init(&dev->_xmit_lock);
+ dev->xmit_lock_owner = -1;
+ #ifdef CONFIG_NET_CLS_ACT
+ spin_lock_init(&dev->ingress_lock);
+@@ -2714,9 +2789,7 @@ #endif
+
+ /* Fix illegal SG+CSUM combinations. */
+ if ((dev->features & NETIF_F_SG) &&
+- !(dev->features & (NETIF_F_IP_CSUM |
+- NETIF_F_NO_CSUM |
+- NETIF_F_HW_CSUM))) {
++ !(dev->features & NETIF_F_ALL_CSUM)) {
+ printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+ dev->name);
+ dev->features &= ~NETIF_F_SG;
+@@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
+ EXPORT_SYMBOL(__dev_get_by_index);
+ EXPORT_SYMBOL(__dev_get_by_name);
+ EXPORT_SYMBOL(__dev_remove_pack);
+-EXPORT_SYMBOL(__skb_linearize);
+ EXPORT_SYMBOL(dev_valid_name);
+ EXPORT_SYMBOL(dev_add_pack);
+ EXPORT_SYMBOL(dev_alloc_name);
+diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
+index 05d6085..c57d887 100644
+--- a/net/core/dev_mcast.c
++++ b/net/core/dev_mcast.c
+@@ -62,7 +62,7 @@ #include <net/arp.h>
+ * Device mc lists are changed by bh at least if IPv6 is enabled,
+ * so that it must be bh protected.
+ *
+- * We block accesses to device mc filters with dev->xmit_lock.
++ * We block accesses to device mc filters with netif_tx_lock.
+ */
+
+ /*
+@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
+
+ void dev_mc_upload(struct net_device *dev)
+ {
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ __dev_mc_upload(dev);
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ }
+
+ /*
+@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
+ int err = 0;
+ struct dev_mc_list *dmi, **dmip;
+
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+
+ for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
+ /*
+@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
+ */
+ __dev_mc_upload(dev);
+
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ return 0;
+ }
+ }
+ err = -ENOENT;
+ done:
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ return err;
+ }
+
+@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
+
+ dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
+
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+ for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
+ if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
+ dmi->dmi_addrlen == alen) {
+@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
+ }
+
+ if ((dmi = dmi1) == NULL) {
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ return -ENOMEM;
+ }
+ memcpy(dmi->dmi_addr, addr, alen);
+@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
+
+ __dev_mc_upload(dev);
+
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ return 0;
+
+ done:
+- spin_unlock_bh(&dev->xmit_lock);
++ netif_tx_unlock_bh(dev);
+ kfree(dmi1);
+ return err;
+ }
+@@ -204,7 +204,7 @@ done:
+
+ void dev_mc_discard(struct net_device *dev)
+ {
+- spin_lock_bh(&dev->xmit_lock);
++ netif_tx_lock_bh(dev);
+
+ while (dev->mc_list != NULL) {
1.1678 + struct dev_mc_list *tmp = dev->mc_list; 1.1679 +@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d 1.1680 + } 1.1681 + dev->mc_count = 0; 1.1682 + 1.1683 +- spin_unlock_bh(&dev->xmit_lock); 1.1684 ++ netif_tx_unlock_bh(dev); 1.1685 + } 1.1686 + 1.1687 + #ifdef CONFIG_PROC_FS 1.1688 +@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi 1.1689 + struct dev_mc_list *m; 1.1690 + struct net_device *dev = v; 1.1691 + 1.1692 +- spin_lock_bh(&dev->xmit_lock); 1.1693 ++ netif_tx_lock_bh(dev); 1.1694 + for (m = dev->mc_list; m; m = m->next) { 1.1695 + int i; 1.1696 + 1.1697 +@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi 1.1698 + 1.1699 + seq_putc(seq, '\n'); 1.1700 + } 1.1701 +- spin_unlock_bh(&dev->xmit_lock); 1.1702 ++ netif_tx_unlock_bh(dev); 1.1703 + return 0; 1.1704 + } 1.1705 + 1.1706 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c 1.1707 +index e6f7610..27ce168 100644 1.1708 +--- a/net/core/ethtool.c 1.1709 ++++ b/net/core/ethtool.c 1.1710 +@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic 1.1711 + 1.1712 + u32 ethtool_op_get_tx_csum(struct net_device *dev) 1.1713 + { 1.1714 +- return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0; 1.1715 ++ return (dev->features & NETIF_F_ALL_CSUM) != 0; 1.1716 + } 1.1717 + 1.1718 + int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 1.1719 +@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev 1.1720 + return -EFAULT; 1.1721 + 1.1722 + if (edata.data && 1.1723 +- !(dev->features & (NETIF_F_IP_CSUM | 1.1724 +- NETIF_F_NO_CSUM | 1.1725 +- NETIF_F_HW_CSUM))) 1.1726 ++ !(dev->features & NETIF_F_ALL_CSUM)) 1.1727 + return -EINVAL; 1.1728 + 1.1729 + return __ethtool_set_sg(dev, edata.data); 1.1730 +@@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_de 1.1731 + 1.1732 + static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) 1.1733 + { 1.1734 +- struct ethtool_value edata = { ETHTOOL_GTSO }; 1.1735 ++ struct ethtool_value edata = { ETHTOOL_GUFO }; 1.1736 + 1.1737 + if (!dev->ethtool_ops->get_ufo) 1.1738 + return -EOPNOTSUPP; 1.1739 +@@ -600,6 +598,7 @@ static int ethtool_get_ufo(struct net_de 1.1740 + return -EFAULT; 1.1741 + return 0; 1.1742 + } 1.1743 ++ 1.1744 + static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) 1.1745 + { 1.1746 + struct ethtool_value edata; 1.1747 +@@ -615,6 +614,29 @@ static int ethtool_set_ufo(struct net_de 1.1748 + return dev->ethtool_ops->set_ufo(dev, edata.data); 1.1749 + } 1.1750 + 1.1751 ++static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) 1.1752 ++{ 1.1753 ++ struct ethtool_value edata = { ETHTOOL_GGSO }; 1.1754 ++ 1.1755 ++ edata.data = dev->features & NETIF_F_GSO; 1.1756 ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) 1.1757 ++ return -EFAULT; 1.1758 ++ return 0; 1.1759 ++} 1.1760 ++ 1.1761 ++static int ethtool_set_gso(struct net_device *dev, char __user *useraddr) 1.1762 ++{ 1.1763 ++ struct ethtool_value edata; 1.1764 ++ 1.1765 ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) 1.1766 ++ return -EFAULT; 1.1767 ++ if (edata.data) 1.1768 ++ dev->features |= NETIF_F_GSO; 1.1769 ++ else 1.1770 ++ dev->features &= ~NETIF_F_GSO; 1.1771 ++ return 0; 1.1772 ++} 1.1773 ++ 1.1774 + static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 1.1775 + { 1.1776 + struct ethtool_test test; 1.1777 +@@ -906,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr) 1.1778 + case ETHTOOL_SUFO: 1.1779 + rc = ethtool_set_ufo(dev, useraddr); 1.1780 + break; 1.1781 ++ case 
ETHTOOL_GGSO: 1.1782 ++ rc = ethtool_get_gso(dev, useraddr); 1.1783 ++ break; 1.1784 ++ case ETHTOOL_SGSO: 1.1785 ++ rc = ethtool_set_gso(dev, useraddr); 1.1786 ++ break; 1.1787 + default: 1.1788 + rc = -EOPNOTSUPP; 1.1789 + } 1.1790 +diff --git a/net/core/netpoll.c b/net/core/netpoll.c 1.1791 +index ea51f8d..ec28d3b 100644 1.1792 +--- a/net/core/netpoll.c 1.1793 ++++ b/net/core/netpoll.c 1.1794 +@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp 1.1795 + 1.1796 + do { 1.1797 + npinfo->tries--; 1.1798 +- spin_lock(&np->dev->xmit_lock); 1.1799 +- np->dev->xmit_lock_owner = smp_processor_id(); 1.1800 ++ netif_tx_lock(np->dev); 1.1801 + 1.1802 + /* 1.1803 + * network drivers do not expect to be called if the queue is 1.1804 + * stopped. 1.1805 + */ 1.1806 + if (netif_queue_stopped(np->dev)) { 1.1807 +- np->dev->xmit_lock_owner = -1; 1.1808 +- spin_unlock(&np->dev->xmit_lock); 1.1809 ++ netif_tx_unlock(np->dev); 1.1810 + netpoll_poll(np); 1.1811 + udelay(50); 1.1812 + continue; 1.1813 + } 1.1814 + 1.1815 + status = np->dev->hard_start_xmit(skb, np->dev); 1.1816 +- np->dev->xmit_lock_owner = -1; 1.1817 +- spin_unlock(&np->dev->xmit_lock); 1.1818 ++ netif_tx_unlock(np->dev); 1.1819 + 1.1820 + /* success */ 1.1821 + if(!status) { 1.1822 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c 1.1823 +index da16f8f..2380347 100644 1.1824 +--- a/net/core/pktgen.c 1.1825 ++++ b/net/core/pktgen.c 1.1826 +@@ -2582,7 +2582,7 @@ static __inline__ void pktgen_xmit(struc 1.1827 + } 1.1828 + } 1.1829 + 1.1830 +- spin_lock_bh(&odev->xmit_lock); 1.1831 ++ netif_tx_lock_bh(odev); 1.1832 + if (!netif_queue_stopped(odev)) { 1.1833 + 1.1834 + atomic_inc(&(pkt_dev->skb->users)); 1.1835 +@@ -2627,7 +2627,7 @@ retry_now: 1.1836 + pkt_dev->next_tx_ns = 0; 1.1837 + } 1.1838 + 1.1839 +- spin_unlock_bh(&odev->xmit_lock); 1.1840 ++ netif_tx_unlock_bh(odev); 1.1841 + 1.1842 + /* If pkt_dev->count is zero, then run forever */ 1.1843 + if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 1.1844 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c 1.1845 +index 2144952..46f56af 100644 1.1846 +--- a/net/core/skbuff.c 1.1847 ++++ b/net/core/skbuff.c 1.1848 +@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int 1.1849 + shinfo = skb_shinfo(skb); 1.1850 + atomic_set(&shinfo->dataref, 1); 1.1851 + shinfo->nr_frags = 0; 1.1852 +- shinfo->tso_size = 0; 1.1853 +- shinfo->tso_segs = 0; 1.1854 +- shinfo->ufo_size = 0; 1.1855 ++ shinfo->gso_size = 0; 1.1856 ++ shinfo->gso_segs = 0; 1.1857 ++ shinfo->gso_type = 0; 1.1858 + shinfo->ip6_frag_id = 0; 1.1859 + shinfo->frag_list = NULL; 1.1860 + 1.1861 +@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme 1.1862 + 1.1863 + atomic_set(&(skb_shinfo(skb)->dataref), 1); 1.1864 + skb_shinfo(skb)->nr_frags = 0; 1.1865 +- skb_shinfo(skb)->tso_size = 0; 1.1866 +- skb_shinfo(skb)->tso_segs = 0; 1.1867 ++ skb_shinfo(skb)->gso_size = 0; 1.1868 ++ skb_shinfo(skb)->gso_segs = 0; 1.1869 ++ skb_shinfo(skb)->gso_type = 0; 1.1870 + skb_shinfo(skb)->frag_list = NULL; 1.1871 + out: 1.1872 + return skb; 1.1873 +@@ -501,8 +502,9 @@ #endif 1.1874 + new->tc_index = old->tc_index; 1.1875 + #endif 1.1876 + atomic_set(&new->users, 1); 1.1877 +- skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size; 1.1878 +- skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs; 1.1879 ++ skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 1.1880 ++ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 1.1881 ++ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 1.1882 + } 1.1883 + 1.1884 + 
/** 1.1885 +@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 1.1886 + return 0; 1.1887 + } 1.1888 + 1.1889 ++/** 1.1890 ++ * skb_segment - Perform protocol segmentation on skb. 1.1891 ++ * @skb: buffer to segment 1.1892 ++ * @features: features for the output path (see dev->features) 1.1893 ++ * 1.1894 ++ * This function performs segmentation on the given skb. It returns 1.1895 ++ * the segment at the given position. It returns NULL if there are 1.1896 ++ * no more segments to generate, or when an error is encountered. 1.1897 ++ */ 1.1898 ++struct sk_buff *skb_segment(struct sk_buff *skb, int features) 1.1899 ++{ 1.1900 ++ struct sk_buff *segs = NULL; 1.1901 ++ struct sk_buff *tail = NULL; 1.1902 ++ unsigned int mss = skb_shinfo(skb)->gso_size; 1.1903 ++ unsigned int doffset = skb->data - skb->mac.raw; 1.1904 ++ unsigned int offset = doffset; 1.1905 ++ unsigned int headroom; 1.1906 ++ unsigned int len; 1.1907 ++ int sg = features & NETIF_F_SG; 1.1908 ++ int nfrags = skb_shinfo(skb)->nr_frags; 1.1909 ++ int err = -ENOMEM; 1.1910 ++ int i = 0; 1.1911 ++ int pos; 1.1912 ++ 1.1913 ++ __skb_push(skb, doffset); 1.1914 ++ headroom = skb_headroom(skb); 1.1915 ++ pos = skb_headlen(skb); 1.1916 ++ 1.1917 ++ do { 1.1918 ++ struct sk_buff *nskb; 1.1919 ++ skb_frag_t *frag; 1.1920 ++ int hsize, nsize; 1.1921 ++ int k; 1.1922 ++ int size; 1.1923 ++ 1.1924 ++ len = skb->len - offset; 1.1925 ++ if (len > mss) 1.1926 ++ len = mss; 1.1927 ++ 1.1928 ++ hsize = skb_headlen(skb) - offset; 1.1929 ++ if (hsize < 0) 1.1930 ++ hsize = 0; 1.1931 ++ nsize = hsize + doffset; 1.1932 ++ if (nsize > len + doffset || !sg) 1.1933 ++ nsize = len + doffset; 1.1934 ++ 1.1935 ++ nskb = alloc_skb(nsize + headroom, GFP_ATOMIC); 1.1936 ++ if (unlikely(!nskb)) 1.1937 ++ goto err; 1.1938 ++ 1.1939 ++ if (segs) 1.1940 ++ tail->next = nskb; 1.1941 ++ else 1.1942 ++ segs = nskb; 1.1943 ++ tail = nskb; 1.1944 ++ 1.1945 ++ nskb->dev = skb->dev; 1.1946 ++ nskb->priority = skb->priority; 1.1947 ++ nskb->protocol = skb->protocol; 1.1948 ++ nskb->dst = dst_clone(skb->dst); 1.1949 ++ memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 1.1950 ++ nskb->pkt_type = skb->pkt_type; 1.1951 ++ nskb->mac_len = skb->mac_len; 1.1952 ++ 1.1953 ++ skb_reserve(nskb, headroom); 1.1954 ++ nskb->mac.raw = nskb->data; 1.1955 ++ nskb->nh.raw = nskb->data + skb->mac_len; 1.1956 ++ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); 1.1957 ++ memcpy(skb_put(nskb, doffset), skb->data, doffset); 1.1958 ++ 1.1959 ++ if (!sg) { 1.1960 ++ nskb->csum = skb_copy_and_csum_bits(skb, offset, 1.1961 ++ skb_put(nskb, len), 1.1962 ++ len, 0); 1.1963 ++ continue; 1.1964 ++ } 1.1965 ++ 1.1966 ++ frag = skb_shinfo(nskb)->frags; 1.1967 ++ k = 0; 1.1968 ++ 1.1969 ++ nskb->ip_summed = CHECKSUM_HW; 1.1970 ++ nskb->csum = skb->csum; 1.1971 ++ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); 1.1972 ++ 1.1973 ++ while (pos < offset + len) { 1.1974 ++ BUG_ON(i >= nfrags); 1.1975 ++ 1.1976 ++ *frag = skb_shinfo(skb)->frags[i]; 1.1977 ++ get_page(frag->page); 1.1978 ++ size = frag->size; 1.1979 ++ 1.1980 ++ if (pos < offset) { 1.1981 ++ frag->page_offset += offset - pos; 1.1982 ++ frag->size -= offset - pos; 1.1983 ++ } 1.1984 ++ 1.1985 ++ k++; 1.1986 ++ 1.1987 ++ if (pos + size <= offset + len) { 1.1988 ++ i++; 1.1989 ++ pos += size; 1.1990 ++ } else { 1.1991 ++ frag->size -= pos + size - (offset + len); 1.1992 ++ break; 1.1993 ++ } 1.1994 ++ 1.1995 ++ frag++; 1.1996 ++ } 1.1997 ++ 1.1998 ++ skb_shinfo(nskb)->nr_frags = k; 1.1999 ++ nskb->data_len = len - hsize; 
1.2000 ++ nskb->len += nskb->data_len; 1.2001 ++ nskb->truesize += nskb->data_len; 1.2002 ++ } while ((offset += len) < skb->len); 1.2003 ++ 1.2004 ++ return segs; 1.2005 ++ 1.2006 ++err: 1.2007 ++ while ((skb = segs)) { 1.2008 ++ segs = skb->next; 1.2009 ++ kfree(skb); 1.2010 ++ } 1.2011 ++ return ERR_PTR(err); 1.2012 ++} 1.2013 ++ 1.2014 ++EXPORT_SYMBOL_GPL(skb_segment); 1.2015 ++ 1.2016 + void __init skb_init(void) 1.2017 + { 1.2018 + skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 1.2019 +diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c 1.2020 +index 44bda85..2e3323a 100644 1.2021 +--- a/net/decnet/dn_nsp_in.c 1.2022 ++++ b/net/decnet/dn_nsp_in.c 1.2023 +@@ -801,8 +801,7 @@ got_it: 1.2024 + * We linearize everything except data segments here. 1.2025 + */ 1.2026 + if (cb->nsp_flags & ~0x60) { 1.2027 +- if (unlikely(skb_is_nonlinear(skb)) && 1.2028 +- skb_linearize(skb, GFP_ATOMIC) != 0) 1.2029 ++ if (unlikely(skb_linearize(skb))) 1.2030 + goto free_out; 1.2031 + } 1.2032 + 1.2033 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c 1.2034 +index 3407f19..a0a25e0 100644 1.2035 +--- a/net/decnet/dn_route.c 1.2036 ++++ b/net/decnet/dn_route.c 1.2037 +@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st 1.2038 + padlen); 1.2039 + 1.2040 + if (flags & DN_RT_PKT_CNTL) { 1.2041 +- if (unlikely(skb_is_nonlinear(skb)) && 1.2042 +- skb_linearize(skb, GFP_ATOMIC) != 0) 1.2043 ++ if (unlikely(skb_linearize(skb))) 1.2044 + goto dump_it; 1.2045 + 1.2046 + switch(flags & DN_RT_CNTL_MSK) { 1.2047 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c 1.2048 +index 97c276f..5ba719e 100644 1.2049 +--- a/net/ipv4/af_inet.c 1.2050 ++++ b/net/ipv4/af_inet.c 1.2051 +@@ -68,6 +68,7 @@ 1.2052 + */ 1.2053 + 1.2054 + #include <linux/config.h> 1.2055 ++#include <linux/err.h> 1.2056 + #include <linux/errno.h> 1.2057 + #include <linux/types.h> 1.2058 + #include <linux/socket.h> 1.2059 +@@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock * 1.2060 + 1.2061 + EXPORT_SYMBOL(inet_sk_rebuild_header); 1.2062 + 1.2063 ++static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 1.2064 ++{ 1.2065 ++ struct sk_buff *segs = ERR_PTR(-EINVAL); 1.2066 ++ struct iphdr *iph; 1.2067 ++ struct net_protocol *ops; 1.2068 ++ int proto; 1.2069 ++ int ihl; 1.2070 ++ int id; 1.2071 ++ 1.2072 ++ if (!pskb_may_pull(skb, sizeof(*iph))) 1.2073 ++ goto out; 1.2074 ++ 1.2075 ++ iph = skb->nh.iph; 1.2076 ++ ihl = iph->ihl * 4; 1.2077 ++ if (ihl < sizeof(*iph)) 1.2078 ++ goto out; 1.2079 ++ 1.2080 ++ if (!pskb_may_pull(skb, ihl)) 1.2081 ++ goto out; 1.2082 ++ 1.2083 ++ skb->h.raw = __skb_pull(skb, ihl); 1.2084 ++ iph = skb->nh.iph; 1.2085 ++ id = ntohs(iph->id); 1.2086 ++ proto = iph->protocol & (MAX_INET_PROTOS - 1); 1.2087 ++ segs = ERR_PTR(-EPROTONOSUPPORT); 1.2088 ++ 1.2089 ++ rcu_read_lock(); 1.2090 ++ ops = rcu_dereference(inet_protos[proto]); 1.2091 ++ if (ops && ops->gso_segment) 1.2092 ++ segs = ops->gso_segment(skb, features); 1.2093 ++ rcu_read_unlock(); 1.2094 ++ 1.2095 ++ if (!segs || unlikely(IS_ERR(segs))) 1.2096 ++ goto out; 1.2097 ++ 1.2098 ++ skb = segs; 1.2099 ++ do { 1.2100 ++ iph = skb->nh.iph; 1.2101 ++ iph->id = htons(id++); 1.2102 ++ iph->tot_len = htons(skb->len - skb->mac_len); 1.2103 ++ iph->check = 0; 1.2104 ++ iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); 1.2105 ++ } while ((skb = skb->next)); 1.2106 ++ 1.2107 ++out: 1.2108 ++ return segs; 1.2109 ++} 1.2110 ++ 1.2111 + #ifdef CONFIG_IP_MULTICAST 1.2112 + static struct net_protocol igmp_protocol = { 
1.2113 + .handler = igmp_rcv, 1.2114 +@@ -1093,6 +1142,7 @@ #endif 1.2115 + static struct net_protocol tcp_protocol = { 1.2116 + .handler = tcp_v4_rcv, 1.2117 + .err_handler = tcp_v4_err, 1.2118 ++ .gso_segment = tcp_tso_segment, 1.2119 + .no_policy = 1, 1.2120 + }; 1.2121 + 1.2122 +@@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void); 1.2123 + static struct packet_type ip_packet_type = { 1.2124 + .type = __constant_htons(ETH_P_IP), 1.2125 + .func = ip_rcv, 1.2126 ++ .gso_segment = inet_gso_segment, 1.2127 + }; 1.2128 + 1.2129 + static int __init inet_init(void) 1.2130 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c 1.2131 +index 8dcba38..19c3c73 100644 1.2132 +--- a/net/ipv4/ip_output.c 1.2133 ++++ b/net/ipv4/ip_output.c 1.2134 +@@ -210,8 +210,7 @@ #if defined(CONFIG_NETFILTER) && defined 1.2135 + return dst_output(skb); 1.2136 + } 1.2137 + #endif 1.2138 +- if (skb->len > dst_mtu(skb->dst) && 1.2139 +- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 1.2140 ++ if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) 1.2141 + return ip_fragment(skb, ip_finish_output2); 1.2142 + else 1.2143 + return ip_finish_output2(skb); 1.2144 +@@ -362,7 +361,7 @@ packet_routed: 1.2145 + } 1.2146 + 1.2147 + ip_select_ident_more(iph, &rt->u.dst, sk, 1.2148 +- (skb_shinfo(skb)->tso_segs ?: 1) - 1); 1.2149 ++ (skb_shinfo(skb)->gso_segs ?: 1) - 1); 1.2150 + 1.2151 + /* Add an IP checksum. */ 1.2152 + ip_send_check(iph); 1.2153 +@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str 1.2154 + (length - transhdrlen)); 1.2155 + if (!err) { 1.2156 + /* specify the length of each IP datagram fragment*/ 1.2157 +- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); 1.2158 ++ skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 1.2159 ++ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4; 1.2160 + __skb_queue_tail(&sk->sk_write_queue, skb); 1.2161 + 1.2162 + return 0; 1.2163 +@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk, 1.2164 + */ 1.2165 + if (transhdrlen && 1.2166 + length + fragheaderlen <= mtu && 1.2167 +- rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) && 1.2168 ++ rt->u.dst.dev->features & NETIF_F_ALL_CSUM && 1.2169 + !exthdrlen) 1.2170 + csummode = CHECKSUM_HW; 1.2171 + 1.2172 +@@ -1086,14 +1086,16 @@ ssize_t ip_append_page(struct sock *sk, 1.2173 + 1.2174 + inet->cork.length += size; 1.2175 + if ((sk->sk_protocol == IPPROTO_UDP) && 1.2176 +- (rt->u.dst.dev->features & NETIF_F_UFO)) 1.2177 +- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); 1.2178 ++ (rt->u.dst.dev->features & NETIF_F_UFO)) { 1.2179 ++ skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 1.2180 ++ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4; 1.2181 ++ } 1.2182 + 1.2183 + 1.2184 + while (size > 0) { 1.2185 + int i; 1.2186 + 1.2187 +- if (skb_shinfo(skb)->ufo_size) 1.2188 ++ if (skb_shinfo(skb)->gso_size) 1.2189 + len = size; 1.2190 + else { 1.2191 + 1.2192 +diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c 1.2193 +index d64e2ec..7494823 100644 1.2194 +--- a/net/ipv4/ipcomp.c 1.2195 ++++ b/net/ipv4/ipcomp.c 1.2196 +@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat 1.2197 + struct xfrm_decap_state *decap, struct sk_buff *skb) 1.2198 + { 1.2199 + u8 nexthdr; 1.2200 +- int err = 0; 1.2201 ++ int err = -ENOMEM; 1.2202 + struct iphdr *iph; 1.2203 + union { 1.2204 + struct iphdr iph; 1.2205 +@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat 1.2206 + } tmp_iph; 1.2207 + 1.2208 + 1.2209 +- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 1.2210 +- skb_linearize(skb, GFP_ATOMIC) != 
0) { 1.2211 +- err = -ENOMEM; 1.2212 ++ if (skb_linearize_cow(skb)) 1.2213 + goto out; 1.2214 +- } 1.2215 + 1.2216 + skb->ip_summed = CHECKSUM_NONE; 1.2217 + 1.2218 +@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta 1.2219 + goto out_ok; 1.2220 + } 1.2221 + 1.2222 +- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 1.2223 +- skb_linearize(skb, GFP_ATOMIC) != 0) { 1.2224 ++ if (skb_linearize_cow(skb)) 1.2225 + goto out_ok; 1.2226 +- } 1.2227 + 1.2228 + err = ipcomp_compress(x, skb); 1.2229 + iph = skb->nh.iph; 1.2230 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c 1.2231 +index 00aa80e..84130c9 100644 1.2232 +--- a/net/ipv4/tcp.c 1.2233 ++++ b/net/ipv4/tcp.c 1.2234 +@@ -257,6 +257,7 @@ #include <linux/smp_lock.h> 1.2235 + #include <linux/fs.h> 1.2236 + #include <linux/random.h> 1.2237 + #include <linux/bootmem.h> 1.2238 ++#include <linux/err.h> 1.2239 + 1.2240 + #include <net/icmp.h> 1.2241 + #include <net/tcp.h> 1.2242 +@@ -570,7 +571,7 @@ new_segment: 1.2243 + skb->ip_summed = CHECKSUM_HW; 1.2244 + tp->write_seq += copy; 1.2245 + TCP_SKB_CB(skb)->end_seq += copy; 1.2246 +- skb_shinfo(skb)->tso_segs = 0; 1.2247 ++ skb_shinfo(skb)->gso_segs = 0; 1.2248 + 1.2249 + if (!copied) 1.2250 + TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 1.2251 +@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock 1.2252 + ssize_t res; 1.2253 + struct sock *sk = sock->sk; 1.2254 + 1.2255 +-#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 1.2256 +- 1.2257 + if (!(sk->sk_route_caps & NETIF_F_SG) || 1.2258 +- !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS)) 1.2259 ++ !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 1.2260 + return sock_no_sendpage(sock, page, offset, size, flags); 1.2261 + 1.2262 +-#undef TCP_ZC_CSUM_FLAGS 1.2263 +- 1.2264 + lock_sock(sk); 1.2265 + TCP_CHECK_TIMER(sk); 1.2266 + res = do_tcp_sendpages(sk, &page, offset, size, flags); 1.2267 +@@ -725,9 +722,7 @@ new_segment: 1.2268 + /* 1.2269 + * Check whether we can use HW checksum. 
1.2270 + */ 1.2271 +- if (sk->sk_route_caps & 1.2272 +- (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | 1.2273 +- NETIF_F_HW_CSUM)) 1.2274 ++ if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1.2275 + skb->ip_summed = CHECKSUM_HW; 1.2276 + 1.2277 + skb_entail(sk, tp, skb); 1.2278 +@@ -823,7 +818,7 @@ new_segment: 1.2279 + 1.2280 + tp->write_seq += copy; 1.2281 + TCP_SKB_CB(skb)->end_seq += copy; 1.2282 +- skb_shinfo(skb)->tso_segs = 0; 1.2283 ++ skb_shinfo(skb)->gso_segs = 0; 1.2284 + 1.2285 + from += copy; 1.2286 + copied += copy; 1.2287 +@@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int 1.2288 + } 1.2289 + 1.2290 + 1.2291 ++struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) 1.2292 ++{ 1.2293 ++ struct sk_buff *segs = ERR_PTR(-EINVAL); 1.2294 ++ struct tcphdr *th; 1.2295 ++ unsigned thlen; 1.2296 ++ unsigned int seq; 1.2297 ++ unsigned int delta; 1.2298 ++ unsigned int oldlen; 1.2299 ++ unsigned int len; 1.2300 ++ 1.2301 ++ if (!pskb_may_pull(skb, sizeof(*th))) 1.2302 ++ goto out; 1.2303 ++ 1.2304 ++ th = skb->h.th; 1.2305 ++ thlen = th->doff * 4; 1.2306 ++ if (thlen < sizeof(*th)) 1.2307 ++ goto out; 1.2308 ++ 1.2309 ++ if (!pskb_may_pull(skb, thlen)) 1.2310 ++ goto out; 1.2311 ++ 1.2312 ++ segs = NULL; 1.2313 ++ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) 1.2314 ++ goto out; 1.2315 ++ 1.2316 ++ oldlen = (u16)~skb->len; 1.2317 ++ __skb_pull(skb, thlen); 1.2318 ++ 1.2319 ++ segs = skb_segment(skb, features); 1.2320 ++ if (IS_ERR(segs)) 1.2321 ++ goto out; 1.2322 ++ 1.2323 ++ len = skb_shinfo(skb)->gso_size; 1.2324 ++ delta = htonl(oldlen + (thlen + len)); 1.2325 ++ 1.2326 ++ skb = segs; 1.2327 ++ th = skb->h.th; 1.2328 ++ seq = ntohl(th->seq); 1.2329 ++ 1.2330 ++ do { 1.2331 ++ th->fin = th->psh = 0; 1.2332 ++ 1.2333 ++ th->check = ~csum_fold(th->check + delta); 1.2334 ++ if (skb->ip_summed != CHECKSUM_HW) 1.2335 ++ th->check = csum_fold(csum_partial(skb->h.raw, thlen, 1.2336 ++ skb->csum)); 1.2337 ++ 1.2338 ++ seq += len; 1.2339 ++ skb = skb->next; 1.2340 ++ th = skb->h.th; 1.2341 ++ 1.2342 ++ th->seq = htonl(seq); 1.2343 ++ th->cwr = 0; 1.2344 ++ } while (skb->next); 1.2345 ++ 1.2346 ++ delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); 1.2347 ++ th->check = ~csum_fold(th->check + delta); 1.2348 ++ if (skb->ip_summed != CHECKSUM_HW) 1.2349 ++ th->check = csum_fold(csum_partial(skb->h.raw, thlen, 1.2350 ++ skb->csum)); 1.2351 ++ 1.2352 ++out: 1.2353 ++ return segs; 1.2354 ++} 1.2355 ++ 1.2356 + extern void __skb_cb_too_small_for_tcp(int, int); 1.2357 + extern struct tcp_congestion_ops tcp_reno; 1.2358 + 1.2359 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c 1.2360 +index e9a54ae..defe77a 100644 1.2361 +--- a/net/ipv4/tcp_input.c 1.2362 ++++ b/net/ipv4/tcp_input.c 1.2363 +@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk, 1.2364 + else 1.2365 + pkt_len = (end_seq - 1.2366 + TCP_SKB_CB(skb)->seq); 1.2367 +- if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size)) 1.2368 ++ if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size)) 1.2369 + break; 1.2370 + pcount = tcp_skb_pcount(skb); 1.2371 + } 1.2372 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c 1.2373 +index 310f2e6..ee01f69 100644 1.2374 +--- a/net/ipv4/tcp_output.c 1.2375 ++++ b/net/ipv4/tcp_output.c 1.2376 +@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 1.2377 + /* Avoid the costly divide in the normal 1.2378 + * non-TSO case. 
1.2379 + */ 1.2380 +- skb_shinfo(skb)->tso_segs = 1; 1.2381 +- skb_shinfo(skb)->tso_size = 0; 1.2382 ++ skb_shinfo(skb)->gso_segs = 1; 1.2383 ++ skb_shinfo(skb)->gso_size = 0; 1.2384 ++ skb_shinfo(skb)->gso_type = 0; 1.2385 + } else { 1.2386 + unsigned int factor; 1.2387 + 1.2388 + factor = skb->len + (mss_now - 1); 1.2389 + factor /= mss_now; 1.2390 +- skb_shinfo(skb)->tso_segs = factor; 1.2391 +- skb_shinfo(skb)->tso_size = mss_now; 1.2392 ++ skb_shinfo(skb)->gso_segs = factor; 1.2393 ++ skb_shinfo(skb)->gso_size = mss_now; 1.2394 ++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 1.2395 + } 1.2396 + } 1.2397 + 1.2398 +@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock 1.2399 + 1.2400 + if (!tso_segs || 1.2401 + (tso_segs > 1 && 1.2402 +- skb_shinfo(skb)->tso_size != mss_now)) { 1.2403 ++ tcp_skb_mss(skb) != mss_now)) { 1.2404 + tcp_set_skb_tso_segs(sk, skb, mss_now); 1.2405 + tso_segs = tcp_skb_pcount(skb); 1.2406 + } 1.2407 +@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 1.2408 + tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 1.2409 + if (!pskb_trim(skb, 0)) { 1.2410 + TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 1.2411 +- skb_shinfo(skb)->tso_segs = 1; 1.2412 +- skb_shinfo(skb)->tso_size = 0; 1.2413 ++ skb_shinfo(skb)->gso_segs = 1; 1.2414 ++ skb_shinfo(skb)->gso_size = 0; 1.2415 ++ skb_shinfo(skb)->gso_type = 0; 1.2416 + skb->ip_summed = CHECKSUM_NONE; 1.2417 + skb->csum = 0; 1.2418 + } 1.2419 +@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk) 1.2420 + skb->csum = 0; 1.2421 + TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 1.2422 + TCP_SKB_CB(skb)->sacked = 0; 1.2423 +- skb_shinfo(skb)->tso_segs = 1; 1.2424 +- skb_shinfo(skb)->tso_size = 0; 1.2425 ++ skb_shinfo(skb)->gso_segs = 1; 1.2426 ++ skb_shinfo(skb)->gso_size = 0; 1.2427 ++ skb_shinfo(skb)->gso_type = 0; 1.2428 + 1.2429 + /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 1.2430 + TCP_SKB_CB(skb)->seq = tp->write_seq; 1.2431 +@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock * 1.2432 + skb->csum = 0; 1.2433 + TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 1.2434 + TCP_SKB_CB(skb)->sacked = 0; 1.2435 +- skb_shinfo(skb)->tso_segs = 1; 1.2436 +- skb_shinfo(skb)->tso_size = 0; 1.2437 ++ skb_shinfo(skb)->gso_segs = 1; 1.2438 ++ skb_shinfo(skb)->gso_size = 0; 1.2439 ++ skb_shinfo(skb)->gso_type = 0; 1.2440 + 1.2441 + /* Send it off. 
*/ 1.2442 + TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); 1.2443 +@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 1.2444 + TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 1.2445 + TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 1.2446 + TCP_SKB_CB(skb)->sacked = 0; 1.2447 +- skb_shinfo(skb)->tso_segs = 1; 1.2448 +- skb_shinfo(skb)->tso_size = 0; 1.2449 ++ skb_shinfo(skb)->gso_segs = 1; 1.2450 ++ skb_shinfo(skb)->gso_size = 0; 1.2451 ++ skb_shinfo(skb)->gso_type = 0; 1.2452 + th->seq = htonl(TCP_SKB_CB(skb)->seq); 1.2453 + th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 1.2454 + if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 1.2455 +@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk) 1.2456 + TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; 1.2457 + TCP_ECN_send_syn(sk, tp, buff); 1.2458 + TCP_SKB_CB(buff)->sacked = 0; 1.2459 +- skb_shinfo(buff)->tso_segs = 1; 1.2460 +- skb_shinfo(buff)->tso_size = 0; 1.2461 ++ skb_shinfo(buff)->gso_segs = 1; 1.2462 ++ skb_shinfo(buff)->gso_size = 0; 1.2463 ++ skb_shinfo(buff)->gso_type = 0; 1.2464 + buff->csum = 0; 1.2465 + TCP_SKB_CB(buff)->seq = tp->write_seq++; 1.2466 + TCP_SKB_CB(buff)->end_seq = tp->write_seq; 1.2467 +@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk) 1.2468 + buff->csum = 0; 1.2469 + TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; 1.2470 + TCP_SKB_CB(buff)->sacked = 0; 1.2471 +- skb_shinfo(buff)->tso_segs = 1; 1.2472 +- skb_shinfo(buff)->tso_size = 0; 1.2473 ++ skb_shinfo(buff)->gso_segs = 1; 1.2474 ++ skb_shinfo(buff)->gso_size = 0; 1.2475 ++ skb_shinfo(buff)->gso_type = 0; 1.2476 + 1.2477 + /* Send it off, this clears delayed acks for us. */ 1.2478 + TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); 1.2479 +@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc 1.2480 + skb->csum = 0; 1.2481 + TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 1.2482 + TCP_SKB_CB(skb)->sacked = urgent; 1.2483 +- skb_shinfo(skb)->tso_segs = 1; 1.2484 +- skb_shinfo(skb)->tso_size = 0; 1.2485 ++ skb_shinfo(skb)->gso_segs = 1; 1.2486 ++ skb_shinfo(skb)->gso_size = 0; 1.2487 ++ skb_shinfo(skb)->gso_type = 0; 1.2488 + 1.2489 + /* Use a previous sequence. This should cause the other 1.2490 + * end to send an ack. 
Don't queue or clone SKB, just 1.2491 +diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c 1.2492 +index 32ad229..737c1db 100644 1.2493 +--- a/net/ipv4/xfrm4_output.c 1.2494 ++++ b/net/ipv4/xfrm4_output.c 1.2495 +@@ -9,6 +9,8 @@ 1.2496 + */ 1.2497 + 1.2498 + #include <linux/compiler.h> 1.2499 ++#include <linux/if_ether.h> 1.2500 ++#include <linux/kernel.h> 1.2501 + #include <linux/skbuff.h> 1.2502 + #include <linux/spinlock.h> 1.2503 + #include <linux/netfilter_ipv4.h> 1.2504 +@@ -152,16 +154,10 @@ error_nolock: 1.2505 + goto out_exit; 1.2506 + } 1.2507 + 1.2508 +-static int xfrm4_output_finish(struct sk_buff *skb) 1.2509 ++static int xfrm4_output_finish2(struct sk_buff *skb) 1.2510 + { 1.2511 + int err; 1.2512 + 1.2513 +-#ifdef CONFIG_NETFILTER 1.2514 +- if (!skb->dst->xfrm) { 1.2515 +- IPCB(skb)->flags |= IPSKB_REROUTED; 1.2516 +- return dst_output(skb); 1.2517 +- } 1.2518 +-#endif 1.2519 + while (likely((err = xfrm4_output_one(skb)) == 0)) { 1.2520 + nf_reset(skb); 1.2521 + 1.2522 +@@ -174,7 +170,7 @@ #endif 1.2523 + return dst_output(skb); 1.2524 + 1.2525 + err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL, 1.2526 +- skb->dst->dev, xfrm4_output_finish); 1.2527 ++ skb->dst->dev, xfrm4_output_finish2); 1.2528 + if (unlikely(err != 1)) 1.2529 + break; 1.2530 + } 1.2531 +@@ -182,6 +178,48 @@ #endif 1.2532 + return err; 1.2533 + } 1.2534 + 1.2535 ++static int xfrm4_output_finish(struct sk_buff *skb) 1.2536 ++{ 1.2537 ++ struct sk_buff *segs; 1.2538 ++ 1.2539 ++#ifdef CONFIG_NETFILTER 1.2540 ++ if (!skb->dst->xfrm) { 1.2541 ++ IPCB(skb)->flags |= IPSKB_REROUTED; 1.2542 ++ return dst_output(skb); 1.2543 ++ } 1.2544 ++#endif 1.2545 ++ 1.2546 ++ if (!skb_shinfo(skb)->gso_size) 1.2547 ++ return xfrm4_output_finish2(skb); 1.2548 ++ 1.2549 ++ skb->protocol = htons(ETH_P_IP); 1.2550 ++ segs = skb_gso_segment(skb, 0); 1.2551 ++ kfree_skb(skb); 1.2552 ++ if (unlikely(IS_ERR(segs))) 1.2553 ++ return PTR_ERR(segs); 1.2554 ++ 1.2555 ++ do { 1.2556 ++ struct sk_buff *nskb = segs->next; 1.2557 ++ int err; 1.2558 ++ 1.2559 ++ segs->next = NULL; 1.2560 ++ err = xfrm4_output_finish2(segs); 1.2561 ++ 1.2562 ++ if (unlikely(err)) { 1.2563 ++ while ((segs = nskb)) { 1.2564 ++ nskb = segs->next; 1.2565 ++ segs->next = NULL; 1.2566 ++ kfree_skb(segs); 1.2567 ++ } 1.2568 ++ return err; 1.2569 ++ } 1.2570 ++ 1.2571 ++ segs = nskb; 1.2572 ++ } while (segs); 1.2573 ++ 1.2574 ++ return 0; 1.2575 ++} 1.2576 ++ 1.2577 + int xfrm4_output(struct sk_buff *skb) 1.2578 + { 1.2579 + return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, 1.2580 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c 1.2581 +index 5bf70b1..cf5d17e 100644 1.2582 +--- a/net/ipv6/ip6_output.c 1.2583 ++++ b/net/ipv6/ip6_output.c 1.2584 +@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s 1.2585 + 1.2586 + int ip6_output(struct sk_buff *skb) 1.2587 + { 1.2588 +- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) || 1.2589 ++ if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || 1.2590 + dst_allfrag(skb->dst)) 1.2591 + return ip6_fragment(skb, ip6_output2); 1.2592 + else 1.2593 +@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st 1.2594 + struct frag_hdr fhdr; 1.2595 + 1.2596 + /* specify the length of each IP datagram fragment*/ 1.2597 +- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 1.2598 +- sizeof(struct frag_hdr); 1.2599 ++ skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 1.2600 ++ sizeof(struct frag_hdr); 1.2601 ++ skb_shinfo(skb)->gso_type = 
SKB_GSO_UDPV4; 1.2602 + ipv6_select_ident(skb, &fhdr); 1.2603 + skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1.2604 + __skb_queue_tail(&sk->sk_write_queue, skb); 1.2605 +diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c 1.2606 +index d511a88..ef56d5d 100644 1.2607 +--- a/net/ipv6/ipcomp6.c 1.2608 ++++ b/net/ipv6/ipcomp6.c 1.2609 +@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list); 1.2610 + 1.2611 + static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 1.2612 + { 1.2613 +- int err = 0; 1.2614 ++ int err = -ENOMEM; 1.2615 + u8 nexthdr = 0; 1.2616 + int hdr_len = skb->h.raw - skb->nh.raw; 1.2617 + unsigned char *tmp_hdr = NULL; 1.2618 +@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta 1.2619 + struct crypto_tfm *tfm; 1.2620 + int cpu; 1.2621 + 1.2622 +- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 1.2623 +- skb_linearize(skb, GFP_ATOMIC) != 0) { 1.2624 +- err = -ENOMEM; 1.2625 ++ if (skb_linearize_cow(skb)) 1.2626 + goto out; 1.2627 +- } 1.2628 + 1.2629 + skb->ip_summed = CHECKSUM_NONE; 1.2630 + 1.2631 +@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st 1.2632 + goto out_ok; 1.2633 + } 1.2634 + 1.2635 +- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 1.2636 +- skb_linearize(skb, GFP_ATOMIC) != 0) { 1.2637 ++ if (skb_linearize_cow(skb)) 1.2638 + goto out_ok; 1.2639 +- } 1.2640 + 1.2641 + /* compression */ 1.2642 + plen = skb->len - hdr_len; 1.2643 +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c 1.2644 +index 8024217..39bdeec 100644 1.2645 +--- a/net/ipv6/xfrm6_output.c 1.2646 ++++ b/net/ipv6/xfrm6_output.c 1.2647 +@@ -151,7 +151,7 @@ error_nolock: 1.2648 + goto out_exit; 1.2649 + } 1.2650 + 1.2651 +-static int xfrm6_output_finish(struct sk_buff *skb) 1.2652 ++static int xfrm6_output_finish2(struct sk_buff *skb) 1.2653 + { 1.2654 + int err; 1.2655 + 1.2656 +@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk 1.2657 + return dst_output(skb); 1.2658 + 1.2659 + err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL, 1.2660 +- skb->dst->dev, xfrm6_output_finish); 1.2661 ++ skb->dst->dev, xfrm6_output_finish2); 1.2662 + if (unlikely(err != 1)) 1.2663 + break; 1.2664 + } 1.2665 +@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk 1.2666 + return err; 1.2667 + } 1.2668 + 1.2669 ++static int xfrm6_output_finish(struct sk_buff *skb) 1.2670 ++{ 1.2671 ++ struct sk_buff *segs; 1.2672 ++ 1.2673 ++ if (!skb_shinfo(skb)->gso_size) 1.2674 ++ return xfrm6_output_finish2(skb); 1.2675 ++ 1.2676 ++ skb->protocol = htons(ETH_P_IP); 1.2677 ++ segs = skb_gso_segment(skb, 0); 1.2678 ++ kfree_skb(skb); 1.2679 ++ if (unlikely(IS_ERR(segs))) 1.2680 ++ return PTR_ERR(segs); 1.2681 ++ 1.2682 ++ do { 1.2683 ++ struct sk_buff *nskb = segs->next; 1.2684 ++ int err; 1.2685 ++ 1.2686 ++ segs->next = NULL; 1.2687 ++ err = xfrm6_output_finish2(segs); 1.2688 ++ 1.2689 ++ if (unlikely(err)) { 1.2690 ++ while ((segs = nskb)) { 1.2691 ++ nskb = segs->next; 1.2692 ++ segs->next = NULL; 1.2693 ++ kfree_skb(segs); 1.2694 ++ } 1.2695 ++ return err; 1.2696 ++ } 1.2697 ++ 1.2698 ++ segs = nskb; 1.2699 ++ } while (segs); 1.2700 ++ 1.2701 ++ return 0; 1.2702 ++} 1.2703 ++ 1.2704 + int xfrm6_output(struct sk_buff *skb) 1.2705 + { 1.2706 + return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev, 1.2707 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c 1.2708 +index 99ceb91..28c9efd 100644 1.2709 +--- a/net/sched/sch_generic.c 1.2710 ++++ b/net/sched/sch_generic.c 1.2711 +@@ -72,9 +72,9 @@ 
void qdisc_unlock_tree(struct net_device 1.2712 + dev->queue_lock serializes queue accesses for this device 1.2713 + AND dev->qdisc pointer itself. 1.2714 + 1.2715 +- dev->xmit_lock serializes accesses to device driver. 1.2716 ++ netif_tx_lock serializes accesses to device driver. 1.2717 + 1.2718 +- dev->queue_lock and dev->xmit_lock are mutually exclusive, 1.2719 ++ dev->queue_lock and netif_tx_lock are mutually exclusive, 1.2720 + if one is grabbed, another must be free. 1.2721 + */ 1.2722 + 1.2723 +@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device 1.2724 + NOTE: Called under dev->queue_lock with locally disabled BH. 1.2725 + */ 1.2726 + 1.2727 +-int qdisc_restart(struct net_device *dev) 1.2728 ++static inline int qdisc_restart(struct net_device *dev) 1.2729 + { 1.2730 + struct Qdisc *q = dev->qdisc; 1.2731 + struct sk_buff *skb; 1.2732 + 1.2733 + /* Dequeue packet */ 1.2734 +- if ((skb = q->dequeue(q)) != NULL) { 1.2735 ++ if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) { 1.2736 + unsigned nolock = (dev->features & NETIF_F_LLTX); 1.2737 ++ 1.2738 ++ dev->gso_skb = NULL; 1.2739 ++ 1.2740 + /* 1.2741 + * When the driver has LLTX set it does its own locking 1.2742 + * in start_xmit. No need to add additional overhead by 1.2743 +@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev 1.2744 + * will be requeued. 1.2745 + */ 1.2746 + if (!nolock) { 1.2747 +- if (!spin_trylock(&dev->xmit_lock)) { 1.2748 ++ if (!netif_tx_trylock(dev)) { 1.2749 + collision: 1.2750 + /* So, someone grabbed the driver. */ 1.2751 + 1.2752 +@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev 1.2753 + __get_cpu_var(netdev_rx_stat).cpu_collision++; 1.2754 + goto requeue; 1.2755 + } 1.2756 +- /* Remember that the driver is grabbed by us. */ 1.2757 +- dev->xmit_lock_owner = smp_processor_id(); 1.2758 + } 1.2759 + 1.2760 + { 1.2761 +@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev 1.2762 + 1.2763 + if (!netif_queue_stopped(dev)) { 1.2764 + int ret; 1.2765 +- if (netdev_nit) 1.2766 +- dev_queue_xmit_nit(skb, dev); 1.2767 + 1.2768 +- ret = dev->hard_start_xmit(skb, dev); 1.2769 ++ ret = dev_hard_start_xmit(skb, dev); 1.2770 + if (ret == NETDEV_TX_OK) { 1.2771 + if (!nolock) { 1.2772 +- dev->xmit_lock_owner = -1; 1.2773 +- spin_unlock(&dev->xmit_lock); 1.2774 ++ netif_tx_unlock(dev); 1.2775 + } 1.2776 + spin_lock(&dev->queue_lock); 1.2777 + return -1; 1.2778 +@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev 1.2779 + /* NETDEV_TX_BUSY - we need to requeue */ 1.2780 + /* Release the driver */ 1.2781 + if (!nolock) { 1.2782 +- dev->xmit_lock_owner = -1; 1.2783 +- spin_unlock(&dev->xmit_lock); 1.2784 ++ netif_tx_unlock(dev); 1.2785 + } 1.2786 + spin_lock(&dev->queue_lock); 1.2787 + q = dev->qdisc; 1.2788 +@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev 1.2789 + */ 1.2790 + 1.2791 + requeue: 1.2792 +- q->ops->requeue(skb, q); 1.2793 ++ if (skb->next) 1.2794 ++ dev->gso_skb = skb; 1.2795 ++ else 1.2796 ++ q->ops->requeue(skb, q); 1.2797 + netif_schedule(dev); 1.2798 + return 1; 1.2799 + } 1.2800 +@@ -183,11 +183,23 @@ requeue: 1.2801 + return q->q.qlen; 1.2802 + } 1.2803 + 1.2804 ++void __qdisc_run(struct net_device *dev) 1.2805 ++{ 1.2806 ++ if (unlikely(dev->qdisc == &noop_qdisc)) 1.2807 ++ goto out; 1.2808 ++ 1.2809 ++ while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev)) 1.2810 ++ /* NOTHING */; 1.2811 ++ 1.2812 ++out: 1.2813 ++ clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); 1.2814 ++} 1.2815 ++ 1.2816 + static void dev_watchdog(unsigned long 
arg) 1.2817 + { 1.2818 + struct net_device *dev = (struct net_device *)arg; 1.2819 + 1.2820 +- spin_lock(&dev->xmit_lock); 1.2821 ++ netif_tx_lock(dev); 1.2822 + if (dev->qdisc != &noop_qdisc) { 1.2823 + if (netif_device_present(dev) && 1.2824 + netif_running(dev) && 1.2825 +@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a 1.2826 + dev_hold(dev); 1.2827 + } 1.2828 + } 1.2829 +- spin_unlock(&dev->xmit_lock); 1.2830 ++ netif_tx_unlock(dev); 1.2831 + 1.2832 + dev_put(dev); 1.2833 + } 1.2834 +@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev 1.2835 + 1.2836 + static void dev_watchdog_up(struct net_device *dev) 1.2837 + { 1.2838 +- spin_lock_bh(&dev->xmit_lock); 1.2839 ++ netif_tx_lock_bh(dev); 1.2840 + __netdev_watchdog_up(dev); 1.2841 +- spin_unlock_bh(&dev->xmit_lock); 1.2842 ++ netif_tx_unlock_bh(dev); 1.2843 + } 1.2844 + 1.2845 + static void dev_watchdog_down(struct net_device *dev) 1.2846 + { 1.2847 +- spin_lock_bh(&dev->xmit_lock); 1.2848 ++ netif_tx_lock_bh(dev); 1.2849 + if (del_timer(&dev->watchdog_timer)) 1.2850 + __dev_put(dev); 1.2851 +- spin_unlock_bh(&dev->xmit_lock); 1.2852 ++ netif_tx_unlock_bh(dev); 1.2853 + } 1.2854 + 1.2855 + void netif_carrier_on(struct net_device *dev) 1.2856 +@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d 1.2857 + 1.2858 + dev_watchdog_down(dev); 1.2859 + 1.2860 +- while (test_bit(__LINK_STATE_SCHED, &dev->state)) 1.2861 ++ /* Wait for outstanding dev_queue_xmit calls. */ 1.2862 ++ synchronize_rcu(); 1.2863 ++ 1.2864 ++ /* Wait for outstanding qdisc_run calls. */ 1.2865 ++ while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) 1.2866 + yield(); 1.2867 + 1.2868 +- spin_unlock_wait(&dev->xmit_lock); 1.2869 ++ if (dev->gso_skb) { 1.2870 ++ kfree_skb(dev->gso_skb); 1.2871 ++ dev->gso_skb = NULL; 1.2872 ++ } 1.2873 + } 1.2874 + 1.2875 + void dev_init_scheduler(struct net_device *dev) 1.2876 +@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt); 1.2877 + EXPORT_SYMBOL(qdisc_alloc); 1.2878 + EXPORT_SYMBOL(qdisc_destroy); 1.2879 + EXPORT_SYMBOL(qdisc_reset); 1.2880 +-EXPORT_SYMBOL(qdisc_restart); 1.2881 + EXPORT_SYMBOL(qdisc_lock_tree); 1.2882 + EXPORT_SYMBOL(qdisc_unlock_tree); 1.2883 +diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c 1.2884 +index 79b8ef3..4c16ad5 100644 1.2885 +--- a/net/sched/sch_teql.c 1.2886 ++++ b/net/sched/sch_teql.c 1.2887 +@@ -302,20 +302,17 @@ restart: 1.2888 + 1.2889 + switch (teql_resolve(skb, skb_res, slave)) { 1.2890 + case 0: 1.2891 +- if (spin_trylock(&slave->xmit_lock)) { 1.2892 +- slave->xmit_lock_owner = smp_processor_id(); 1.2893 ++ if (netif_tx_trylock(slave)) { 1.2894 + if (!netif_queue_stopped(slave) && 1.2895 + slave->hard_start_xmit(skb, slave) == 0) { 1.2896 +- slave->xmit_lock_owner = -1; 1.2897 +- spin_unlock(&slave->xmit_lock); 1.2898 ++ netif_tx_unlock(slave); 1.2899 + master->slaves = NEXT_SLAVE(q); 1.2900 + netif_wake_queue(dev); 1.2901 + master->stats.tx_packets++; 1.2902 + master->stats.tx_bytes += len; 1.2903 + return 0; 1.2904 + } 1.2905 +- slave->xmit_lock_owner = -1; 1.2906 +- spin_unlock(&slave->xmit_lock); 1.2907 ++ netif_tx_unlock(slave); 1.2908 + } 1.2909 + if (netif_queue_stopped(dev)) 1.2910 + busy = 1;
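The mechanism at the heart of the base patch above is software GSO: when a device lacks the features a super-packet needs, dev_gso_segment() has skb_gso_segment() split it into a chain linked through skb->next, and dev_hard_start_xmit() walks that chain one segment at a time; if the driver returns busy mid-chain, qdisc_restart() stashes the remainder in dev->gso_skb and retries from there. Below is a minimal user-space sketch of that consume-and-requeue loop. "struct pkt" and xmit() are made-up stand-ins for struct sk_buff and dev->hard_start_xmit(), not kernel APIs, and the sketch simplifies away the holder skb that carries the chain in the real code.

/*
 * Minimal user-space sketch of the segment-list walk performed by
 * dev_hard_start_xmit() in the patch above.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
        struct pkt *next;
        int id;
};

/* Stand-in driver: reject every third call so the requeue path runs. */
static int xmit(struct pkt *p)
{
        static int calls;

        if (++calls % 3 == 0)
                return 1;               /* NETDEV_TX_BUSY */
        printf("sent segment %d\n", p->id);
        free(p);
        return 0;
}

/*
 * Detach the head segment, try to send it, and on failure splice it
 * back so the caller can requeue the rest -- the role dev->gso_skb
 * plays in qdisc_restart() above.
 */
static int xmit_list(struct pkt **head)
{
        while (*head) {
                struct pkt *seg = *head;

                *head = seg->next;
                seg->next = NULL;
                if (xmit(seg)) {
                        seg->next = *head;
                        *head = seg;    /* requeue from *head */
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        struct pkt *list = NULL, **tail = &list;
        int i;

        /* Build a five-segment chain, as skb_gso_segment() would. */
        for (i = 1; i <= 5; i++) {
                struct pkt *p = calloc(1, sizeof(*p));

                if (!p)
                        return 1;
                p->id = i;
                *tail = p;
                tail = &p->next;
        }

        while (xmit_list(&list))
                printf("driver busy, requeueing at segment %d\n", list->id);
        return 0;
}

The point of the splice-back on failure is that no segment is dropped or reordered: transmission resumes at exactly the segment the driver refused.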
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 2.2 +++ b/patches/linux-2.6.16.13/net-gso-1-check-dodgy.patch Tue Jul 25 15:06:39 2006 +0100 2.3 @@ -0,0 +1,27 @@ 2.4 +diff -urp a/net/ipv4/tcp.c b/net/ipv4/tcp.c 2.5 +--- a/net/ipv4/tcp.c 2006-07-25 14:42:53.194910626 +0100 2.6 ++++ b/net/ipv4/tcp.c 2006-07-25 14:41:00.955501910 +0100 2.7 +@@ -2042,13 +2042,19 @@ struct sk_buff *tcp_tso_segment(struct s 2.8 + if (!pskb_may_pull(skb, thlen)) 2.9 + goto out; 2.10 + 2.11 +- segs = NULL; 2.12 +- if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) 2.13 +- goto out; 2.14 +- 2.15 + oldlen = (u16)~skb->len; 2.16 + __skb_pull(skb, thlen); 2.17 + 2.18 ++ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { 2.19 ++ /* Packet is from an untrusted source, reset gso_segs. */ 2.20 ++ int mss = skb_shinfo(skb)->gso_size; 2.21 ++ 2.22 ++ skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; 2.23 ++ 2.24 ++ segs = NULL; 2.25 ++ goto out; 2.26 ++ } 2.27 ++ 2.28 + segs = skb_segment(skb, features); 2.29 + if (IS_ERR(segs)) 2.30 + goto out;
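The dodgy-source patch above keeps the fast path for devices that can take the super-packet unmodified (skb_gso_ok()), but it no longer trusts the gso_segs value that arrived with the packet -- in this tree, presumably one supplied by a guest domain -- and recomputes it from the payload length and MSS with a ceiling division. A stand-alone check of that arithmetic follows; the lengths and MSS values are made up for the demonstration.

#include <assert.h>

static unsigned int gso_segs(unsigned int len, unsigned int mss)
{
        /* (len + mss - 1) / mss == ceil(len / mss) */
        return (len + mss - 1) / mss;
}

int main(void)
{
        assert(gso_segs(4000, 1000) == 4);      /* exact multiple of mss */
        assert(gso_segs(4001, 1000) == 5);      /* one extra byte costs a segment */
        assert(gso_segs(999, 1000) == 1);       /* short packet is one segment */
        return 0;
}

gso_segs is what tcp_skb_pcount() reports, so letting an untrusted value through would distort the host's TCP accounting; recomputing it locally is far cheaper than falling back to software segmentation.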
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/patches/linux-2.6.16.13/net-gso-2-checksum-fix.patch Tue Jul 25 15:06:39 2006 +0100 3.3 @@ -0,0 +1,451 @@ 3.4 +diff -urp a/drivers/net/bnx2.c b/drivers/net/bnx2.c 3.5 +--- a/drivers/net/bnx2.c 2006-07-25 14:41:00.905507519 +0100 3.6 ++++ b/drivers/net/bnx2.c 2006-07-25 14:36:00.288561400 +0100 3.7 +@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp) 3.8 + skb = tx_buf->skb; 3.9 + #ifdef BCM_TSO 3.10 + /* partial BD completions possible with TSO packets */ 3.11 +- if (skb_shinfo(skb)->gso_size) { 3.12 ++ if (skb_is_gso(skb)) { 3.13 + u16 last_idx, last_ring_idx; 3.14 + 3.15 + last_idx = sw_cons + 3.16 +diff -urp a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c 3.17 +--- a/drivers/net/chelsio/sge.c 2006-07-25 14:41:00.908507183 +0100 3.18 ++++ b/drivers/net/chelsio/sge.c 2006-07-25 14:36:00.291561087 +0100 3.19 +@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s 3.20 + struct cpl_tx_pkt *cpl; 3.21 + 3.22 + #ifdef NETIF_F_TSO 3.23 +- if (skb_shinfo(skb)->gso_size) { 3.24 ++ if (skb_is_gso(skb)) { 3.25 + int eth_type; 3.26 + struct cpl_tx_pkt_lso *hdr; 3.27 + 3.28 +diff -urp a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c 3.29 +--- a/drivers/net/e1000/e1000_main.c 2006-07-25 14:41:00.910506958 +0100 3.30 ++++ b/drivers/net/e1000/e1000_main.c 2006-07-25 14:36:00.293560878 +0100 3.31 +@@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter, 3.32 + uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 3.33 + int err; 3.34 + 3.35 +- if (skb_shinfo(skb)->gso_size) { 3.36 ++ if (skb_is_gso(skb)) { 3.37 + if (skb_header_cloned(skb)) { 3.38 + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3.39 + if (err) 3.40 +@@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt 3.41 + * tso gets written back prematurely before the data is fully 3.42 + * DMAd to the controller */ 3.43 + if (!skb->data_len && tx_ring->last_tx_tso && 3.44 +- !skb_shinfo(skb)->gso_size) { 3.45 ++ !skb_is_gso(skb)) { 3.46 + tx_ring->last_tx_tso = 0; 3.47 + size -= 4; 3.48 + } 3.49 +@@ -2934,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, st 3.50 + 3.51 + #ifdef NETIF_F_TSO 3.52 + /* Controller Erratum workaround */ 3.53 +- if (!skb->data_len && tx_ring->last_tx_tso && 3.54 +- !skb_shinfo(skb)->gso_size) 3.55 ++ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3.56 + count++; 3.57 + #endif 3.58 + 3.59 +diff -urp a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c 3.60 +--- a/drivers/net/forcedeth.c 2006-07-25 14:41:00.912506734 +0100 3.61 ++++ b/drivers/net/forcedeth.c 2006-07-25 14:36:00.295560669 +0100 3.62 +@@ -1105,7 +1105,7 @@ static int nv_start_xmit(struct sk_buff 3.63 + np->tx_skbuff[nr] = skb; 3.64 + 3.65 + #ifdef NETIF_F_TSO 3.66 +- if (skb_shinfo(skb)->gso_size) 3.67 ++ if (skb_is_gso(skb)) 3.68 + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 3.69 + else 3.70 + #endif 3.71 +diff -urp a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c 3.72 +--- a/drivers/net/ixgb/ixgb_main.c 2006-07-25 14:41:00.915506397 +0100 3.73 ++++ b/drivers/net/ixgb/ixgb_main.c 2006-07-25 14:36:00.298560355 +0100 3.74 +@@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s 3.75 + uint16_t ipcse, tucse, mss; 3.76 + int err; 3.77 + 3.78 +- if(likely(skb_shinfo(skb)->gso_size)) { 3.79 ++ if (likely(skb_is_gso(skb))) { 3.80 + if (skb_header_cloned(skb)) { 3.81 + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3.82 + if (err) 3.83 +diff -urp a/drivers/net/loopback.c b/drivers/net/loopback.c 
3.84 +--- a/drivers/net/loopback.c 2006-07-25 14:41:00.915506397 +0100 3.85 ++++ b/drivers/net/loopback.c 2006-07-25 14:36:00.298560355 +0100 3.86 +@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff 3.87 + #endif 3.88 + 3.89 + #ifdef LOOPBACK_TSO 3.90 +- if (skb_shinfo(skb)->gso_size) { 3.91 ++ if (skb_is_gso(skb)) { 3.92 + BUG_ON(skb->protocol != htons(ETH_P_IP)); 3.93 + BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 3.94 + 3.95 +diff -urp a/drivers/net/sky2.c b/drivers/net/sky2.c 3.96 +--- a/drivers/net/sky2.c 2006-07-25 14:41:00.924505388 +0100 3.97 ++++ b/drivers/net/sky2.c 2006-07-25 14:36:00.306559519 +0100 3.98 +@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s 3.99 + count = sizeof(dma_addr_t) / sizeof(u32); 3.100 + count += skb_shinfo(skb)->nr_frags * count; 3.101 + 3.102 +- if (skb_shinfo(skb)->gso_size) 3.103 ++ if (skb_is_gso(skb)) 3.104 + ++count; 3.105 + 3.106 + if (skb->ip_summed == CHECKSUM_HW) 3.107 +diff -urp a/drivers/net/typhoon.c b/drivers/net/typhoon.c 3.108 +--- a/drivers/net/typhoon.c 2006-07-25 14:41:00.931504603 +0100 3.109 ++++ b/drivers/net/typhoon.c 2006-07-25 14:36:00.314558683 +0100 3.110 +@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st 3.111 + * If problems develop with TSO, check this first. 3.112 + */ 3.113 + numDesc = skb_shinfo(skb)->nr_frags + 1; 3.114 +- if(skb_tso_size(skb)) 3.115 ++ if (skb_is_gso(skb)) 3.116 + numDesc++; 3.117 + 3.118 + /* When checking for free space in the ring, we need to also 3.119 +@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st 3.120 + TYPHOON_TX_PF_VLAN_TAG_SHIFT); 3.121 + } 3.122 + 3.123 +- if(skb_tso_size(skb)) { 3.124 ++ if (skb_is_gso(skb)) { 3.125 + first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; 3.126 + first_txd->numDesc++; 3.127 + 3.128 +diff -urp a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c 3.129 +--- a/drivers/s390/net/qeth_main.c 2006-07-25 14:41:00.939503705 +0100 3.130 ++++ b/drivers/s390/net/qeth_main.c 2006-07-25 14:36:00.321557952 +0100 3.131 +@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card, 3.132 + queue = card->qdio.out_qs 3.133 + [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 3.134 + 3.135 +- if (skb_shinfo(skb)->gso_size) 3.136 ++ if (skb_is_gso(skb)) 3.137 + large_send = card->options.large_send; 3.138 + 3.139 + /*are we able to do TSO ? 
If so ,prepare and send it from here */ 3.140 +@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card, 3.141 + card->stats.tx_packets++; 3.142 + card->stats.tx_bytes += skb->len; 3.143 + #ifdef CONFIG_QETH_PERF_STATS 3.144 +- if (skb_shinfo(skb)->gso_size && 3.145 +- !(large_send == QETH_LARGE_SEND_NO)) { 3.146 ++ if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) { 3.147 + card->perf_stats.large_send_bytes += skb->len; 3.148 + card->perf_stats.large_send_cnt++; 3.149 + } 3.150 +diff -urp a/include/linux/netdevice.h b/include/linux/netdevice.h 3.151 +--- a/include/linux/netdevice.h 2006-07-25 14:41:00.940503593 +0100 3.152 ++++ b/include/linux/netdevice.h 2006-07-25 14:36:00.323557743 +0100 3.153 +@@ -541,6 +541,7 @@ struct packet_type { 3.154 + struct net_device *); 3.155 + struct sk_buff *(*gso_segment)(struct sk_buff *skb, 3.156 + int features); 3.157 ++ int (*gso_send_check)(struct sk_buff *skb); 3.158 + void *af_packet_priv; 3.159 + struct list_head list; 3.160 + }; 3.161 +@@ -1001,14 +1002,15 @@ extern void linkwatch_run_queue(void); 3.162 + 3.163 + static inline int skb_gso_ok(struct sk_buff *skb, int features) 3.164 + { 3.165 +- int feature = skb_shinfo(skb)->gso_size ? 3.166 +- skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0; 3.167 ++ int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT; 3.168 + return (features & feature) == feature; 3.169 + } 3.170 + 3.171 + static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 3.172 + { 3.173 +- return !skb_gso_ok(skb, dev->features); 3.174 ++ return skb_is_gso(skb) && 3.175 ++ (!skb_gso_ok(skb, dev->features) || 3.176 ++ unlikely(skb->ip_summed != CHECKSUM_HW)); 3.177 + } 3.178 + 3.179 + #endif /* __KERNEL__ */ 3.180 +diff -urp a/include/linux/skbuff.h b/include/linux/skbuff.h 3.181 +--- a/include/linux/skbuff.h 2006-07-25 14:41:00.941503481 +0100 3.182 ++++ b/include/linux/skbuff.h 2006-07-25 14:36:00.323557743 +0100 3.183 +@@ -1403,5 +1403,10 @@ static inline void nf_bridge_get(struct 3.184 + static inline void nf_reset(struct sk_buff *skb) {} 3.185 + #endif /* CONFIG_NETFILTER */ 3.186 + 3.187 ++static inline int skb_is_gso(const struct sk_buff *skb) 3.188 ++{ 3.189 ++ return skb_shinfo(skb)->gso_size; 3.190 ++} 3.191 ++ 3.192 + #endif /* __KERNEL__ */ 3.193 + #endif /* _LINUX_SKBUFF_H */ 3.194 +diff -urp a/include/net/protocol.h b/include/net/protocol.h 3.195 +--- a/include/net/protocol.h 2006-07-25 14:41:00.942503369 +0100 3.196 ++++ b/include/net/protocol.h 2006-07-25 14:36:00.324557639 +0100 3.197 +@@ -37,6 +37,7 @@ 3.198 + struct net_protocol { 3.199 + int (*handler)(struct sk_buff *skb); 3.200 + void (*err_handler)(struct sk_buff *skb, u32 info); 3.201 ++ int (*gso_send_check)(struct sk_buff *skb); 3.202 + struct sk_buff *(*gso_segment)(struct sk_buff *skb, 3.203 + int features); 3.204 + int no_policy; 3.205 +diff -urp a/include/net/tcp.h b/include/net/tcp.h 3.206 +--- a/include/net/tcp.h 2006-07-25 14:41:00.943503256 +0100 3.207 ++++ b/include/net/tcp.h 2006-07-25 14:36:00.325557534 +0100 3.208 +@@ -1063,6 +1063,7 @@ extern struct request_sock_ops tcp_reque 3.209 + 3.210 + extern int tcp_v4_destroy_sock(struct sock *sk); 3.211 + 3.212 ++extern int tcp_v4_gso_send_check(struct sk_buff *skb); 3.213 + extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); 3.214 + 3.215 + #ifdef CONFIG_PROC_FS 3.216 +diff -urp a/net/bridge/br_forward.c b/net/bridge/br_forward.c 3.217 +--- a/net/bridge/br_forward.c 2006-07-25 14:41:00.944503144 +0100 3.218 ++++ 
b/net/bridge/br_forward.c 2006-07-25 14:36:00.326557430 +0100 3.219 +@@ -32,7 +32,7 @@ static inline int should_deliver(const s 3.220 + int br_dev_queue_push_xmit(struct sk_buff *skb) 3.221 + { 3.222 + /* drop mtu oversized packets except tso */ 3.223 +- if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size) 3.224 ++ if (skb->len > skb->dev->mtu && !skb_is_gso(skb)) 3.225 + kfree_skb(skb); 3.226 + else { 3.227 + #ifdef CONFIG_BRIDGE_NETFILTER 3.228 +diff -urp a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c 3.229 +--- a/net/bridge/br_netfilter.c 2006-07-25 14:41:00.945503032 +0100 3.230 ++++ b/net/bridge/br_netfilter.c 2006-07-25 14:36:00.327557325 +0100 3.231 +@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s 3.232 + { 3.233 + if (skb->protocol == htons(ETH_P_IP) && 3.234 + skb->len > skb->dev->mtu && 3.235 +- !skb_shinfo(skb)->gso_size) 3.236 ++ !skb_is_gso(skb)) 3.237 + return ip_fragment(skb, br_dev_queue_push_xmit); 3.238 + else 3.239 + return br_dev_queue_push_xmit(skb); 3.240 +diff -urp a/net/core/dev.c b/net/core/dev.c 3.241 +--- a/net/core/dev.c 2006-07-25 14:41:00.947502808 +0100 3.242 ++++ b/net/core/dev.c 2006-07-25 14:36:00.329557116 +0100 3.243 +@@ -1083,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk 3.244 + unsigned int csum; 3.245 + int ret = 0, offset = skb->h.raw - skb->data; 3.246 + 3.247 +- if (inward) { 3.248 +- skb->ip_summed = CHECKSUM_NONE; 3.249 +- goto out; 3.250 ++ if (inward) 3.251 ++ goto out_set_summed; 3.252 ++ 3.253 ++ if (unlikely(skb_shinfo(skb)->gso_size)) { 3.254 ++ static int warned; 3.255 ++ 3.256 ++ WARN_ON(!warned); 3.257 ++ warned = 1; 3.258 ++ 3.259 ++ /* Let GSO fix up the checksum. */ 3.260 ++ goto out_set_summed; 3.261 + } 3.262 + 3.263 + if (skb_cloned(skb)) { 3.264 +@@ -1102,6 +1110,8 @@ int skb_checksum_help(struct sk_buff *sk 3.265 + BUG_ON(skb->csum + 2 > offset); 3.266 + 3.267 + *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 3.268 ++ 3.269 ++out_set_summed: 3.270 + skb->ip_summed = CHECKSUM_NONE; 3.271 + out: 3.272 + return ret; 3.273 +@@ -1122,17 +1132,35 @@ struct sk_buff *skb_gso_segment(struct s 3.274 + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3.275 + struct packet_type *ptype; 3.276 + int type = skb->protocol; 3.277 ++ int err; 3.278 + 3.279 + BUG_ON(skb_shinfo(skb)->frag_list); 3.280 +- BUG_ON(skb->ip_summed != CHECKSUM_HW); 3.281 + 3.282 + skb->mac.raw = skb->data; 3.283 + skb->mac_len = skb->nh.raw - skb->data; 3.284 + __skb_pull(skb, skb->mac_len); 3.285 + 3.286 ++ if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 3.287 ++ static int warned; 3.288 ++ 3.289 ++ WARN_ON(!warned); 3.290 ++ warned = 1; 3.291 ++ 3.292 ++ if (skb_header_cloned(skb) && 3.293 ++ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 3.294 ++ return ERR_PTR(err); 3.295 ++ } 3.296 ++ 3.297 + rcu_read_lock(); 3.298 + list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 3.299 + if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 3.300 ++ if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 3.301 ++ err = ptype->gso_send_check(skb); 3.302 ++ segs = ERR_PTR(err); 3.303 ++ if (err || skb_gso_ok(skb, features)) 3.304 ++ break; 3.305 ++ __skb_push(skb, skb->data - skb->nh.raw); 3.306 ++ } 3.307 + segs = ptype->gso_segment(skb, features); 3.308 + break; 3.309 + } 3.310 +diff -urp a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c 3.311 +--- a/net/ipv4/af_inet.c 2006-07-25 14:41:00.952502247 +0100 3.312 ++++ b/net/ipv4/af_inet.c 2006-07-25 14:36:00.334556594 +0100 3.313 +@@ -1085,6 +1085,40 @@ int 
inet_sk_rebuild_header(struct sock * 3.314 + 3.315 + EXPORT_SYMBOL(inet_sk_rebuild_header); 3.316 + 3.317 ++static int inet_gso_send_check(struct sk_buff *skb) 3.318 ++{ 3.319 ++ struct iphdr *iph; 3.320 ++ struct net_protocol *ops; 3.321 ++ int proto; 3.322 ++ int ihl; 3.323 ++ int err = -EINVAL; 3.324 ++ 3.325 ++ if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 3.326 ++ goto out; 3.327 ++ 3.328 ++ iph = skb->nh.iph; 3.329 ++ ihl = iph->ihl * 4; 3.330 ++ if (ihl < sizeof(*iph)) 3.331 ++ goto out; 3.332 ++ 3.333 ++ if (unlikely(!pskb_may_pull(skb, ihl))) 3.334 ++ goto out; 3.335 ++ 3.336 ++ skb->h.raw = __skb_pull(skb, ihl); 3.337 ++ iph = skb->nh.iph; 3.338 ++ proto = iph->protocol & (MAX_INET_PROTOS - 1); 3.339 ++ err = -EPROTONOSUPPORT; 3.340 ++ 3.341 ++ rcu_read_lock(); 3.342 ++ ops = rcu_dereference(inet_protos[proto]); 3.343 ++ if (likely(ops && ops->gso_send_check)) 3.344 ++ err = ops->gso_send_check(skb); 3.345 ++ rcu_read_unlock(); 3.346 ++ 3.347 ++out: 3.348 ++ return err; 3.349 ++} 3.350 ++ 3.351 + static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 3.352 + { 3.353 + struct sk_buff *segs = ERR_PTR(-EINVAL); 3.354 +@@ -1142,6 +1176,7 @@ static struct net_protocol igmp_protocol 3.355 + static struct net_protocol tcp_protocol = { 3.356 + .handler = tcp_v4_rcv, 3.357 + .err_handler = tcp_v4_err, 3.358 ++ .gso_send_check = tcp_v4_gso_send_check, 3.359 + .gso_segment = tcp_tso_segment, 3.360 + .no_policy = 1, 3.361 + }; 3.362 +@@ -1188,6 +1223,7 @@ static int ipv4_proc_init(void); 3.363 + static struct packet_type ip_packet_type = { 3.364 + .type = __constant_htons(ETH_P_IP), 3.365 + .func = ip_rcv, 3.366 ++ .gso_send_check = inet_gso_send_check, 3.367 + .gso_segment = inet_gso_segment, 3.368 + }; 3.369 + 3.370 +diff -urp a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c 3.371 +--- a/net/ipv4/ip_output.c 2006-07-25 14:41:00.953502135 +0100 3.372 ++++ b/net/ipv4/ip_output.c 2006-07-25 14:36:00.335556489 +0100 3.373 +@@ -210,7 +210,7 @@ static inline int ip_finish_output(struc 3.374 + return dst_output(skb); 3.375 + } 3.376 + #endif 3.377 +- if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) 3.378 ++ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) 3.379 + return ip_fragment(skb, ip_finish_output2); 3.380 + else 3.381 + return ip_finish_output2(skb); 3.382 +@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, 3.383 + while (size > 0) { 3.384 + int i; 3.385 + 3.386 +- if (skb_shinfo(skb)->gso_size) 3.387 ++ if (skb_is_gso(skb)) 3.388 + len = size; 3.389 + else { 3.390 + 3.391 +diff -urp a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c 3.392 +--- a/net/ipv4/tcp_ipv4.c 2006-07-25 14:39:15.985080788 +0100 3.393 ++++ b/net/ipv4/tcp_ipv4.c 2006-07-25 14:36:00.339556071 +0100 3.394 +@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 3.395 + } 3.396 + } 3.397 + 3.398 ++int tcp_v4_gso_send_check(struct sk_buff *skb) 3.399 ++{ 3.400 ++ struct iphdr *iph; 3.401 ++ struct tcphdr *th; 3.402 ++ 3.403 ++ if (!pskb_may_pull(skb, sizeof(*th))) 3.404 ++ return -EINVAL; 3.405 ++ 3.406 ++ iph = skb->nh.iph; 3.407 ++ th = skb->h.th; 3.408 ++ 3.409 ++ th->check = 0; 3.410 ++ th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); 3.411 ++ skb->csum = offsetof(struct tcphdr, check); 3.412 ++ skb->ip_summed = CHECKSUM_HW; 3.413 ++ return 0; 3.414 ++} 3.415 ++ 3.416 + /* 3.417 + * This routine will send an RST to the other tcp. 
3.418 + * 3.419 +diff -urp a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c 3.420 +--- a/net/ipv4/xfrm4_output.c 2006-07-25 14:41:00.958501574 +0100 3.421 ++++ b/net/ipv4/xfrm4_output.c 2006-07-25 14:36:00.341555862 +0100 3.422 +@@ -189,7 +189,7 @@ static int xfrm4_output_finish(struct sk 3.423 + } 3.424 + #endif 3.425 + 3.426 +- if (!skb_shinfo(skb)->gso_size) 3.427 ++ if (!skb_is_gso(skb)) 3.428 + return xfrm4_output_finish2(skb); 3.429 + 3.430 + skb->protocol = htons(ETH_P_IP); 3.431 +diff -urp a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c 3.432 +--- a/net/ipv6/ip6_output.c 2006-07-25 14:41:00.959501461 +0100 3.433 ++++ b/net/ipv6/ip6_output.c 2006-07-25 14:36:00.341555862 +0100 3.434 +@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s 3.435 + 3.436 + int ip6_output(struct sk_buff *skb) 3.437 + { 3.438 +- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || 3.439 ++ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || 3.440 + dst_allfrag(skb->dst)) 3.441 + return ip6_fragment(skb, ip6_output2); 3.442 + else 3.443 +diff -urp a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c 3.444 +--- a/net/ipv6/xfrm6_output.c 2006-07-25 14:41:00.960501349 +0100 3.445 ++++ b/net/ipv6/xfrm6_output.c 2006-07-25 14:36:00.342555758 +0100 3.446 +@@ -179,7 +179,7 @@ static int xfrm6_output_finish(struct sk 3.447 + { 3.448 + struct sk_buff *segs; 3.449 + 3.450 +- if (!skb_shinfo(skb)->gso_size) 3.451 ++ if (!skb_is_gso(skb)) 3.452 + return xfrm6_output_finish2(skb); 3.453 + 3.454 + skb->protocol = htons(ETH_P_IP);
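[Note on the hunks above, at the seam before the old combined patch is removed: callers stop peeking at skb_shinfo(skb)->gso_size directly and use the new skb_is_gso() predicate, and skb_gso_segment() gains a gso_send_check path so a GSO skb that arrives without CHECKSUM_HW — e.g. one whose checksum was not left for hardware to fill in — has its TCP checksum field primed by tcp_v4_gso_send_check() before segmentation. The fragment below is a minimal userspace sketch of that decision logic, not part of the changeset: the mock structs, constants and main() are invented for illustration, and only the three predicates mirror the patched helpers in include/linux/netdevice.h and include/linux/skbuff.h.

    #include <stdio.h>

    /* Mock of just the sk_buff fields this example needs (illustrative only). */
    struct mock_shinfo {
            unsigned short gso_size;   /* nonzero => packet wants segmentation */
            unsigned short gso_type;   /* SKB_GSO_* bits */
    };

    struct mock_skb {
            struct mock_shinfo shinfo;
            int ip_summed;
    };

    #define CHECKSUM_HW           1
    #define SKB_GSO_TCPV4         (1 << 0)
    #define NETIF_F_GSO_SHIFT     16
    #define NETIF_F_TSO           (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)

    /* Mirrors skb_is_gso(): a GSO skb is simply one with a nonzero gso_size. */
    static int skb_is_gso(const struct mock_skb *skb)
    {
            return skb->shinfo.gso_size;
    }

    /* Mirrors skb_gso_ok(): every gso_type bit needs a matching dev feature. */
    static int skb_gso_ok(const struct mock_skb *skb, int features)
    {
            int feature = skb->shinfo.gso_type << NETIF_F_GSO_SHIFT;

            return (features & feature) == feature;
    }

    /* Mirrors the patched netif_needs_gso(): segment in software when the
     * device cannot handle this gso_type, or when the checksum was not left
     * for the hardware to compute. */
    static int netif_needs_gso(int dev_features, const struct mock_skb *skb)
    {
            return skb_is_gso(skb) &&
                   (!skb_gso_ok(skb, dev_features) ||
                    skb->ip_summed != CHECKSUM_HW);
    }

    int main(void)
    {
            struct mock_skb tso_skb = { { 1448, SKB_GSO_TCPV4 }, CHECKSUM_HW };

            /* TSO-capable device: 0, packet is handed to hardware intact. */
            printf("TSO-capable dev: %d\n", netif_needs_gso(NETIF_F_TSO, &tso_skb));
            /* Device without TSO: 1, software GSO must segment it first. */
            printf("plain dev:       %d\n", netif_needs_gso(0, &tso_skb));
            return 0;
    }

On the real transmit path this predicate decides whether a packet goes straight to the driver or through skb_gso_segment(); the gso_send_check hook added above exists so that the software path can still fix up th->check when CHECKSUM_HW was not set.]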
4.1 --- a/patches/linux-2.6.16.13/net-gso.patch Tue Jul 25 14:26:54 2006 +0100 4.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 4.3 @@ -1,3059 +0,0 @@ 4.4 -diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt 4.5 -index 3c0a5ba..847cedb 100644 4.6 ---- a/Documentation/networking/netdevices.txt 4.7 -+++ b/Documentation/networking/netdevices.txt 4.8 -@@ -42,9 +42,9 @@ dev->get_stats: 4.9 - Context: nominally process, but don't sleep inside an rwlock 4.10 - 4.11 - dev->hard_start_xmit: 4.12 -- Synchronization: dev->xmit_lock spinlock. 4.13 -+ Synchronization: netif_tx_lock spinlock. 4.14 - When the driver sets NETIF_F_LLTX in dev->features this will be 4.15 -- called without holding xmit_lock. In this case the driver 4.16 -+ called without holding netif_tx_lock. In this case the driver 4.17 - has to lock by itself when needed. It is recommended to use a try lock 4.18 - for this and return -1 when the spin lock fails. 4.19 - The locking there should also properly protect against 4.20 -@@ -62,12 +62,12 @@ dev->hard_start_xmit: 4.21 - Only valid when NETIF_F_LLTX is set. 4.22 - 4.23 - dev->tx_timeout: 4.24 -- Synchronization: dev->xmit_lock spinlock. 4.25 -+ Synchronization: netif_tx_lock spinlock. 4.26 - Context: BHs disabled 4.27 - Notes: netif_queue_stopped() is guaranteed true 4.28 - 4.29 - dev->set_multicast_list: 4.30 -- Synchronization: dev->xmit_lock spinlock. 4.31 -+ Synchronization: netif_tx_lock spinlock. 4.32 - Context: BHs disabled 4.33 - 4.34 - dev->poll: 4.35 -diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c 4.36 -index 4be9769..2e7cac7 100644 4.37 ---- a/drivers/block/aoe/aoenet.c 4.38 -+++ b/drivers/block/aoe/aoenet.c 4.39 -@@ -95,9 +95,8 @@ mac_addr(char addr[6]) 4.40 - static struct sk_buff * 4.41 - skb_check(struct sk_buff *skb) 4.42 - { 4.43 -- if (skb_is_nonlinear(skb)) 4.44 - if ((skb = skb_share_check(skb, GFP_ATOMIC))) 4.45 -- if (skb_linearize(skb, GFP_ATOMIC) < 0) { 4.46 -+ if (skb_linearize(skb)) { 4.47 - dev_kfree_skb(skb); 4.48 - return NULL; 4.49 - } 4.50 -diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 4.51 -index a2408d7..c90e620 100644 4.52 ---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 4.53 -+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 4.54 -@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_ 4.55 - 4.56 - ipoib_mcast_stop_thread(dev, 0); 4.57 - 4.58 -- spin_lock_irqsave(&dev->xmit_lock, flags); 4.59 -+ local_irq_save(flags); 4.60 -+ netif_tx_lock(dev); 4.61 - spin_lock(&priv->lock); 4.62 - 4.63 - /* 4.64 -@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_ 4.65 - } 4.66 - 4.67 - spin_unlock(&priv->lock); 4.68 -- spin_unlock_irqrestore(&dev->xmit_lock, flags); 4.69 -+ netif_tx_unlock(dev); 4.70 -+ local_irq_restore(flags); 4.71 - 4.72 - /* We have to cancel outside of the spinlock */ 4.73 - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 4.74 -diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c 4.75 -index 6711eb6..8d2351f 100644 4.76 ---- a/drivers/media/dvb/dvb-core/dvb_net.c 4.77 -+++ b/drivers/media/dvb/dvb-core/dvb_net.c 4.78 -@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void 4.79 - 4.80 - dvb_net_feed_stop(dev); 4.81 - priv->rx_mode = RX_MODE_UNI; 4.82 -- spin_lock_bh(&dev->xmit_lock); 4.83 -+ netif_tx_lock_bh(dev); 4.84 - 4.85 - if (dev->flags & IFF_PROMISC) { 4.86 - dprintk("%s: promiscuous mode\n", dev->name); 4.87 -@@ -1077,7 +1077,7 @@ 
static void wq_set_multicast_list (void 4.88 - } 4.89 - } 4.90 - 4.91 -- spin_unlock_bh(&dev->xmit_lock); 4.92 -+ netif_tx_unlock_bh(dev); 4.93 - dvb_net_feed_start(dev); 4.94 - } 4.95 - 4.96 -diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c 4.97 -index dd41049..6615583 100644 4.98 ---- a/drivers/net/8139cp.c 4.99 -+++ b/drivers/net/8139cp.c 4.100 -@@ -794,7 +794,7 @@ #endif 4.101 - entry = cp->tx_head; 4.102 - eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 4.103 - if (dev->features & NETIF_F_TSO) 4.104 -- mss = skb_shinfo(skb)->tso_size; 4.105 -+ mss = skb_shinfo(skb)->gso_size; 4.106 - 4.107 - if (skb_shinfo(skb)->nr_frags == 0) { 4.108 - struct cp_desc *txd = &cp->tx_ring[entry]; 4.109 -diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c 4.110 -index a24200d..29d9218 100644 4.111 ---- a/drivers/net/bnx2.c 4.112 -+++ b/drivers/net/bnx2.c 4.113 -@@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp) 4.114 - skb = tx_buf->skb; 4.115 - #ifdef BCM_TSO 4.116 - /* partial BD completions possible with TSO packets */ 4.117 -- if (skb_shinfo(skb)->tso_size) { 4.118 -+ if (skb_is_gso(skb)) { 4.119 - u16 last_idx, last_ring_idx; 4.120 - 4.121 - last_idx = sw_cons + 4.122 -@@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b 4.123 - return 1; 4.124 - } 4.125 - 4.126 --/* Called with rtnl_lock from vlan functions and also dev->xmit_lock 4.127 -+/* Called with rtnl_lock from vlan functions and also netif_tx_lock 4.128 - * from set_multicast. 4.129 - */ 4.130 - static void 4.131 -@@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device 4.132 - } 4.133 - #endif 4.134 - 4.135 --/* Called with dev->xmit_lock. 4.136 -+/* Called with netif_tx_lock. 4.137 - * hard_start_xmit is pseudo-lockless - a lock is only required when 4.138 - * the tx queue is full. This way, we get the benefit of lockless 4.139 - * operations most of the time without the complexities to handle 4.140 -@@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str 4.141 - (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 4.142 - } 4.143 - #ifdef BCM_TSO 4.144 -- if ((mss = skb_shinfo(skb)->tso_size) && 4.145 -+ if ((mss = skb_shinfo(skb)->gso_size) && 4.146 - (skb->len > (bp->dev->mtu + ETH_HLEN))) { 4.147 - u32 tcp_opt_len, ip_tcp_len; 4.148 - 4.149 -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c 4.150 -index bcf9f17..e970921 100644 4.151 ---- a/drivers/net/bonding/bond_main.c 4.152 -+++ b/drivers/net/bonding/bond_main.c 4.153 -@@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo 4.154 - } 4.155 - 4.156 - #define BOND_INTERSECT_FEATURES \ 4.157 -- (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\ 4.158 -- NETIF_F_TSO|NETIF_F_UFO) 4.159 -+ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO) 4.160 - 4.161 - /* 4.162 - * Compute the common dev->feature set available to all slaves. 
Some 4.163 -@@ -1164,9 +1163,7 @@ static int bond_compute_features(struct 4.164 - features &= (slave->dev->features & BOND_INTERSECT_FEATURES); 4.165 - 4.166 - if ((features & NETIF_F_SG) && 4.167 -- !(features & (NETIF_F_IP_CSUM | 4.168 -- NETIF_F_NO_CSUM | 4.169 -- NETIF_F_HW_CSUM))) 4.170 -+ !(features & NETIF_F_ALL_CSUM)) 4.171 - features &= ~NETIF_F_SG; 4.172 - 4.173 - /* 4.174 -@@ -4147,7 +4144,7 @@ static int bond_init(struct net_device * 4.175 - */ 4.176 - bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 4.177 - 4.178 -- /* don't acquire bond device's xmit_lock when 4.179 -+ /* don't acquire bond device's netif_tx_lock when 4.180 - * transmitting */ 4.181 - bond_dev->features |= NETIF_F_LLTX; 4.182 - 4.183 -diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c 4.184 -index 30ff8ea..7d72e16 100644 4.185 ---- a/drivers/net/chelsio/sge.c 4.186 -+++ b/drivers/net/chelsio/sge.c 4.187 -@@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s 4.188 - struct cpl_tx_pkt *cpl; 4.189 - 4.190 - #ifdef NETIF_F_TSO 4.191 -- if (skb_shinfo(skb)->tso_size) { 4.192 -+ if (skb_is_gso(skb)) { 4.193 - int eth_type; 4.194 - struct cpl_tx_pkt_lso *hdr; 4.195 - 4.196 -@@ -1434,7 +1434,7 @@ #ifdef NETIF_F_TSO 4.197 - hdr->ip_hdr_words = skb->nh.iph->ihl; 4.198 - hdr->tcp_hdr_words = skb->h.th->doff; 4.199 - hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, 4.200 -- skb_shinfo(skb)->tso_size)); 4.201 -+ skb_shinfo(skb)->gso_size)); 4.202 - hdr->len = htonl(skb->len - sizeof(*hdr)); 4.203 - cpl = (struct cpl_tx_pkt *)hdr; 4.204 - sge->stats.tx_lso_pkts++; 4.205 -diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c 4.206 -index fa29402..96ddc24 100644 4.207 ---- a/drivers/net/e1000/e1000_main.c 4.208 -+++ b/drivers/net/e1000/e1000_main.c 4.209 -@@ -2526,7 +2526,7 @@ #ifdef NETIF_F_TSO 4.210 - uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 4.211 - int err; 4.212 - 4.213 -- if (skb_shinfo(skb)->tso_size) { 4.214 -+ if (skb_is_gso(skb)) { 4.215 - if (skb_header_cloned(skb)) { 4.216 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4.217 - if (err) 4.218 -@@ -2534,7 +2534,7 @@ #ifdef NETIF_F_TSO 4.219 - } 4.220 - 4.221 - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 4.222 -- mss = skb_shinfo(skb)->tso_size; 4.223 -+ mss = skb_shinfo(skb)->gso_size; 4.224 - if (skb->protocol == ntohs(ETH_P_IP)) { 4.225 - skb->nh.iph->tot_len = 0; 4.226 - skb->nh.iph->check = 0; 4.227 -@@ -2651,7 +2651,7 @@ #ifdef NETIF_F_TSO 4.228 - * tso gets written back prematurely before the data is fully 4.229 - * DMAd to the controller */ 4.230 - if (!skb->data_len && tx_ring->last_tx_tso && 4.231 -- !skb_shinfo(skb)->tso_size) { 4.232 -+ !skb_is_gso(skb)) { 4.233 - tx_ring->last_tx_tso = 0; 4.234 - size -= 4; 4.235 - } 4.236 -@@ -2893,7 +2893,7 @@ #endif 4.237 - } 4.238 - 4.239 - #ifdef NETIF_F_TSO 4.240 -- mss = skb_shinfo(skb)->tso_size; 4.241 -+ mss = skb_shinfo(skb)->gso_size; 4.242 - /* The controller does a simple calculation to 4.243 - * make sure there is enough room in the FIFO before 4.244 - * initiating the DMA for each buffer. 
The calc is: 4.245 -@@ -2934,8 +2934,7 @@ #endif 4.246 - 4.247 - #ifdef NETIF_F_TSO 4.248 - /* Controller Erratum workaround */ 4.249 -- if (!skb->data_len && tx_ring->last_tx_tso && 4.250 -- !skb_shinfo(skb)->tso_size) 4.251 -+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 4.252 - count++; 4.253 - #endif 4.254 - 4.255 -diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c 4.256 -index 3682ec6..c6ca459 100644 4.257 ---- a/drivers/net/forcedeth.c 4.258 -+++ b/drivers/net/forcedeth.c 4.259 -@@ -482,9 +482,9 @@ #define LPA_1000HALF 0x0400 4.260 - * critical parts: 4.261 - * - rx is (pseudo-) lockless: it relies on the single-threading provided 4.262 - * by the arch code for interrupts. 4.263 -- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission 4.264 -+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 4.265 - * needs dev->priv->lock :-( 4.266 -- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock. 4.267 -+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 4.268 - */ 4.269 - 4.270 - /* in dev: base, irq */ 4.271 -@@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device 4.272 - 4.273 - /* 4.274 - * nv_start_xmit: dev->hard_start_xmit function 4.275 -- * Called with dev->xmit_lock held. 4.276 -+ * Called with netif_tx_lock held. 4.277 - */ 4.278 - static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 4.279 - { 4.280 -@@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff 4.281 - np->tx_skbuff[nr] = skb; 4.282 - 4.283 - #ifdef NETIF_F_TSO 4.284 -- if (skb_shinfo(skb)->tso_size) 4.285 -- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT); 4.286 -+ if (skb_is_gso(skb)) 4.287 -+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 4.288 - else 4.289 - #endif 4.290 - tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 4.291 -@@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device 4.292 - 4.293 - /* 4.294 - * nv_tx_timeout: dev->tx_timeout function 4.295 -- * Called with dev->xmit_lock held. 4.296 -+ * Called with netif_tx_lock held. 4.297 - */ 4.298 - static void nv_tx_timeout(struct net_device *dev) 4.299 - { 4.300 -@@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi 4.301 - * Changing the MTU is a rare event, it shouldn't matter. 
4.302 - */ 4.303 - disable_irq(dev->irq); 4.304 -- spin_lock_bh(&dev->xmit_lock); 4.305 -+ netif_tx_lock_bh(dev); 4.306 - spin_lock(&np->lock); 4.307 - /* stop engines */ 4.308 - nv_stop_rx(dev); 4.309 -@@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi 4.310 - nv_start_rx(dev); 4.311 - nv_start_tx(dev); 4.312 - spin_unlock(&np->lock); 4.313 -- spin_unlock_bh(&dev->xmit_lock); 4.314 -+ netif_tx_unlock_bh(dev); 4.315 - enable_irq(dev->irq); 4.316 - } 4.317 - return 0; 4.318 -@@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net 4.319 - memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 4.320 - 4.321 - if (netif_running(dev)) { 4.322 -- spin_lock_bh(&dev->xmit_lock); 4.323 -+ netif_tx_lock_bh(dev); 4.324 - spin_lock_irq(&np->lock); 4.325 - 4.326 - /* stop rx engine */ 4.327 -@@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net 4.328 - /* restart rx engine */ 4.329 - nv_start_rx(dev); 4.330 - spin_unlock_irq(&np->lock); 4.331 -- spin_unlock_bh(&dev->xmit_lock); 4.332 -+ netif_tx_unlock_bh(dev); 4.333 - } else { 4.334 - nv_copy_mac_to_hw(dev); 4.335 - } 4.336 -@@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net 4.337 - 4.338 - /* 4.339 - * nv_set_multicast: dev->set_multicast function 4.340 -- * Called with dev->xmit_lock held. 4.341 -+ * Called with netif_tx_lock held. 4.342 - */ 4.343 - static void nv_set_multicast(struct net_device *dev) 4.344 - { 4.345 -diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c 4.346 -index 102c1f0..d12605f 100644 4.347 ---- a/drivers/net/hamradio/6pack.c 4.348 -+++ b/drivers/net/hamradio/6pack.c 4.349 -@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net 4.350 - { 4.351 - struct sockaddr_ax25 *sa = addr; 4.352 - 4.353 -- spin_lock_irq(&dev->xmit_lock); 4.354 -+ netif_tx_lock_bh(dev); 4.355 - memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); 4.356 -- spin_unlock_irq(&dev->xmit_lock); 4.357 -+ netif_tx_unlock_bh(dev); 4.358 - 4.359 - return 0; 4.360 - } 4.361 -@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru 4.362 - break; 4.363 - } 4.364 - 4.365 -- spin_lock_irq(&dev->xmit_lock); 4.366 -+ netif_tx_lock_bh(dev); 4.367 - memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN); 4.368 -- spin_unlock_irq(&dev->xmit_lock); 4.369 -+ netif_tx_unlock_bh(dev); 4.370 - 4.371 - err = 0; 4.372 - break; 4.373 -diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c 4.374 -index dc5e9d5..5c66f5a 100644 4.375 ---- a/drivers/net/hamradio/mkiss.c 4.376 -+++ b/drivers/net/hamradio/mkiss.c 4.377 -@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net 4.378 - { 4.379 - struct sockaddr_ax25 *sa = addr; 4.380 - 4.381 -- spin_lock_irq(&dev->xmit_lock); 4.382 -+ netif_tx_lock_bh(dev); 4.383 - memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); 4.384 -- spin_unlock_irq(&dev->xmit_lock); 4.385 -+ netif_tx_unlock_bh(dev); 4.386 - 4.387 - return 0; 4.388 - } 4.389 -@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct 4.390 - break; 4.391 - } 4.392 - 4.393 -- spin_lock_irq(&dev->xmit_lock); 4.394 -+ netif_tx_lock_bh(dev); 4.395 - memcpy(dev->dev_addr, addr, AX25_ADDR_LEN); 4.396 -- spin_unlock_irq(&dev->xmit_lock); 4.397 -+ netif_tx_unlock_bh(dev); 4.398 - 4.399 - err = 0; 4.400 - break; 4.401 -diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c 4.402 -index 31fb2d7..2e222ef 100644 4.403 ---- a/drivers/net/ifb.c 4.404 -+++ b/drivers/net/ifb.c 4.405 -@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev 4.406 - dp->st_task_enter++; 4.407 - if ((skb = skb_peek(&dp->tq)) == NULL) { 
4.408 - dp->st_txq_refl_try++; 4.409 -- if (spin_trylock(&_dev->xmit_lock)) { 4.410 -+ if (netif_tx_trylock(_dev)) { 4.411 - dp->st_rxq_enter++; 4.412 - while ((skb = skb_dequeue(&dp->rq)) != NULL) { 4.413 - skb_queue_tail(&dp->tq, skb); 4.414 - dp->st_rx2tx_tran++; 4.415 - } 4.416 -- spin_unlock(&_dev->xmit_lock); 4.417 -+ netif_tx_unlock(_dev); 4.418 - } else { 4.419 - /* reschedule */ 4.420 - dp->st_rxq_notenter++; 4.421 -@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev 4.422 - } 4.423 - } 4.424 - 4.425 -- if (spin_trylock(&_dev->xmit_lock)) { 4.426 -+ if (netif_tx_trylock(_dev)) { 4.427 - dp->st_rxq_check++; 4.428 - if ((skb = skb_peek(&dp->rq)) == NULL) { 4.429 - dp->tasklet_pending = 0; 4.430 -@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev 4.431 - netif_wake_queue(_dev); 4.432 - } else { 4.433 - dp->st_rxq_rsch++; 4.434 -- spin_unlock(&_dev->xmit_lock); 4.435 -+ netif_tx_unlock(_dev); 4.436 - goto resched; 4.437 - } 4.438 -- spin_unlock(&_dev->xmit_lock); 4.439 -+ netif_tx_unlock(_dev); 4.440 - } else { 4.441 - resched: 4.442 - dp->tasklet_pending = 1; 4.443 -diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c 4.444 -index a9f49f0..339d4a7 100644 4.445 ---- a/drivers/net/irda/vlsi_ir.c 4.446 -+++ b/drivers/net/irda/vlsi_ir.c 4.447 -@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s 4.448 - || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec)) 4.449 - break; 4.450 - udelay(100); 4.451 -- /* must not sleep here - we are called under xmit_lock! */ 4.452 -+ /* must not sleep here - called under netif_tx_lock! */ 4.453 - } 4.454 - } 4.455 - 4.456 -diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c 4.457 -index f9f77e4..7d187d0 100644 4.458 ---- a/drivers/net/ixgb/ixgb_main.c 4.459 -+++ b/drivers/net/ixgb/ixgb_main.c 4.460 -@@ -1163,7 +1163,7 @@ #ifdef NETIF_F_TSO 4.461 - uint16_t ipcse, tucse, mss; 4.462 - int err; 4.463 - 4.464 -- if(likely(skb_shinfo(skb)->tso_size)) { 4.465 -+ if (likely(skb_is_gso(skb))) { 4.466 - if (skb_header_cloned(skb)) { 4.467 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4.468 - if (err) 4.469 -@@ -1171,7 +1171,7 @@ #ifdef NETIF_F_TSO 4.470 - } 4.471 - 4.472 - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 4.473 -- mss = skb_shinfo(skb)->tso_size; 4.474 -+ mss = skb_shinfo(skb)->gso_size; 4.475 - skb->nh.iph->tot_len = 0; 4.476 - skb->nh.iph->check = 0; 4.477 - skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, 4.478 -diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c 4.479 -index 690a1aa..3843e0a 100644 4.480 ---- a/drivers/net/loopback.c 4.481 -+++ b/drivers/net/loopback.c 4.482 -@@ -74,7 +74,7 @@ static void emulate_large_send_offload(s 4.483 - struct iphdr *iph = skb->nh.iph; 4.484 - struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); 4.485 - unsigned int doffset = (iph->ihl + th->doff) * 4; 4.486 -- unsigned int mtu = skb_shinfo(skb)->tso_size + doffset; 4.487 -+ unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; 4.488 - unsigned int offset = 0; 4.489 - u32 seq = ntohl(th->seq); 4.490 - u16 id = ntohs(iph->id); 4.491 -@@ -139,7 +139,7 @@ #ifndef LOOPBACK_MUST_CHECKSUM 4.492 - #endif 4.493 - 4.494 - #ifdef LOOPBACK_TSO 4.495 -- if (skb_shinfo(skb)->tso_size) { 4.496 -+ if (skb_is_gso(skb)) { 4.497 - BUG_ON(skb->protocol != htons(ETH_P_IP)); 4.498 - BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 4.499 - 4.500 -diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c 4.501 -index c0998ef..0fac9d5 100644 4.502 
---- a/drivers/net/mv643xx_eth.c 4.503 -+++ b/drivers/net/mv643xx_eth.c 4.504 -@@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct 4.505 - 4.506 - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX 4.507 - if (has_tiny_unaligned_frags(skb)) { 4.508 -- if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { 4.509 -+ if (__skb_linearize(skb)) { 4.510 - stats->tx_dropped++; 4.511 - printk(KERN_DEBUG "%s: failed to linearize tiny " 4.512 - "unaligned fragment\n", dev->name); 4.513 -diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c 4.514 -index 9d6d254..c9ed624 100644 4.515 ---- a/drivers/net/natsemi.c 4.516 -+++ b/drivers/net/natsemi.c 4.517 -@@ -323,12 +323,12 @@ performance critical codepaths: 4.518 - The rx process only runs in the interrupt handler. Access from outside 4.519 - the interrupt handler is only permitted after disable_irq(). 4.520 - 4.521 --The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap 4.522 -+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap 4.523 - is set, then access is permitted under spin_lock_irq(&np->lock). 4.524 - 4.525 - Thus configuration functions that want to access everything must call 4.526 - disable_irq(dev->irq); 4.527 -- spin_lock_bh(dev->xmit_lock); 4.528 -+ netif_tx_lock_bh(dev); 4.529 - spin_lock_irq(&np->lock); 4.530 - 4.531 - IV. Notes 4.532 -diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c 4.533 -index 8cc0d0b..e53b313 100644 4.534 ---- a/drivers/net/r8169.c 4.535 -+++ b/drivers/net/r8169.c 4.536 -@@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl 4.537 - static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) 4.538 - { 4.539 - if (dev->features & NETIF_F_TSO) { 4.540 -- u32 mss = skb_shinfo(skb)->tso_size; 4.541 -+ u32 mss = skb_shinfo(skb)->gso_size; 4.542 - 4.543 - if (mss) 4.544 - return LargeSend | ((mss & MSSMask) << MSSShift); 4.545 -diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c 4.546 -index b7f00d6..439f45f 100644 4.547 ---- a/drivers/net/s2io.c 4.548 -+++ b/drivers/net/s2io.c 4.549 -@@ -3522,8 +3522,8 @@ #endif 4.550 - txdp->Control_1 = 0; 4.551 - txdp->Control_2 = 0; 4.552 - #ifdef NETIF_F_TSO 4.553 -- mss = skb_shinfo(skb)->tso_size; 4.554 -- if (mss) { 4.555 -+ mss = skb_shinfo(skb)->gso_size; 4.556 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) { 4.557 - txdp->Control_1 |= TXD_TCP_LSO_EN; 4.558 - txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 4.559 - } 4.560 -@@ -3543,10 +3543,10 @@ #endif 4.561 - } 4.562 - 4.563 - frg_len = skb->len - skb->data_len; 4.564 -- if (skb_shinfo(skb)->ufo_size) { 4.565 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) { 4.566 - int ufo_size; 4.567 - 4.568 -- ufo_size = skb_shinfo(skb)->ufo_size; 4.569 -+ ufo_size = skb_shinfo(skb)->gso_size; 4.570 - ufo_size &= ~7; 4.571 - txdp->Control_1 |= TXD_UFO_EN; 4.572 - txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 4.573 -@@ -3572,7 +3572,7 @@ #endif 4.574 - txdp->Host_Control = (unsigned long) skb; 4.575 - txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 4.576 - 4.577 -- if (skb_shinfo(skb)->ufo_size) 4.578 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) 4.579 - txdp->Control_1 |= TXD_UFO_EN; 4.580 - 4.581 - frg_cnt = skb_shinfo(skb)->nr_frags; 4.582 -@@ -3587,12 +3587,12 @@ #endif 4.583 - (sp->pdev, frag->page, frag->page_offset, 4.584 - frag->size, PCI_DMA_TODEVICE); 4.585 - txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 4.586 -- if (skb_shinfo(skb)->ufo_size) 4.587 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) 4.588 - txdp->Control_1 |= TXD_UFO_EN; 4.589 - } 4.590 - 
txdp->Control_1 |= TXD_GATHER_CODE_LAST; 4.591 - 4.592 -- if (skb_shinfo(skb)->ufo_size) 4.593 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) 4.594 - frg_cnt++; /* as Txd0 was used for inband header */ 4.595 - 4.596 - tx_fifo = mac_control->tx_FIFO_start[queue]; 4.597 -@@ -3606,7 +3606,7 @@ #ifdef NETIF_F_TSO 4.598 - if (mss) 4.599 - val64 |= TX_FIFO_SPECIAL_FUNC; 4.600 - #endif 4.601 -- if (skb_shinfo(skb)->ufo_size) 4.602 -+ if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) 4.603 - val64 |= TX_FIFO_SPECIAL_FUNC; 4.604 - writeq(val64, &tx_fifo->List_Control); 4.605 - 4.606 -diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c 4.607 -index 0618cd5..aa06a82 100644 4.608 ---- a/drivers/net/sky2.c 4.609 -+++ b/drivers/net/sky2.c 4.610 -@@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s 4.611 - count = sizeof(dma_addr_t) / sizeof(u32); 4.612 - count += skb_shinfo(skb)->nr_frags * count; 4.613 - 4.614 -- if (skb_shinfo(skb)->tso_size) 4.615 -+ if (skb_is_gso(skb)) 4.616 - ++count; 4.617 - 4.618 - if (skb->ip_summed == CHECKSUM_HW) 4.619 -@@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf 4.620 - } 4.621 - 4.622 - /* Check for TCP Segmentation Offload */ 4.623 -- mss = skb_shinfo(skb)->tso_size; 4.624 -+ mss = skb_shinfo(skb)->gso_size; 4.625 - if (mss != 0) { 4.626 - /* just drop the packet if non-linear expansion fails */ 4.627 - if (skb_header_cloned(skb) && 4.628 -diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c 4.629 -index caf4102..fc9164a 100644 4.630 ---- a/drivers/net/tg3.c 4.631 -+++ b/drivers/net/tg3.c 4.632 -@@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff 4.633 - #if TG3_TSO_SUPPORT != 0 4.634 - mss = 0; 4.635 - if (skb->len > (tp->dev->mtu + ETH_HLEN) && 4.636 -- (mss = skb_shinfo(skb)->tso_size) != 0) { 4.637 -+ (mss = skb_shinfo(skb)->gso_size) != 0) { 4.638 - int tcp_opt_len, ip_tcp_len; 4.639 - 4.640 - if (skb_header_cloned(skb) && 4.641 -diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c 4.642 -index 5b1af39..11de5af 100644 4.643 ---- a/drivers/net/tulip/winbond-840.c 4.644 -+++ b/drivers/net/tulip/winbond-840.c 4.645 -@@ -1605,11 +1605,11 @@ #ifdef CONFIG_PM 4.646 - * - get_stats: 4.647 - * spin_lock_irq(np->lock), doesn't touch hw if not present 4.648 - * - hard_start_xmit: 4.649 -- * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock); 4.650 -+ * synchronize_irq + netif_tx_disable; 4.651 - * - tx_timeout: 4.652 -- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock); 4.653 -+ * netif_device_detach + netif_tx_disable; 4.654 - * - set_multicast_list 4.655 -- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock); 4.656 -+ * netif_device_detach + netif_tx_disable; 4.657 - * - interrupt handler 4.658 - * doesn't touch hw if not present, synchronize_irq waits for 4.659 - * running instances of the interrupt handler. 
4.660 -@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev 4.661 - netif_device_detach(dev); 4.662 - update_csr6(dev, 0); 4.663 - iowrite32(0, ioaddr + IntrEnable); 4.664 -- netif_stop_queue(dev); 4.665 - spin_unlock_irq(&np->lock); 4.666 - 4.667 -- spin_unlock_wait(&dev->xmit_lock); 4.668 - synchronize_irq(dev->irq); 4.669 -+ netif_tx_disable(dev); 4.670 - 4.671 - np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; 4.672 - 4.673 -diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c 4.674 -index 4c76cb7..3d62abc 100644 4.675 ---- a/drivers/net/typhoon.c 4.676 -+++ b/drivers/net/typhoon.c 4.677 -@@ -340,7 +340,7 @@ #define typhoon_synchronize_irq(x) synch 4.678 - #endif 4.679 - 4.680 - #if defined(NETIF_F_TSO) 4.681 --#define skb_tso_size(x) (skb_shinfo(x)->tso_size) 4.682 -+#define skb_tso_size(x) (skb_shinfo(x)->gso_size) 4.683 - #define TSO_NUM_DESCRIPTORS 2 4.684 - #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT 4.685 - #else 4.686 -@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, st 4.687 - * If problems develop with TSO, check this first. 4.688 - */ 4.689 - numDesc = skb_shinfo(skb)->nr_frags + 1; 4.690 -- if(skb_tso_size(skb)) 4.691 -+ if (skb_is_gso(skb)) 4.692 - numDesc++; 4.693 - 4.694 - /* When checking for free space in the ring, we need to also 4.695 -@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, st 4.696 - TYPHOON_TX_PF_VLAN_TAG_SHIFT); 4.697 - } 4.698 - 4.699 -- if(skb_tso_size(skb)) { 4.700 -+ if (skb_is_gso(skb)) { 4.701 - first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; 4.702 - first_txd->numDesc++; 4.703 - 4.704 -diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c 4.705 -index ed1f837..2eb6b5f 100644 4.706 ---- a/drivers/net/via-velocity.c 4.707 -+++ b/drivers/net/via-velocity.c 4.708 -@@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff 4.709 - 4.710 - int pktlen = skb->len; 4.711 - 4.712 -+#ifdef VELOCITY_ZERO_COPY_SUPPORT 4.713 -+ if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { 4.714 -+ kfree_skb(skb); 4.715 -+ return 0; 4.716 -+ } 4.717 -+#endif 4.718 -+ 4.719 - spin_lock_irqsave(&vptr->lock, flags); 4.720 - 4.721 - index = vptr->td_curr[qnum]; 4.722 -@@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff 4.723 - */ 4.724 - if (pktlen < ETH_ZLEN) { 4.725 - /* Cannot occur until ZC support */ 4.726 -- if(skb_linearize(skb, GFP_ATOMIC)) 4.727 -- return 0; 4.728 - pktlen = ETH_ZLEN; 4.729 - memcpy(tdinfo->buf, skb->data, skb->len); 4.730 - memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); 4.731 -@@ -1933,7 +1938,6 @@ #ifdef VELOCITY_ZERO_COPY_SUPPORT 4.732 - int nfrags = skb_shinfo(skb)->nr_frags; 4.733 - tdinfo->skb = skb; 4.734 - if (nfrags > 6) { 4.735 -- skb_linearize(skb, GFP_ATOMIC); 4.736 - memcpy(tdinfo->buf, skb->data, skb->len); 4.737 - tdinfo->skb_dma[0] = tdinfo->buf_dma; 4.738 - td_ptr->tdesc0.pktsize = 4.739 -diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c 4.740 -index 6fd0bf7..75237c1 100644 4.741 ---- a/drivers/net/wireless/orinoco.c 4.742 -+++ b/drivers/net/wireless/orinoco.c 4.743 -@@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct 4.744 - /* Set promiscuity / multicast*/ 4.745 - priv->promiscuous = 0; 4.746 - priv->mc_count = 0; 4.747 -- __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */ 4.748 -+ 4.749 -+ /* FIXME: what about netif_tx_lock */ 4.750 -+ __orinoco_set_multicast_list(dev); 4.751 - 4.752 - return 0; 4.753 - } 4.754 -diff --git a/drivers/s390/net/qeth_eddp.c 
b/drivers/s390/net/qeth_eddp.c 4.755 -index 82cb4af..57cec40 100644 4.756 ---- a/drivers/s390/net/qeth_eddp.c 4.757 -+++ b/drivers/s390/net/qeth_eddp.c 4.758 -@@ -421,7 +421,7 @@ #endif /* CONFIG_QETH_VLAN */ 4.759 - } 4.760 - tcph = eddp->skb->h.th; 4.761 - while (eddp->skb_offset < eddp->skb->len) { 4.762 -- data_len = min((int)skb_shinfo(eddp->skb)->tso_size, 4.763 -+ data_len = min((int)skb_shinfo(eddp->skb)->gso_size, 4.764 - (int)(eddp->skb->len - eddp->skb_offset)); 4.765 - /* prepare qdio hdr */ 4.766 - if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ 4.767 -@@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd 4.768 - 4.769 - QETH_DBF_TEXT(trace, 5, "eddpcanp"); 4.770 - /* can we put multiple skbs in one page? */ 4.771 -- skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len); 4.772 -+ skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); 4.773 - if (skbs_per_page > 1){ 4.774 -- ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) / 4.775 -+ ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) / 4.776 - skbs_per_page + 1; 4.777 - ctx->elements_per_skb = 1; 4.778 - } else { 4.779 - /* no -> how many elements per skb? */ 4.780 -- ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len + 4.781 -+ ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len + 4.782 - PAGE_SIZE) >> PAGE_SHIFT; 4.783 - ctx->num_pages = ctx->elements_per_skb * 4.784 -- (skb_shinfo(skb)->tso_segs + 1); 4.785 -+ (skb_shinfo(skb)->gso_segs + 1); 4.786 - } 4.787 - ctx->num_elements = ctx->elements_per_skb * 4.788 -- (skb_shinfo(skb)->tso_segs + 1); 4.789 -+ (skb_shinfo(skb)->gso_segs + 1); 4.790 - } 4.791 - 4.792 - static inline struct qeth_eddp_context * 4.793 -diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c 4.794 -index dba7f7f..a3ea8e0 100644 4.795 ---- a/drivers/s390/net/qeth_main.c 4.796 -+++ b/drivers/s390/net/qeth_main.c 4.797 -@@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card, 4.798 - queue = card->qdio.out_qs 4.799 - [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 4.800 - 4.801 -- if (skb_shinfo(skb)->tso_size) 4.802 -+ if (skb_is_gso(skb)) 4.803 - large_send = card->options.large_send; 4.804 - 4.805 - /*are we able to do TSO ? 
If so ,prepare and send it from here */ 4.806 -@@ -4501,8 +4501,7 @@ qeth_send_packet(struct qeth_card *card, 4.807 - card->stats.tx_packets++; 4.808 - card->stats.tx_bytes += skb->len; 4.809 - #ifdef CONFIG_QETH_PERF_STATS 4.810 -- if (skb_shinfo(skb)->tso_size && 4.811 -- !(large_send == QETH_LARGE_SEND_NO)) { 4.812 -+ if (skb_is_gso(skb) && !(large_send == QETH_LARGE_SEND_NO)) { 4.813 - card->perf_stats.large_send_bytes += skb->len; 4.814 - card->perf_stats.large_send_cnt++; 4.815 - } 4.816 -diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h 4.817 -index 1286dde..89cbf34 100644 4.818 ---- a/drivers/s390/net/qeth_tso.h 4.819 -+++ b/drivers/s390/net/qeth_tso.h 4.820 -@@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c 4.821 - hdr->ext.hdr_version = 1; 4.822 - hdr->ext.hdr_len = 28; 4.823 - /*insert non-fix values */ 4.824 -- hdr->ext.mss = skb_shinfo(skb)->tso_size; 4.825 -+ hdr->ext.mss = skb_shinfo(skb)->gso_size; 4.826 - hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); 4.827 - hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - 4.828 - sizeof(struct qeth_hdr_tso)); 4.829 -diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h 4.830 -index 93535f0..9269df7 100644 4.831 ---- a/include/linux/ethtool.h 4.832 -+++ b/include/linux/ethtool.h 4.833 -@@ -408,6 +408,8 @@ #define ETHTOOL_STSO 0x0000001f /* Set 4.834 - #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ 4.835 - #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ 4.836 - #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ 4.837 -+#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ 4.838 -+#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ 4.839 - 4.840 - /* compatibility with older code */ 4.841 - #define SPARC_ETH_GSET ETHTOOL_GSET 4.842 -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h 4.843 -index 7fda03d..9865736 100644 4.844 ---- a/include/linux/netdevice.h 4.845 -+++ b/include/linux/netdevice.h 4.846 -@@ -230,7 +230,8 @@ enum netdev_state_t 4.847 - __LINK_STATE_SCHED, 4.848 - __LINK_STATE_NOCARRIER, 4.849 - __LINK_STATE_RX_SCHED, 4.850 -- __LINK_STATE_LINKWATCH_PENDING 4.851 -+ __LINK_STATE_LINKWATCH_PENDING, 4.852 -+ __LINK_STATE_QDISC_RUNNING, 4.853 - }; 4.854 - 4.855 - 4.856 -@@ -306,9 +307,17 @@ #define NETIF_F_HW_VLAN_TX 128 /* Transm 4.857 - #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ 4.858 - #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ 4.859 - #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ 4.860 --#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ 4.861 -+#define NETIF_F_GSO 2048 /* Enable software GSO. 
*/ 4.862 - #define NETIF_F_LLTX 4096 /* LockLess TX */ 4.863 --#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/ 4.864 -+ 4.865 -+ /* Segmentation offload features */ 4.866 -+#define NETIF_F_GSO_SHIFT 16 4.867 -+#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 4.868 -+#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT) 4.869 -+#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) 4.870 -+ 4.871 -+#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 4.872 -+#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) 4.873 - 4.874 - struct net_device *next_sched; 4.875 - 4.876 -@@ -394,6 +403,9 @@ #define NETIF_F_UFO 8192 4.877 - struct list_head qdisc_list; 4.878 - unsigned long tx_queue_len; /* Max frames per queue allowed */ 4.879 - 4.880 -+ /* Partially transmitted GSO packet. */ 4.881 -+ struct sk_buff *gso_skb; 4.882 -+ 4.883 - /* ingress path synchronizer */ 4.884 - spinlock_t ingress_lock; 4.885 - struct Qdisc *qdisc_ingress; 4.886 -@@ -402,7 +414,7 @@ #define NETIF_F_UFO 8192 4.887 - * One part is mostly used on xmit path (device) 4.888 - */ 4.889 - /* hard_start_xmit synchronizer */ 4.890 -- spinlock_t xmit_lock ____cacheline_aligned_in_smp; 4.891 -+ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 4.892 - /* cpu id of processor entered to hard_start_xmit or -1, 4.893 - if nobody entered there. 4.894 - */ 4.895 -@@ -527,6 +539,9 @@ struct packet_type { 4.896 - struct net_device *, 4.897 - struct packet_type *, 4.898 - struct net_device *); 4.899 -+ struct sk_buff *(*gso_segment)(struct sk_buff *skb, 4.900 -+ int features); 4.901 -+ int (*gso_send_check)(struct sk_buff *skb); 4.902 - void *af_packet_priv; 4.903 - struct list_head list; 4.904 - }; 4.905 -@@ -693,7 +708,8 @@ extern int dev_change_name(struct net_d 4.906 - extern int dev_set_mtu(struct net_device *, int); 4.907 - extern int dev_set_mac_address(struct net_device *, 4.908 - struct sockaddr *); 4.909 --extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 4.910 -+extern int dev_hard_start_xmit(struct sk_buff *skb, 4.911 -+ struct net_device *dev); 4.912 - 4.913 - extern void dev_init(void); 4.914 - 4.915 -@@ -900,11 +916,43 @@ static inline void __netif_rx_complete(s 4.916 - clear_bit(__LINK_STATE_RX_SCHED, &dev->state); 4.917 - } 4.918 - 4.919 -+static inline void netif_tx_lock(struct net_device *dev) 4.920 -+{ 4.921 -+ spin_lock(&dev->_xmit_lock); 4.922 -+ dev->xmit_lock_owner = smp_processor_id(); 4.923 -+} 4.924 -+ 4.925 -+static inline void netif_tx_lock_bh(struct net_device *dev) 4.926 -+{ 4.927 -+ spin_lock_bh(&dev->_xmit_lock); 4.928 -+ dev->xmit_lock_owner = smp_processor_id(); 4.929 -+} 4.930 -+ 4.931 -+static inline int netif_tx_trylock(struct net_device *dev) 4.932 -+{ 4.933 -+ int err = spin_trylock(&dev->_xmit_lock); 4.934 -+ if (!err) 4.935 -+ dev->xmit_lock_owner = smp_processor_id(); 4.936 -+ return err; 4.937 -+} 4.938 -+ 4.939 -+static inline void netif_tx_unlock(struct net_device *dev) 4.940 -+{ 4.941 -+ dev->xmit_lock_owner = -1; 4.942 -+ spin_unlock(&dev->_xmit_lock); 4.943 -+} 4.944 -+ 4.945 -+static inline void netif_tx_unlock_bh(struct net_device *dev) 4.946 -+{ 4.947 -+ dev->xmit_lock_owner = -1; 4.948 -+ spin_unlock_bh(&dev->_xmit_lock); 4.949 -+} 4.950 -+ 4.951 - static inline void netif_tx_disable(struct net_device *dev) 4.952 - { 4.953 -- spin_lock_bh(&dev->xmit_lock); 4.954 -+ netif_tx_lock_bh(dev); 4.955 - netif_stop_queue(dev); 4.956 -- spin_unlock_bh(&dev->xmit_lock); 4.957 -+ netif_tx_unlock_bh(dev); 4.958 - } 4.959 - 
4.960 - /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 4.961 -@@ -932,6 +980,7 @@ extern int netdev_max_backlog; 4.962 - extern int weight_p; 4.963 - extern int netdev_set_master(struct net_device *dev, struct net_device *master); 4.964 - extern int skb_checksum_help(struct sk_buff *skb, int inward); 4.965 -+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); 4.966 - #ifdef CONFIG_BUG 4.967 - extern void netdev_rx_csum_fault(struct net_device *dev); 4.968 - #else 4.969 -@@ -951,6 +1000,19 @@ #endif 4.970 - 4.971 - extern void linkwatch_run_queue(void); 4.972 - 4.973 -+static inline int skb_gso_ok(struct sk_buff *skb, int features) 4.974 -+{ 4.975 -+ int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT; 4.976 -+ return (features & feature) == feature; 4.977 -+} 4.978 -+ 4.979 -+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 4.980 -+{ 4.981 -+ return skb_is_gso(skb) && 4.982 -+ (!skb_gso_ok(skb, dev->features) || 4.983 -+ unlikely(skb->ip_summed != CHECKSUM_HW)); 4.984 -+} 4.985 -+ 4.986 - #endif /* __KERNEL__ */ 4.987 - 4.988 - #endif /* _LINUX_DEV_H */ 4.989 -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h 4.990 -index ad7cc22..adfe3a8 100644 4.991 ---- a/include/linux/skbuff.h 4.992 -+++ b/include/linux/skbuff.h 4.993 -@@ -134,9 +134,10 @@ struct skb_frag_struct { 4.994 - struct skb_shared_info { 4.995 - atomic_t dataref; 4.996 - unsigned short nr_frags; 4.997 -- unsigned short tso_size; 4.998 -- unsigned short tso_segs; 4.999 -- unsigned short ufo_size; 4.1000 -+ unsigned short gso_size; 4.1001 -+ /* Warning: this field is not always filled in (UFO)! */ 4.1002 -+ unsigned short gso_segs; 4.1003 -+ unsigned short gso_type; 4.1004 - unsigned int ip6_frag_id; 4.1005 - struct sk_buff *frag_list; 4.1006 - skb_frag_t frags[MAX_SKB_FRAGS]; 4.1007 -@@ -168,6 +169,14 @@ enum { 4.1008 - SKB_FCLONE_CLONE, 4.1009 - }; 4.1010 - 4.1011 -+enum { 4.1012 -+ SKB_GSO_TCPV4 = 1 << 0, 4.1013 -+ SKB_GSO_UDPV4 = 1 << 1, 4.1014 -+ 4.1015 -+ /* This indicates the skb is from an untrusted source. */ 4.1016 -+ SKB_GSO_DODGY = 1 << 2, 4.1017 -+}; 4.1018 -+ 4.1019 - /** 4.1020 - * struct sk_buff - socket buffer 4.1021 - * @next: Next buffer in list 4.1022 -@@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc 4.1023 - return 0; 4.1024 - } 4.1025 - 4.1026 -+static inline int __skb_linearize(struct sk_buff *skb) 4.1027 -+{ 4.1028 -+ return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; 4.1029 -+} 4.1030 -+ 4.1031 - /** 4.1032 - * skb_linearize - convert paged skb to linear one 4.1033 - * @skb: buffer to linarize 4.1034 -- * @gfp: allocation mode 4.1035 - * 4.1036 - * If there is no free memory -ENOMEM is returned, otherwise zero 4.1037 - * is returned and the old skb data released. 4.1038 - */ 4.1039 --extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); 4.1040 --static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp) 4.1041 -+static inline int skb_linearize(struct sk_buff *skb) 4.1042 -+{ 4.1043 -+ return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; 4.1044 -+} 4.1045 -+ 4.1046 -+/** 4.1047 -+ * skb_linearize_cow - make sure skb is linear and writable 4.1048 -+ * @skb: buffer to process 4.1049 -+ * 4.1050 -+ * If there is no free memory -ENOMEM is returned, otherwise zero 4.1051 -+ * is returned and the old skb data released. 
4.1052 -+ */ 4.1053 -+static inline int skb_linearize_cow(struct sk_buff *skb) 4.1054 - { 4.1055 -- return __skb_linearize(skb, gfp); 4.1056 -+ return skb_is_nonlinear(skb) || skb_cloned(skb) ? 4.1057 -+ __skb_linearize(skb) : 0; 4.1058 - } 4.1059 - 4.1060 - /** 4.1061 -@@ -1254,6 +1279,7 @@ extern void skb_split(struct sk_b 4.1062 - struct sk_buff *skb1, const u32 len); 4.1063 - 4.1064 - extern void skb_release_data(struct sk_buff *skb); 4.1065 -+extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); 4.1066 - 4.1067 - static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 4.1068 - int len, void *buffer) 4.1069 -@@ -1377,5 +1403,10 @@ #else /* CONFIG_NETFILTER */ 4.1070 - static inline void nf_reset(struct sk_buff *skb) {} 4.1071 - #endif /* CONFIG_NETFILTER */ 4.1072 - 4.1073 -+static inline int skb_is_gso(const struct sk_buff *skb) 4.1074 -+{ 4.1075 -+ return skb_shinfo(skb)->gso_size; 4.1076 -+} 4.1077 -+ 4.1078 - #endif /* __KERNEL__ */ 4.1079 - #endif /* _LINUX_SKBUFF_H */ 4.1080 -diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h 4.1081 -index b94d1ad..75b5b93 100644 4.1082 ---- a/include/net/pkt_sched.h 4.1083 -+++ b/include/net/pkt_sched.h 4.1084 -@@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge 4.1085 - struct rtattr *tab); 4.1086 - extern void qdisc_put_rtab(struct qdisc_rate_table *tab); 4.1087 - 4.1088 --extern int qdisc_restart(struct net_device *dev); 4.1089 -+extern void __qdisc_run(struct net_device *dev); 4.1090 - 4.1091 - static inline void qdisc_run(struct net_device *dev) 4.1092 - { 4.1093 -- while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0) 4.1094 -- /* NOTHING */; 4.1095 -+ if (!netif_queue_stopped(dev) && 4.1096 -+ !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) 4.1097 -+ __qdisc_run(dev); 4.1098 - } 4.1099 - 4.1100 - extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, 4.1101 -diff --git a/include/net/protocol.h b/include/net/protocol.h 4.1102 -index 6dc5970..d516c58 100644 4.1103 ---- a/include/net/protocol.h 4.1104 -+++ b/include/net/protocol.h 4.1105 -@@ -37,6 +37,9 @@ #define MAX_INET_PROTOS 256 /* Must be 4.1106 - struct net_protocol { 4.1107 - int (*handler)(struct sk_buff *skb); 4.1108 - void (*err_handler)(struct sk_buff *skb, u32 info); 4.1109 -+ int (*gso_send_check)(struct sk_buff *skb); 4.1110 -+ struct sk_buff *(*gso_segment)(struct sk_buff *skb, 4.1111 -+ int features); 4.1112 - int no_policy; 4.1113 - }; 4.1114 - 4.1115 -diff --git a/include/net/sock.h b/include/net/sock.h 4.1116 -index f63d0d5..a8e8d21 100644 4.1117 ---- a/include/net/sock.h 4.1118 -+++ b/include/net/sock.h 4.1119 -@@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct 4.1120 - { 4.1121 - __sk_dst_set(sk, dst); 4.1122 - sk->sk_route_caps = dst->dev->features; 4.1123 -+ if (sk->sk_route_caps & NETIF_F_GSO) 4.1124 -+ sk->sk_route_caps |= NETIF_F_TSO; 4.1125 - if (sk->sk_route_caps & NETIF_F_TSO) { 4.1126 - if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len) 4.1127 - sk->sk_route_caps &= ~NETIF_F_TSO; 4.1128 -+ else 4.1129 -+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 4.1130 - } 4.1131 - } 4.1132 - 4.1133 -diff --git a/include/net/tcp.h b/include/net/tcp.h 4.1134 -index 77f21c6..22dbbac 100644 4.1135 ---- a/include/net/tcp.h 4.1136 -+++ b/include/net/tcp.h 4.1137 -@@ -552,13 +552,13 @@ #include <net/tcp_ecn.h> 4.1138 - */ 4.1139 - static inline int tcp_skb_pcount(const struct sk_buff *skb) 4.1140 - { 4.1141 -- return skb_shinfo(skb)->tso_segs; 4.1142 -+ return 
skb_shinfo(skb)->gso_segs; 4.1143 - } 4.1144 - 4.1145 - /* This is valid iff tcp_skb_pcount() > 1. */ 4.1146 - static inline int tcp_skb_mss(const struct sk_buff *skb) 4.1147 - { 4.1148 -- return skb_shinfo(skb)->tso_size; 4.1149 -+ return skb_shinfo(skb)->gso_size; 4.1150 - } 4.1151 - 4.1152 - static inline void tcp_dec_pcount_approx(__u32 *count, 4.1153 -@@ -1063,6 +1063,9 @@ extern struct request_sock_ops tcp_reque 4.1154 - 4.1155 - extern int tcp_v4_destroy_sock(struct sock *sk); 4.1156 - 4.1157 -+extern int tcp_v4_gso_send_check(struct sk_buff *skb); 4.1158 -+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); 4.1159 -+ 4.1160 - #ifdef CONFIG_PROC_FS 4.1161 - extern int tcp4_proc_init(void); 4.1162 - extern void tcp4_proc_exit(void); 4.1163 -diff --git a/net/atm/clip.c b/net/atm/clip.c 4.1164 -index 1842a4e..6dc21a7 100644 4.1165 ---- a/net/atm/clip.c 4.1166 -+++ b/net/atm/clip.c 4.1167 -@@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_ 4.1168 - printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc); 4.1169 - return; 4.1170 - } 4.1171 -- spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */ 4.1172 -+ netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ 4.1173 - entry->neigh->used = jiffies; 4.1174 - for (walk = &entry->vccs; *walk; walk = &(*walk)->next) 4.1175 - if (*walk == clip_vcc) { 4.1176 -@@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_ 4.1177 - printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 4.1178 - "0x%p)\n",entry,clip_vcc); 4.1179 - out: 4.1180 -- spin_unlock_bh(&entry->neigh->dev->xmit_lock); 4.1181 -+ netif_tx_unlock_bh(entry->neigh->dev); 4.1182 - } 4.1183 - 4.1184 - /* The neighbour entry n->lock is held. */ 4.1185 -diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c 4.1186 -index 0b33a7b..180e79b 100644 4.1187 ---- a/net/bridge/br_device.c 4.1188 -+++ b/net/bridge/br_device.c 4.1189 -@@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev 4.1190 - struct net_bridge *br = netdev_priv(dev); 4.1191 - 4.1192 - if (data) 4.1193 -- br->feature_mask |= NETIF_F_IP_CSUM; 4.1194 -+ br->feature_mask |= NETIF_F_NO_CSUM; 4.1195 - else 4.1196 -- br->feature_mask &= ~NETIF_F_IP_CSUM; 4.1197 -+ br->feature_mask &= ~NETIF_F_ALL_CSUM; 4.1198 - 4.1199 - br_features_recompute(br); 4.1200 - return 0; 4.1201 -@@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev 4.1202 - dev->set_mac_address = br_set_mac_address; 4.1203 - dev->priv_flags = IFF_EBRIDGE; 4.1204 - 4.1205 -- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 4.1206 -- | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM; 4.1207 -+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 4.1208 -+ NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST; 4.1209 - } 4.1210 -diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c 4.1211 -index 2d24fb4..b34e76f 100644 4.1212 ---- a/net/bridge/br_forward.c 4.1213 -+++ b/net/bridge/br_forward.c 4.1214 -@@ -32,7 +32,7 @@ static inline int should_deliver(const s 4.1215 - int br_dev_queue_push_xmit(struct sk_buff *skb) 4.1216 - { 4.1217 - /* drop mtu oversized packets except tso */ 4.1218 -- if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size) 4.1219 -+ if (skb->len > skb->dev->mtu && !skb_is_gso(skb)) 4.1220 - kfree_skb(skb); 4.1221 - else { 4.1222 - #ifdef CONFIG_BRIDGE_NETFILTER 4.1223 -diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c 4.1224 -index f36b35e..0617146 100644 4.1225 ---- a/net/bridge/br_if.c 4.1226 -+++ b/net/bridge/br_if.c 4.1227 
-@@ -385,17 +385,28 @@ void br_features_recompute(struct net_br 4.1228 - struct net_bridge_port *p; 4.1229 - unsigned long features, checksum; 4.1230 - 4.1231 -- features = br->feature_mask &~ NETIF_F_IP_CSUM; 4.1232 -- checksum = br->feature_mask & NETIF_F_IP_CSUM; 4.1233 -+ checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0; 4.1234 -+ features = br->feature_mask & ~NETIF_F_ALL_CSUM; 4.1235 - 4.1236 - list_for_each_entry(p, &br->port_list, list) { 4.1237 -- if (!(p->dev->features 4.1238 -- & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))) 4.1239 -+ unsigned long feature = p->dev->features; 4.1240 -+ 4.1241 -+ if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM)) 4.1242 -+ checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; 4.1243 -+ if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM)) 4.1244 -+ checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM; 4.1245 -+ if (!(feature & NETIF_F_IP_CSUM)) 4.1246 - checksum = 0; 4.1247 -- features &= p->dev->features; 4.1248 -+ 4.1249 -+ if (feature & NETIF_F_GSO) 4.1250 -+ feature |= NETIF_F_TSO; 4.1251 -+ feature |= NETIF_F_GSO; 4.1252 -+ 4.1253 -+ features &= feature; 4.1254 - } 4.1255 - 4.1256 -- br->dev->features = features | checksum | NETIF_F_LLTX; 4.1257 -+ br->dev->features = features | checksum | NETIF_F_LLTX | 4.1258 -+ NETIF_F_GSO_ROBUST; 4.1259 - } 4.1260 - 4.1261 - /* called with RTNL */ 4.1262 -diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c 4.1263 -index 9e27373..b2dba74 100644 4.1264 ---- a/net/bridge/br_netfilter.c 4.1265 -+++ b/net/bridge/br_netfilter.c 4.1266 -@@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s 4.1267 - { 4.1268 - if (skb->protocol == htons(ETH_P_IP) && 4.1269 - skb->len > skb->dev->mtu && 4.1270 -- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 4.1271 -+ !skb_is_gso(skb)) 4.1272 - return ip_fragment(skb, br_dev_queue_push_xmit); 4.1273 - else 4.1274 - return br_dev_queue_push_xmit(skb); 4.1275 -diff --git a/net/core/dev.c b/net/core/dev.c 4.1276 -index 12a214c..e814a89 100644 4.1277 ---- a/net/core/dev.c 4.1278 -+++ b/net/core/dev.c 4.1279 -@@ -115,6 +115,7 @@ #include <linux/wireless.h> /* Note : w 4.1280 - #include <net/iw_handler.h> 4.1281 - #endif /* CONFIG_NET_RADIO */ 4.1282 - #include <asm/current.h> 4.1283 -+#include <linux/err.h> 4.1284 - 4.1285 - /* 4.1286 - * The list of packet types we will receive (as opposed to discard) 4.1287 -@@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct 4.1288 - * taps currently in use. 4.1289 - */ 4.1290 - 4.1291 --void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 4.1292 -+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 4.1293 - { 4.1294 - struct packet_type *ptype; 4.1295 - 4.1296 -@@ -1082,9 +1083,17 @@ int skb_checksum_help(struct sk_buff *sk 4.1297 - unsigned int csum; 4.1298 - int ret = 0, offset = skb->h.raw - skb->data; 4.1299 - 4.1300 -- if (inward) { 4.1301 -- skb->ip_summed = CHECKSUM_NONE; 4.1302 -- goto out; 4.1303 -+ if (inward) 4.1304 -+ goto out_set_summed; 4.1305 -+ 4.1306 -+ if (unlikely(skb_shinfo(skb)->gso_size)) { 4.1307 -+ static int warned; 4.1308 -+ 4.1309 -+ WARN_ON(!warned); 4.1310 -+ warned = 1; 4.1311 -+ 4.1312 -+ /* Let GSO fix up the checksum. 
*/ 4.1313 -+ goto out_set_summed; 4.1314 - } 4.1315 - 4.1316 - if (skb_cloned(skb)) { 4.1317 -@@ -1101,11 +1110,70 @@ int skb_checksum_help(struct sk_buff *sk 4.1318 - BUG_ON(skb->csum + 2 > offset); 4.1319 - 4.1320 - *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 4.1321 -+ 4.1322 -+out_set_summed: 4.1323 - skb->ip_summed = CHECKSUM_NONE; 4.1324 - out: 4.1325 - return ret; 4.1326 - } 4.1327 - 4.1328 -+/** 4.1329 -+ * skb_gso_segment - Perform segmentation on skb. 4.1330 -+ * @skb: buffer to segment 4.1331 -+ * @features: features for the output path (see dev->features) 4.1332 -+ * 4.1333 -+ * This function segments the given skb and returns a list of segments. 4.1334 -+ * 4.1335 -+ * It may return NULL if the skb requires no segmentation. This is 4.1336 -+ * only possible when GSO is used for verifying header integrity. 4.1337 -+ */ 4.1338 -+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) 4.1339 -+{ 4.1340 -+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 4.1341 -+ struct packet_type *ptype; 4.1342 -+ int type = skb->protocol; 4.1343 -+ int err; 4.1344 -+ 4.1345 -+ BUG_ON(skb_shinfo(skb)->frag_list); 4.1346 -+ 4.1347 -+ skb->mac.raw = skb->data; 4.1348 -+ skb->mac_len = skb->nh.raw - skb->data; 4.1349 -+ __skb_pull(skb, skb->mac_len); 4.1350 -+ 4.1351 -+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 4.1352 -+ static int warned; 4.1353 -+ 4.1354 -+ WARN_ON(!warned); 4.1355 -+ warned = 1; 4.1356 -+ 4.1357 -+ if (skb_header_cloned(skb) && 4.1358 -+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 4.1359 -+ return ERR_PTR(err); 4.1360 -+ } 4.1361 -+ 4.1362 -+ rcu_read_lock(); 4.1363 -+ list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 4.1364 -+ if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 4.1365 -+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 4.1366 -+ err = ptype->gso_send_check(skb); 4.1367 -+ segs = ERR_PTR(err); 4.1368 -+ if (err || skb_gso_ok(skb, features)) 4.1369 -+ break; 4.1370 -+ __skb_push(skb, skb->data - skb->nh.raw); 4.1371 -+ } 4.1372 -+ segs = ptype->gso_segment(skb, features); 4.1373 -+ break; 4.1374 -+ } 4.1375 -+ } 4.1376 -+ rcu_read_unlock(); 4.1377 -+ 4.1378 -+ __skb_push(skb, skb->data - skb->mac.raw); 4.1379 -+ 4.1380 -+ return segs; 4.1381 -+} 4.1382 -+ 4.1383 -+EXPORT_SYMBOL(skb_gso_segment); 4.1384 -+ 4.1385 - /* Take action when hardware reception checksum errors are detected. 
*/ 4.1386 - #ifdef CONFIG_BUG 4.1387 - void netdev_rx_csum_fault(struct net_device *dev) 4.1388 -@@ -1142,75 +1210,108 @@ #else 4.1389 - #define illegal_highdma(dev, skb) (0) 4.1390 - #endif 4.1391 - 4.1392 --/* Keep head the same: replace data */ 4.1393 --int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask) 4.1394 --{ 4.1395 -- unsigned int size; 4.1396 -- u8 *data; 4.1397 -- long offset; 4.1398 -- struct skb_shared_info *ninfo; 4.1399 -- int headerlen = skb->data - skb->head; 4.1400 -- int expand = (skb->tail + skb->data_len) - skb->end; 4.1401 -- 4.1402 -- if (skb_shared(skb)) 4.1403 -- BUG(); 4.1404 -- 4.1405 -- if (expand <= 0) 4.1406 -- expand = 0; 4.1407 -- 4.1408 -- size = skb->end - skb->head + expand; 4.1409 -- size = SKB_DATA_ALIGN(size); 4.1410 -- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 4.1411 -- if (!data) 4.1412 -- return -ENOMEM; 4.1413 -- 4.1414 -- /* Copy entire thing */ 4.1415 -- if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len)) 4.1416 -- BUG(); 4.1417 -- 4.1418 -- /* Set up shinfo */ 4.1419 -- ninfo = (struct skb_shared_info*)(data + size); 4.1420 -- atomic_set(&ninfo->dataref, 1); 4.1421 -- ninfo->tso_size = skb_shinfo(skb)->tso_size; 4.1422 -- ninfo->tso_segs = skb_shinfo(skb)->tso_segs; 4.1423 -- ninfo->nr_frags = 0; 4.1424 -- ninfo->frag_list = NULL; 4.1425 -- 4.1426 -- /* Offset between the two in bytes */ 4.1427 -- offset = data - skb->head; 4.1428 -- 4.1429 -- /* Free old data. */ 4.1430 -- skb_release_data(skb); 4.1431 -- 4.1432 -- skb->head = data; 4.1433 -- skb->end = data + size; 4.1434 -- 4.1435 -- /* Set up new pointers */ 4.1436 -- skb->h.raw += offset; 4.1437 -- skb->nh.raw += offset; 4.1438 -- skb->mac.raw += offset; 4.1439 -- skb->tail += offset; 4.1440 -- skb->data += offset; 4.1441 -- 4.1442 -- /* We are no longer a clone, even if we were. */ 4.1443 -- skb->cloned = 0; 4.1444 -- 4.1445 -- skb->tail += skb->data_len; 4.1446 -- skb->data_len = 0; 4.1447 -+struct dev_gso_cb { 4.1448 -+ void (*destructor)(struct sk_buff *skb); 4.1449 -+}; 4.1450 -+ 4.1451 -+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 4.1452 -+ 4.1453 -+static void dev_gso_skb_destructor(struct sk_buff *skb) 4.1454 -+{ 4.1455 -+ struct dev_gso_cb *cb; 4.1456 -+ 4.1457 -+ do { 4.1458 -+ struct sk_buff *nskb = skb->next; 4.1459 -+ 4.1460 -+ skb->next = nskb->next; 4.1461 -+ nskb->next = NULL; 4.1462 -+ kfree_skb(nskb); 4.1463 -+ } while (skb->next); 4.1464 -+ 4.1465 -+ cb = DEV_GSO_CB(skb); 4.1466 -+ if (cb->destructor) 4.1467 -+ cb->destructor(skb); 4.1468 -+} 4.1469 -+ 4.1470 -+/** 4.1471 -+ * dev_gso_segment - Perform emulated hardware segmentation on skb. 4.1472 -+ * @skb: buffer to segment 4.1473 -+ * 4.1474 -+ * This function segments the given skb and stores the list of segments 4.1475 -+ * in skb->next. 4.1476 -+ */ 4.1477 -+static int dev_gso_segment(struct sk_buff *skb) 4.1478 -+{ 4.1479 -+ struct net_device *dev = skb->dev; 4.1480 -+ struct sk_buff *segs; 4.1481 -+ int features = dev->features & ~(illegal_highdma(dev, skb) ? 4.1482 -+ NETIF_F_SG : 0); 4.1483 -+ 4.1484 -+ segs = skb_gso_segment(skb, features); 4.1485 -+ 4.1486 -+ /* Verifying header integrity only. 
*/ 4.1487 -+ if (!segs) 4.1488 -+ return 0; 4.1489 -+ 4.1490 -+ if (unlikely(IS_ERR(segs))) 4.1491 -+ return PTR_ERR(segs); 4.1492 -+ 4.1493 -+ skb->next = segs; 4.1494 -+ DEV_GSO_CB(skb)->destructor = skb->destructor; 4.1495 -+ skb->destructor = dev_gso_skb_destructor; 4.1496 -+ 4.1497 -+ return 0; 4.1498 -+} 4.1499 -+ 4.1500 -+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 4.1501 -+{ 4.1502 -+ if (likely(!skb->next)) { 4.1503 -+ if (netdev_nit) 4.1504 -+ dev_queue_xmit_nit(skb, dev); 4.1505 -+ 4.1506 -+ if (netif_needs_gso(dev, skb)) { 4.1507 -+ if (unlikely(dev_gso_segment(skb))) 4.1508 -+ goto out_kfree_skb; 4.1509 -+ if (skb->next) 4.1510 -+ goto gso; 4.1511 -+ } 4.1512 -+ 4.1513 -+ return dev->hard_start_xmit(skb, dev); 4.1514 -+ } 4.1515 -+ 4.1516 -+gso: 4.1517 -+ do { 4.1518 -+ struct sk_buff *nskb = skb->next; 4.1519 -+ int rc; 4.1520 -+ 4.1521 -+ skb->next = nskb->next; 4.1522 -+ nskb->next = NULL; 4.1523 -+ rc = dev->hard_start_xmit(nskb, dev); 4.1524 -+ if (unlikely(rc)) { 4.1525 -+ nskb->next = skb->next; 4.1526 -+ skb->next = nskb; 4.1527 -+ return rc; 4.1528 -+ } 4.1529 -+ if (unlikely(netif_queue_stopped(dev) && skb->next)) 4.1530 -+ return NETDEV_TX_BUSY; 4.1531 -+ } while (skb->next); 4.1532 -+ 4.1533 -+ skb->destructor = DEV_GSO_CB(skb)->destructor; 4.1534 -+ 4.1535 -+out_kfree_skb: 4.1536 -+ kfree_skb(skb); 4.1537 - return 0; 4.1538 - } 4.1539 - 4.1540 - #define HARD_TX_LOCK(dev, cpu) { \ 4.1541 - if ((dev->features & NETIF_F_LLTX) == 0) { \ 4.1542 -- spin_lock(&dev->xmit_lock); \ 4.1543 -- dev->xmit_lock_owner = cpu; \ 4.1544 -+ netif_tx_lock(dev); \ 4.1545 - } \ 4.1546 - } 4.1547 - 4.1548 - #define HARD_TX_UNLOCK(dev) { \ 4.1549 - if ((dev->features & NETIF_F_LLTX) == 0) { \ 4.1550 -- dev->xmit_lock_owner = -1; \ 4.1551 -- spin_unlock(&dev->xmit_lock); \ 4.1552 -+ netif_tx_unlock(dev); \ 4.1553 - } \ 4.1554 - } 4.1555 - 4.1556 -@@ -1246,9 +1347,13 @@ int dev_queue_xmit(struct sk_buff *skb) 4.1557 - struct Qdisc *q; 4.1558 - int rc = -ENOMEM; 4.1559 - 4.1560 -+ /* GSO will handle the following emulations directly. */ 4.1561 -+ if (netif_needs_gso(dev, skb)) 4.1562 -+ goto gso; 4.1563 -+ 4.1564 - if (skb_shinfo(skb)->frag_list && 4.1565 - !(dev->features & NETIF_F_FRAGLIST) && 4.1566 -- __skb_linearize(skb, GFP_ATOMIC)) 4.1567 -+ __skb_linearize(skb)) 4.1568 - goto out_kfree_skb; 4.1569 - 4.1570 - /* Fragmented skb is linearized if device does not support SG, 4.1571 -@@ -1257,25 +1362,26 @@ int dev_queue_xmit(struct sk_buff *skb) 4.1572 - */ 4.1573 - if (skb_shinfo(skb)->nr_frags && 4.1574 - (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) && 4.1575 -- __skb_linearize(skb, GFP_ATOMIC)) 4.1576 -+ __skb_linearize(skb)) 4.1577 - goto out_kfree_skb; 4.1578 - 4.1579 - /* If packet is not checksummed and device does not support 4.1580 - * checksumming for this protocol, complete checksumming here. 4.1581 - */ 4.1582 - if (skb->ip_summed == CHECKSUM_HW && 4.1583 -- (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) && 4.1584 -+ (!(dev->features & NETIF_F_GEN_CSUM) && 4.1585 - (!(dev->features & NETIF_F_IP_CSUM) || 4.1586 - skb->protocol != htons(ETH_P_IP)))) 4.1587 - if (skb_checksum_help(skb, 0)) 4.1588 - goto out_kfree_skb; 4.1589 - 4.1590 -+gso: 4.1591 - spin_lock_prefetch(&dev->queue_lock); 4.1592 - 4.1593 - /* Disable soft irqs for various locks below. Also 4.1594 - * stops preemption for RCU. 4.1595 - */ 4.1596 -- local_bh_disable(); 4.1597 -+ rcu_read_lock_bh(); 4.1598 - 4.1599 - /* Updates of qdisc are serialized by queue_lock. 
4.1600 - * The struct Qdisc which is pointed to by qdisc is now a 4.1601 -@@ -1309,8 +1415,8 @@ #endif 4.1602 - /* The device has no queue. Common case for software devices: 4.1603 - loopback, all the sorts of tunnels... 4.1604 - 4.1605 -- Really, it is unlikely that xmit_lock protection is necessary here. 4.1606 -- (f.e. loopback and IP tunnels are clean ignoring statistics 4.1607 -+ Really, it is unlikely that netif_tx_lock protection is necessary 4.1608 -+ here. (f.e. loopback and IP tunnels are clean ignoring statistics 4.1609 - counters.) 4.1610 - However, it is possible, that they rely on protection 4.1611 - made by us here. 4.1612 -@@ -1326,11 +1432,8 @@ #endif 4.1613 - HARD_TX_LOCK(dev, cpu); 4.1614 - 4.1615 - if (!netif_queue_stopped(dev)) { 4.1616 -- if (netdev_nit) 4.1617 -- dev_queue_xmit_nit(skb, dev); 4.1618 -- 4.1619 - rc = 0; 4.1620 -- if (!dev->hard_start_xmit(skb, dev)) { 4.1621 -+ if (!dev_hard_start_xmit(skb, dev)) { 4.1622 - HARD_TX_UNLOCK(dev); 4.1623 - goto out; 4.1624 - } 4.1625 -@@ -1349,13 +1452,13 @@ #endif 4.1626 - } 4.1627 - 4.1628 - rc = -ENETDOWN; 4.1629 -- local_bh_enable(); 4.1630 -+ rcu_read_unlock_bh(); 4.1631 - 4.1632 - out_kfree_skb: 4.1633 - kfree_skb(skb); 4.1634 - return rc; 4.1635 - out: 4.1636 -- local_bh_enable(); 4.1637 -+ rcu_read_unlock_bh(); 4.1638 - return rc; 4.1639 - } 4.1640 - 4.1641 -@@ -2670,7 +2773,7 @@ int register_netdevice(struct net_device 4.1642 - BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 4.1643 - 4.1644 - spin_lock_init(&dev->queue_lock); 4.1645 -- spin_lock_init(&dev->xmit_lock); 4.1646 -+ spin_lock_init(&dev->_xmit_lock); 4.1647 - dev->xmit_lock_owner = -1; 4.1648 - #ifdef CONFIG_NET_CLS_ACT 4.1649 - spin_lock_init(&dev->ingress_lock); 4.1650 -@@ -2714,9 +2817,7 @@ #endif 4.1651 - 4.1652 - /* Fix illegal SG+CSUM combinations. */ 4.1653 - if ((dev->features & NETIF_F_SG) && 4.1654 -- !(dev->features & (NETIF_F_IP_CSUM | 4.1655 -- NETIF_F_NO_CSUM | 4.1656 -- NETIF_F_HW_CSUM))) { 4.1657 -+ !(dev->features & NETIF_F_ALL_CSUM)) { 4.1658 - printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", 4.1659 - dev->name); 4.1660 - dev->features &= ~NETIF_F_SG; 4.1661 -@@ -3268,7 +3369,6 @@ subsys_initcall(net_dev_init); 4.1662 - EXPORT_SYMBOL(__dev_get_by_index); 4.1663 - EXPORT_SYMBOL(__dev_get_by_name); 4.1664 - EXPORT_SYMBOL(__dev_remove_pack); 4.1665 --EXPORT_SYMBOL(__skb_linearize); 4.1666 - EXPORT_SYMBOL(dev_valid_name); 4.1667 - EXPORT_SYMBOL(dev_add_pack); 4.1668 - EXPORT_SYMBOL(dev_alloc_name); 4.1669 -diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c 4.1670 -index 05d6085..c57d887 100644 4.1671 ---- a/net/core/dev_mcast.c 4.1672 -+++ b/net/core/dev_mcast.c 4.1673 -@@ -62,7 +62,7 @@ #include <net/arp.h> 4.1674 - * Device mc lists are changed by bh at least if IPv6 is enabled, 4.1675 - * so that it must be bh protected. 4.1676 - * 4.1677 -- * We block accesses to device mc filters with dev->xmit_lock. 4.1678 -+ * We block accesses to device mc filters with netif_tx_lock. 
4.1679 - */ 4.1680 - 4.1681 - /* 4.1682 -@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d 4.1683 - 4.1684 - void dev_mc_upload(struct net_device *dev) 4.1685 - { 4.1686 -- spin_lock_bh(&dev->xmit_lock); 4.1687 -+ netif_tx_lock_bh(dev); 4.1688 - __dev_mc_upload(dev); 4.1689 -- spin_unlock_bh(&dev->xmit_lock); 4.1690 -+ netif_tx_unlock_bh(dev); 4.1691 - } 4.1692 - 4.1693 - /* 4.1694 -@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev 4.1695 - int err = 0; 4.1696 - struct dev_mc_list *dmi, **dmip; 4.1697 - 4.1698 -- spin_lock_bh(&dev->xmit_lock); 4.1699 -+ netif_tx_lock_bh(dev); 4.1700 - 4.1701 - for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { 4.1702 - /* 4.1703 -@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev 4.1704 - */ 4.1705 - __dev_mc_upload(dev); 4.1706 - 4.1707 -- spin_unlock_bh(&dev->xmit_lock); 4.1708 -+ netif_tx_unlock_bh(dev); 4.1709 - return 0; 4.1710 - } 4.1711 - } 4.1712 - err = -ENOENT; 4.1713 - done: 4.1714 -- spin_unlock_bh(&dev->xmit_lock); 4.1715 -+ netif_tx_unlock_bh(dev); 4.1716 - return err; 4.1717 - } 4.1718 - 4.1719 -@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v 4.1720 - 4.1721 - dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC); 4.1722 - 4.1723 -- spin_lock_bh(&dev->xmit_lock); 4.1724 -+ netif_tx_lock_bh(dev); 4.1725 - for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { 4.1726 - if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && 4.1727 - dmi->dmi_addrlen == alen) { 4.1728 -@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v 4.1729 - } 4.1730 - 4.1731 - if ((dmi = dmi1) == NULL) { 4.1732 -- spin_unlock_bh(&dev->xmit_lock); 4.1733 -+ netif_tx_unlock_bh(dev); 4.1734 - return -ENOMEM; 4.1735 - } 4.1736 - memcpy(dmi->dmi_addr, addr, alen); 4.1737 -@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v 4.1738 - 4.1739 - __dev_mc_upload(dev); 4.1740 - 4.1741 -- spin_unlock_bh(&dev->xmit_lock); 4.1742 -+ netif_tx_unlock_bh(dev); 4.1743 - return 0; 4.1744 - 4.1745 - done: 4.1746 -- spin_unlock_bh(&dev->xmit_lock); 4.1747 -+ netif_tx_unlock_bh(dev); 4.1748 - kfree(dmi1); 4.1749 - return err; 4.1750 - } 4.1751 -@@ -204,7 +204,7 @@ done: 4.1752 - 4.1753 - void dev_mc_discard(struct net_device *dev) 4.1754 - { 4.1755 -- spin_lock_bh(&dev->xmit_lock); 4.1756 -+ netif_tx_lock_bh(dev); 4.1757 - 4.1758 - while (dev->mc_list != NULL) { 4.1759 - struct dev_mc_list *tmp = dev->mc_list; 4.1760 -@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d 4.1761 - } 4.1762 - dev->mc_count = 0; 4.1763 - 4.1764 -- spin_unlock_bh(&dev->xmit_lock); 4.1765 -+ netif_tx_unlock_bh(dev); 4.1766 - } 4.1767 - 4.1768 - #ifdef CONFIG_PROC_FS 4.1769 -@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi 4.1770 - struct dev_mc_list *m; 4.1771 - struct net_device *dev = v; 4.1772 - 4.1773 -- spin_lock_bh(&dev->xmit_lock); 4.1774 -+ netif_tx_lock_bh(dev); 4.1775 - for (m = dev->mc_list; m; m = m->next) { 4.1776 - int i; 4.1777 - 4.1778 -@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi 4.1779 - 4.1780 - seq_putc(seq, '\n'); 4.1781 - } 4.1782 -- spin_unlock_bh(&dev->xmit_lock); 4.1783 -+ netif_tx_unlock_bh(dev); 4.1784 - return 0; 4.1785 - } 4.1786 - 4.1787 -diff --git a/net/core/ethtool.c b/net/core/ethtool.c 4.1788 -index e6f7610..27ce168 100644 4.1789 ---- a/net/core/ethtool.c 4.1790 -+++ b/net/core/ethtool.c 4.1791 -@@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic 4.1792 - 4.1793 - u32 ethtool_op_get_tx_csum(struct net_device *dev) 4.1794 - { 4.1795 -- return (dev->features & 
(NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0; 4.1796 -+ return (dev->features & NETIF_F_ALL_CSUM) != 0; 4.1797 - } 4.1798 - 4.1799 - int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 4.1800 -@@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev 4.1801 - return -EFAULT; 4.1802 - 4.1803 - if (edata.data && 4.1804 -- !(dev->features & (NETIF_F_IP_CSUM | 4.1805 -- NETIF_F_NO_CSUM | 4.1806 -- NETIF_F_HW_CSUM))) 4.1807 -+ !(dev->features & NETIF_F_ALL_CSUM)) 4.1808 - return -EINVAL; 4.1809 - 4.1810 - return __ethtool_set_sg(dev, edata.data); 4.1811 -@@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_de 4.1812 - 4.1813 - static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) 4.1814 - { 4.1815 -- struct ethtool_value edata = { ETHTOOL_GTSO }; 4.1816 -+ struct ethtool_value edata = { ETHTOOL_GUFO }; 4.1817 - 4.1818 - if (!dev->ethtool_ops->get_ufo) 4.1819 - return -EOPNOTSUPP; 4.1820 -@@ -600,6 +598,7 @@ static int ethtool_get_ufo(struct net_de 4.1821 - return -EFAULT; 4.1822 - return 0; 4.1823 - } 4.1824 -+ 4.1825 - static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) 4.1826 - { 4.1827 - struct ethtool_value edata; 4.1828 -@@ -615,6 +614,29 @@ static int ethtool_set_ufo(struct net_de 4.1829 - return dev->ethtool_ops->set_ufo(dev, edata.data); 4.1830 - } 4.1831 - 4.1832 -+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) 4.1833 -+{ 4.1834 -+ struct ethtool_value edata = { ETHTOOL_GGSO }; 4.1835 -+ 4.1836 -+ edata.data = dev->features & NETIF_F_GSO; 4.1837 -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) 4.1838 -+ return -EFAULT; 4.1839 -+ return 0; 4.1840 -+} 4.1841 -+ 4.1842 -+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr) 4.1843 -+{ 4.1844 -+ struct ethtool_value edata; 4.1845 -+ 4.1846 -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) 4.1847 -+ return -EFAULT; 4.1848 -+ if (edata.data) 4.1849 -+ dev->features |= NETIF_F_GSO; 4.1850 -+ else 4.1851 -+ dev->features &= ~NETIF_F_GSO; 4.1852 -+ return 0; 4.1853 -+} 4.1854 -+ 4.1855 - static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 4.1856 - { 4.1857 - struct ethtool_test test; 4.1858 -@@ -906,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr) 4.1859 - case ETHTOOL_SUFO: 4.1860 - rc = ethtool_set_ufo(dev, useraddr); 4.1861 - break; 4.1862 -+ case ETHTOOL_GGSO: 4.1863 -+ rc = ethtool_get_gso(dev, useraddr); 4.1864 -+ break; 4.1865 -+ case ETHTOOL_SGSO: 4.1866 -+ rc = ethtool_set_gso(dev, useraddr); 4.1867 -+ break; 4.1868 - default: 4.1869 - rc = -EOPNOTSUPP; 4.1870 - } 4.1871 -diff --git a/net/core/netpoll.c b/net/core/netpoll.c 4.1872 -index ea51f8d..ec28d3b 100644 4.1873 ---- a/net/core/netpoll.c 4.1874 -+++ b/net/core/netpoll.c 4.1875 -@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp 4.1876 - 4.1877 - do { 4.1878 - npinfo->tries--; 4.1879 -- spin_lock(&np->dev->xmit_lock); 4.1880 -- np->dev->xmit_lock_owner = smp_processor_id(); 4.1881 -+ netif_tx_lock(np->dev); 4.1882 - 4.1883 - /* 4.1884 - * network drivers do not expect to be called if the queue is 4.1885 - * stopped. 
4.1886 - */ 4.1887 - if (netif_queue_stopped(np->dev)) { 4.1888 -- np->dev->xmit_lock_owner = -1; 4.1889 -- spin_unlock(&np->dev->xmit_lock); 4.1890 -+ netif_tx_unlock(np->dev); 4.1891 - netpoll_poll(np); 4.1892 - udelay(50); 4.1893 - continue; 4.1894 - } 4.1895 - 4.1896 - status = np->dev->hard_start_xmit(skb, np->dev); 4.1897 -- np->dev->xmit_lock_owner = -1; 4.1898 -- spin_unlock(&np->dev->xmit_lock); 4.1899 -+ netif_tx_unlock(np->dev); 4.1900 - 4.1901 - /* success */ 4.1902 - if(!status) { 4.1903 -diff --git a/net/core/pktgen.c b/net/core/pktgen.c 4.1904 -index da16f8f..2380347 100644 4.1905 ---- a/net/core/pktgen.c 4.1906 -+++ b/net/core/pktgen.c 4.1907 -@@ -2582,7 +2582,7 @@ static __inline__ void pktgen_xmit(struc 4.1908 - } 4.1909 - } 4.1910 - 4.1911 -- spin_lock_bh(&odev->xmit_lock); 4.1912 -+ netif_tx_lock_bh(odev); 4.1913 - if (!netif_queue_stopped(odev)) { 4.1914 - 4.1915 - atomic_inc(&(pkt_dev->skb->users)); 4.1916 -@@ -2627,7 +2627,7 @@ retry_now: 4.1917 - pkt_dev->next_tx_ns = 0; 4.1918 - } 4.1919 - 4.1920 -- spin_unlock_bh(&odev->xmit_lock); 4.1921 -+ netif_tx_unlock_bh(odev); 4.1922 - 4.1923 - /* If pkt_dev->count is zero, then run forever */ 4.1924 - if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 4.1925 -diff --git a/net/core/skbuff.c b/net/core/skbuff.c 4.1926 -index 2144952..46f56af 100644 4.1927 ---- a/net/core/skbuff.c 4.1928 -+++ b/net/core/skbuff.c 4.1929 -@@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int 4.1930 - shinfo = skb_shinfo(skb); 4.1931 - atomic_set(&shinfo->dataref, 1); 4.1932 - shinfo->nr_frags = 0; 4.1933 -- shinfo->tso_size = 0; 4.1934 -- shinfo->tso_segs = 0; 4.1935 -- shinfo->ufo_size = 0; 4.1936 -+ shinfo->gso_size = 0; 4.1937 -+ shinfo->gso_segs = 0; 4.1938 -+ shinfo->gso_type = 0; 4.1939 - shinfo->ip6_frag_id = 0; 4.1940 - shinfo->frag_list = NULL; 4.1941 - 4.1942 -@@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme 4.1943 - 4.1944 - atomic_set(&(skb_shinfo(skb)->dataref), 1); 4.1945 - skb_shinfo(skb)->nr_frags = 0; 4.1946 -- skb_shinfo(skb)->tso_size = 0; 4.1947 -- skb_shinfo(skb)->tso_segs = 0; 4.1948 -+ skb_shinfo(skb)->gso_size = 0; 4.1949 -+ skb_shinfo(skb)->gso_segs = 0; 4.1950 -+ skb_shinfo(skb)->gso_type = 0; 4.1951 - skb_shinfo(skb)->frag_list = NULL; 4.1952 - out: 4.1953 - return skb; 4.1954 -@@ -501,8 +502,9 @@ #endif 4.1955 - new->tc_index = old->tc_index; 4.1956 - #endif 4.1957 - atomic_set(&new->users, 1); 4.1958 -- skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size; 4.1959 -- skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs; 4.1960 -+ skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 4.1961 -+ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 4.1962 -+ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 4.1963 - } 4.1964 - 4.1965 - /** 4.1966 -@@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock 4.1967 - return 0; 4.1968 - } 4.1969 - 4.1970 -+/** 4.1971 -+ * skb_segment - Perform protocol segmentation on skb. 4.1972 -+ * @skb: buffer to segment 4.1973 -+ * @features: features for the output path (see dev->features) 4.1974 -+ * 4.1975 -+ * This function performs segmentation on the given skb. It returns 4.1976 -+ * the segment at the given position. It returns NULL if there are 4.1977 -+ * no more segments to generate, or when an error is encountered. 
4.1978 -+ */ 4.1979 -+struct sk_buff *skb_segment(struct sk_buff *skb, int features) 4.1980 -+{ 4.1981 -+ struct sk_buff *segs = NULL; 4.1982 -+ struct sk_buff *tail = NULL; 4.1983 -+ unsigned int mss = skb_shinfo(skb)->gso_size; 4.1984 -+ unsigned int doffset = skb->data - skb->mac.raw; 4.1985 -+ unsigned int offset = doffset; 4.1986 -+ unsigned int headroom; 4.1987 -+ unsigned int len; 4.1988 -+ int sg = features & NETIF_F_SG; 4.1989 -+ int nfrags = skb_shinfo(skb)->nr_frags; 4.1990 -+ int err = -ENOMEM; 4.1991 -+ int i = 0; 4.1992 -+ int pos; 4.1993 -+ 4.1994 -+ __skb_push(skb, doffset); 4.1995 -+ headroom = skb_headroom(skb); 4.1996 -+ pos = skb_headlen(skb); 4.1997 -+ 4.1998 -+ do { 4.1999 -+ struct sk_buff *nskb; 4.2000 -+ skb_frag_t *frag; 4.2001 -+ int hsize, nsize; 4.2002 -+ int k; 4.2003 -+ int size; 4.2004 -+ 4.2005 -+ len = skb->len - offset; 4.2006 -+ if (len > mss) 4.2007 -+ len = mss; 4.2008 -+ 4.2009 -+ hsize = skb_headlen(skb) - offset; 4.2010 -+ if (hsize < 0) 4.2011 -+ hsize = 0; 4.2012 -+ nsize = hsize + doffset; 4.2013 -+ if (nsize > len + doffset || !sg) 4.2014 -+ nsize = len + doffset; 4.2015 -+ 4.2016 -+ nskb = alloc_skb(nsize + headroom, GFP_ATOMIC); 4.2017 -+ if (unlikely(!nskb)) 4.2018 -+ goto err; 4.2019 -+ 4.2020 -+ if (segs) 4.2021 -+ tail->next = nskb; 4.2022 -+ else 4.2023 -+ segs = nskb; 4.2024 -+ tail = nskb; 4.2025 -+ 4.2026 -+ nskb->dev = skb->dev; 4.2027 -+ nskb->priority = skb->priority; 4.2028 -+ nskb->protocol = skb->protocol; 4.2029 -+ nskb->dst = dst_clone(skb->dst); 4.2030 -+ memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4.2031 -+ nskb->pkt_type = skb->pkt_type; 4.2032 -+ nskb->mac_len = skb->mac_len; 4.2033 -+ 4.2034 -+ skb_reserve(nskb, headroom); 4.2035 -+ nskb->mac.raw = nskb->data; 4.2036 -+ nskb->nh.raw = nskb->data + skb->mac_len; 4.2037 -+ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); 4.2038 -+ memcpy(skb_put(nskb, doffset), skb->data, doffset); 4.2039 -+ 4.2040 -+ if (!sg) { 4.2041 -+ nskb->csum = skb_copy_and_csum_bits(skb, offset, 4.2042 -+ skb_put(nskb, len), 4.2043 -+ len, 0); 4.2044 -+ continue; 4.2045 -+ } 4.2046 -+ 4.2047 -+ frag = skb_shinfo(nskb)->frags; 4.2048 -+ k = 0; 4.2049 -+ 4.2050 -+ nskb->ip_summed = CHECKSUM_HW; 4.2051 -+ nskb->csum = skb->csum; 4.2052 -+ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); 4.2053 -+ 4.2054 -+ while (pos < offset + len) { 4.2055 -+ BUG_ON(i >= nfrags); 4.2056 -+ 4.2057 -+ *frag = skb_shinfo(skb)->frags[i]; 4.2058 -+ get_page(frag->page); 4.2059 -+ size = frag->size; 4.2060 -+ 4.2061 -+ if (pos < offset) { 4.2062 -+ frag->page_offset += offset - pos; 4.2063 -+ frag->size -= offset - pos; 4.2064 -+ } 4.2065 -+ 4.2066 -+ k++; 4.2067 -+ 4.2068 -+ if (pos + size <= offset + len) { 4.2069 -+ i++; 4.2070 -+ pos += size; 4.2071 -+ } else { 4.2072 -+ frag->size -= pos + size - (offset + len); 4.2073 -+ break; 4.2074 -+ } 4.2075 -+ 4.2076 -+ frag++; 4.2077 -+ } 4.2078 -+ 4.2079 -+ skb_shinfo(nskb)->nr_frags = k; 4.2080 -+ nskb->data_len = len - hsize; 4.2081 -+ nskb->len += nskb->data_len; 4.2082 -+ nskb->truesize += nskb->data_len; 4.2083 -+ } while ((offset += len) < skb->len); 4.2084 -+ 4.2085 -+ return segs; 4.2086 -+ 4.2087 -+err: 4.2088 -+ while ((skb = segs)) { 4.2089 -+ segs = skb->next; 4.2090 -+ kfree(skb); 4.2091 -+ } 4.2092 -+ return ERR_PTR(err); 4.2093 -+} 4.2094 -+ 4.2095 -+EXPORT_SYMBOL_GPL(skb_segment); 4.2096 -+ 4.2097 - void __init skb_init(void) 4.2098 - { 4.2099 - skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 4.2100 -diff --git 
a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c 4.2101 -index 44bda85..2e3323a 100644 4.2102 ---- a/net/decnet/dn_nsp_in.c 4.2103 -+++ b/net/decnet/dn_nsp_in.c 4.2104 -@@ -801,8 +801,7 @@ got_it: 4.2105 - * We linearize everything except data segments here. 4.2106 - */ 4.2107 - if (cb->nsp_flags & ~0x60) { 4.2108 -- if (unlikely(skb_is_nonlinear(skb)) && 4.2109 -- skb_linearize(skb, GFP_ATOMIC) != 0) 4.2110 -+ if (unlikely(skb_linearize(skb))) 4.2111 - goto free_out; 4.2112 - } 4.2113 - 4.2114 -diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c 4.2115 -index 3407f19..a0a25e0 100644 4.2116 ---- a/net/decnet/dn_route.c 4.2117 -+++ b/net/decnet/dn_route.c 4.2118 -@@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st 4.2119 - padlen); 4.2120 - 4.2121 - if (flags & DN_RT_PKT_CNTL) { 4.2122 -- if (unlikely(skb_is_nonlinear(skb)) && 4.2123 -- skb_linearize(skb, GFP_ATOMIC) != 0) 4.2124 -+ if (unlikely(skb_linearize(skb))) 4.2125 - goto dump_it; 4.2126 - 4.2127 - switch(flags & DN_RT_CNTL_MSK) { 4.2128 -diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c 4.2129 -index 97c276f..0a8c559 100644 4.2130 ---- a/net/ipv4/af_inet.c 4.2131 -+++ b/net/ipv4/af_inet.c 4.2132 -@@ -68,6 +68,7 @@ 4.2133 - */ 4.2134 - 4.2135 - #include <linux/config.h> 4.2136 -+#include <linux/err.h> 4.2137 - #include <linux/errno.h> 4.2138 - #include <linux/types.h> 4.2139 - #include <linux/socket.h> 4.2140 -@@ -1084,6 +1085,88 @@ int inet_sk_rebuild_header(struct sock * 4.2141 - 4.2142 - EXPORT_SYMBOL(inet_sk_rebuild_header); 4.2143 - 4.2144 -+static int inet_gso_send_check(struct sk_buff *skb) 4.2145 -+{ 4.2146 -+ struct iphdr *iph; 4.2147 -+ struct net_protocol *ops; 4.2148 -+ int proto; 4.2149 -+ int ihl; 4.2150 -+ int err = -EINVAL; 4.2151 -+ 4.2152 -+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 4.2153 -+ goto out; 4.2154 -+ 4.2155 -+ iph = skb->nh.iph; 4.2156 -+ ihl = iph->ihl * 4; 4.2157 -+ if (ihl < sizeof(*iph)) 4.2158 -+ goto out; 4.2159 -+ 4.2160 -+ if (unlikely(!pskb_may_pull(skb, ihl))) 4.2161 -+ goto out; 4.2162 -+ 4.2163 -+ skb->h.raw = __skb_pull(skb, ihl); 4.2164 -+ iph = skb->nh.iph; 4.2165 -+ proto = iph->protocol & (MAX_INET_PROTOS - 1); 4.2166 -+ err = -EPROTONOSUPPORT; 4.2167 -+ 4.2168 -+ rcu_read_lock(); 4.2169 -+ ops = rcu_dereference(inet_protos[proto]); 4.2170 -+ if (likely(ops && ops->gso_send_check)) 4.2171 -+ err = ops->gso_send_check(skb); 4.2172 -+ rcu_read_unlock(); 4.2173 -+ 4.2174 -+out: 4.2175 -+ return err; 4.2176 -+} 4.2177 -+ 4.2178 -+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 4.2179 -+{ 4.2180 -+ struct sk_buff *segs = ERR_PTR(-EINVAL); 4.2181 -+ struct iphdr *iph; 4.2182 -+ struct net_protocol *ops; 4.2183 -+ int proto; 4.2184 -+ int ihl; 4.2185 -+ int id; 4.2186 -+ 4.2187 -+ if (!pskb_may_pull(skb, sizeof(*iph))) 4.2188 -+ goto out; 4.2189 -+ 4.2190 -+ iph = skb->nh.iph; 4.2191 -+ ihl = iph->ihl * 4; 4.2192 -+ if (ihl < sizeof(*iph)) 4.2193 -+ goto out; 4.2194 -+ 4.2195 -+ if (!pskb_may_pull(skb, ihl)) 4.2196 -+ goto out; 4.2197 -+ 4.2198 -+ skb->h.raw = __skb_pull(skb, ihl); 4.2199 -+ iph = skb->nh.iph; 4.2200 -+ id = ntohs(iph->id); 4.2201 -+ proto = iph->protocol & (MAX_INET_PROTOS - 1); 4.2202 -+ segs = ERR_PTR(-EPROTONOSUPPORT); 4.2203 -+ 4.2204 -+ rcu_read_lock(); 4.2205 -+ ops = rcu_dereference(inet_protos[proto]); 4.2206 -+ if (ops && ops->gso_segment) 4.2207 -+ segs = ops->gso_segment(skb, features); 4.2208 -+ rcu_read_unlock(); 4.2209 -+ 4.2210 -+ if (!segs || unlikely(IS_ERR(segs))) 4.2211 -+ goto out; 4.2212 -+ 4.2213 -+ 
skb = segs; 4.2214 -+ do { 4.2215 -+ iph = skb->nh.iph; 4.2216 -+ iph->id = htons(id++); 4.2217 -+ iph->tot_len = htons(skb->len - skb->mac_len); 4.2218 -+ iph->check = 0; 4.2219 -+ iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); 4.2220 -+ } while ((skb = skb->next)); 4.2221 -+ 4.2222 -+out: 4.2223 -+ return segs; 4.2224 -+} 4.2225 -+ 4.2226 - #ifdef CONFIG_IP_MULTICAST 4.2227 - static struct net_protocol igmp_protocol = { 4.2228 - .handler = igmp_rcv, 4.2229 -@@ -1093,6 +1176,8 @@ #endif 4.2230 - static struct net_protocol tcp_protocol = { 4.2231 - .handler = tcp_v4_rcv, 4.2232 - .err_handler = tcp_v4_err, 4.2233 -+ .gso_send_check = tcp_v4_gso_send_check, 4.2234 -+ .gso_segment = tcp_tso_segment, 4.2235 - .no_policy = 1, 4.2236 - }; 4.2237 - 4.2238 -@@ -1138,6 +1223,8 @@ static int ipv4_proc_init(void); 4.2239 - static struct packet_type ip_packet_type = { 4.2240 - .type = __constant_htons(ETH_P_IP), 4.2241 - .func = ip_rcv, 4.2242 -+ .gso_send_check = inet_gso_send_check, 4.2243 -+ .gso_segment = inet_gso_segment, 4.2244 - }; 4.2245 - 4.2246 - static int __init inet_init(void) 4.2247 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c 4.2248 -index 8dcba38..2de887c 100644 4.2249 ---- a/net/ipv4/ip_output.c 4.2250 -+++ b/net/ipv4/ip_output.c 4.2251 -@@ -210,8 +210,7 @@ #if defined(CONFIG_NETFILTER) && defined 4.2252 - return dst_output(skb); 4.2253 - } 4.2254 - #endif 4.2255 -- if (skb->len > dst_mtu(skb->dst) && 4.2256 -- !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 4.2257 -+ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) 4.2258 - return ip_fragment(skb, ip_finish_output2); 4.2259 - else 4.2260 - return ip_finish_output2(skb); 4.2261 -@@ -362,7 +361,7 @@ packet_routed: 4.2262 - } 4.2263 - 4.2264 - ip_select_ident_more(iph, &rt->u.dst, sk, 4.2265 -- (skb_shinfo(skb)->tso_segs ?: 1) - 1); 4.2266 -+ (skb_shinfo(skb)->gso_segs ?: 1) - 1); 4.2267 - 4.2268 - /* Add an IP checksum. 
*/ 4.2269 - ip_send_check(iph); 4.2270 -@@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str 4.2271 - (length - transhdrlen)); 4.2272 - if (!err) { 4.2273 - /* specify the length of each IP datagram fragment*/ 4.2274 -- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); 4.2275 -+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 4.2276 -+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4; 4.2277 - __skb_queue_tail(&sk->sk_write_queue, skb); 4.2278 - 4.2279 - return 0; 4.2280 -@@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk, 4.2281 - */ 4.2282 - if (transhdrlen && 4.2283 - length + fragheaderlen <= mtu && 4.2284 -- rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) && 4.2285 -+ rt->u.dst.dev->features & NETIF_F_ALL_CSUM && 4.2286 - !exthdrlen) 4.2287 - csummode = CHECKSUM_HW; 4.2288 - 4.2289 -@@ -1086,14 +1086,16 @@ ssize_t ip_append_page(struct sock *sk, 4.2290 - 4.2291 - inet->cork.length += size; 4.2292 - if ((sk->sk_protocol == IPPROTO_UDP) && 4.2293 -- (rt->u.dst.dev->features & NETIF_F_UFO)) 4.2294 -- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); 4.2295 -+ (rt->u.dst.dev->features & NETIF_F_UFO)) { 4.2296 -+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 4.2297 -+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4; 4.2298 -+ } 4.2299 - 4.2300 - 4.2301 - while (size > 0) { 4.2302 - int i; 4.2303 - 4.2304 -- if (skb_shinfo(skb)->ufo_size) 4.2305 -+ if (skb_is_gso(skb)) 4.2306 - len = size; 4.2307 - else { 4.2308 - 4.2309 -diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c 4.2310 -index d64e2ec..7494823 100644 4.2311 ---- a/net/ipv4/ipcomp.c 4.2312 -+++ b/net/ipv4/ipcomp.c 4.2313 -@@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat 4.2314 - struct xfrm_decap_state *decap, struct sk_buff *skb) 4.2315 - { 4.2316 - u8 nexthdr; 4.2317 -- int err = 0; 4.2318 -+ int err = -ENOMEM; 4.2319 - struct iphdr *iph; 4.2320 - union { 4.2321 - struct iphdr iph; 4.2322 -@@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat 4.2323 - } tmp_iph; 4.2324 - 4.2325 - 4.2326 -- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 4.2327 -- skb_linearize(skb, GFP_ATOMIC) != 0) { 4.2328 -- err = -ENOMEM; 4.2329 -+ if (skb_linearize_cow(skb)) 4.2330 - goto out; 4.2331 -- } 4.2332 - 4.2333 - skb->ip_summed = CHECKSUM_NONE; 4.2334 - 4.2335 -@@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta 4.2336 - goto out_ok; 4.2337 - } 4.2338 - 4.2339 -- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 4.2340 -- skb_linearize(skb, GFP_ATOMIC) != 0) { 4.2341 -+ if (skb_linearize_cow(skb)) 4.2342 - goto out_ok; 4.2343 -- } 4.2344 - 4.2345 - err = ipcomp_compress(x, skb); 4.2346 - iph = skb->nh.iph; 4.2347 -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c 4.2348 -index 00aa80e..30c81a8 100644 4.2349 ---- a/net/ipv4/tcp.c 4.2350 -+++ b/net/ipv4/tcp.c 4.2351 -@@ -257,6 +257,7 @@ #include <linux/smp_lock.h> 4.2352 - #include <linux/fs.h> 4.2353 - #include <linux/random.h> 4.2354 - #include <linux/bootmem.h> 4.2355 -+#include <linux/err.h> 4.2356 - 4.2357 - #include <net/icmp.h> 4.2358 - #include <net/tcp.h> 4.2359 -@@ -570,7 +571,7 @@ new_segment: 4.2360 - skb->ip_summed = CHECKSUM_HW; 4.2361 - tp->write_seq += copy; 4.2362 - TCP_SKB_CB(skb)->end_seq += copy; 4.2363 -- skb_shinfo(skb)->tso_segs = 0; 4.2364 -+ skb_shinfo(skb)->gso_segs = 0; 4.2365 - 4.2366 - if (!copied) 4.2367 - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 4.2368 -@@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock 4.2369 - ssize_t res; 4.2370 - struct sock *sk = sock->sk; 4.2371 - 4.2372 --#define TCP_ZC_CSUM_FLAGS 
(NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 4.2373 -- 4.2374 - if (!(sk->sk_route_caps & NETIF_F_SG) || 4.2375 -- !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS)) 4.2376 -+ !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 4.2377 - return sock_no_sendpage(sock, page, offset, size, flags); 4.2378 - 4.2379 --#undef TCP_ZC_CSUM_FLAGS 4.2380 -- 4.2381 - lock_sock(sk); 4.2382 - TCP_CHECK_TIMER(sk); 4.2383 - res = do_tcp_sendpages(sk, &page, offset, size, flags); 4.2384 -@@ -725,9 +722,7 @@ new_segment: 4.2385 - /* 4.2386 - * Check whether we can use HW checksum. 4.2387 - */ 4.2388 -- if (sk->sk_route_caps & 4.2389 -- (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | 4.2390 -- NETIF_F_HW_CSUM)) 4.2391 -+ if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 4.2392 - skb->ip_summed = CHECKSUM_HW; 4.2393 - 4.2394 - skb_entail(sk, tp, skb); 4.2395 -@@ -823,7 +818,7 @@ new_segment: 4.2396 - 4.2397 - tp->write_seq += copy; 4.2398 - TCP_SKB_CB(skb)->end_seq += copy; 4.2399 -- skb_shinfo(skb)->tso_segs = 0; 4.2400 -+ skb_shinfo(skb)->gso_segs = 0; 4.2401 - 4.2402 - from += copy; 4.2403 - copied += copy; 4.2404 -@@ -2026,6 +2021,77 @@ int tcp_getsockopt(struct sock *sk, int 4.2405 - } 4.2406 - 4.2407 - 4.2408 -+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) 4.2409 -+{ 4.2410 -+ struct sk_buff *segs = ERR_PTR(-EINVAL); 4.2411 -+ struct tcphdr *th; 4.2412 -+ unsigned thlen; 4.2413 -+ unsigned int seq; 4.2414 -+ unsigned int delta; 4.2415 -+ unsigned int oldlen; 4.2416 -+ unsigned int len; 4.2417 -+ 4.2418 -+ if (!pskb_may_pull(skb, sizeof(*th))) 4.2419 -+ goto out; 4.2420 -+ 4.2421 -+ th = skb->h.th; 4.2422 -+ thlen = th->doff * 4; 4.2423 -+ if (thlen < sizeof(*th)) 4.2424 -+ goto out; 4.2425 -+ 4.2426 -+ if (!pskb_may_pull(skb, thlen)) 4.2427 -+ goto out; 4.2428 -+ 4.2429 -+ oldlen = (u16)~skb->len; 4.2430 -+ __skb_pull(skb, thlen); 4.2431 -+ 4.2432 -+ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { 4.2433 -+ /* Packet is from an untrusted source, reset gso_segs. 
*/ 4.2434 -+ int mss = skb_shinfo(skb)->gso_size; 4.2435 -+ 4.2436 -+ skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; 4.2437 -+ 4.2438 -+ segs = NULL; 4.2439 -+ goto out; 4.2440 -+ } 4.2441 -+ 4.2442 -+ segs = skb_segment(skb, features); 4.2443 -+ if (IS_ERR(segs)) 4.2444 -+ goto out; 4.2445 -+ 4.2446 -+ len = skb_shinfo(skb)->gso_size; 4.2447 -+ delta = htonl(oldlen + (thlen + len)); 4.2448 -+ 4.2449 -+ skb = segs; 4.2450 -+ th = skb->h.th; 4.2451 -+ seq = ntohl(th->seq); 4.2452 -+ 4.2453 -+ do { 4.2454 -+ th->fin = th->psh = 0; 4.2455 -+ 4.2456 -+ th->check = ~csum_fold(th->check + delta); 4.2457 -+ if (skb->ip_summed != CHECKSUM_HW) 4.2458 -+ th->check = csum_fold(csum_partial(skb->h.raw, thlen, 4.2459 -+ skb->csum)); 4.2460 -+ 4.2461 -+ seq += len; 4.2462 -+ skb = skb->next; 4.2463 -+ th = skb->h.th; 4.2464 -+ 4.2465 -+ th->seq = htonl(seq); 4.2466 -+ th->cwr = 0; 4.2467 -+ } while (skb->next); 4.2468 -+ 4.2469 -+ delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); 4.2470 -+ th->check = ~csum_fold(th->check + delta); 4.2471 -+ if (skb->ip_summed != CHECKSUM_HW) 4.2472 -+ th->check = csum_fold(csum_partial(skb->h.raw, thlen, 4.2473 -+ skb->csum)); 4.2474 -+ 4.2475 -+out: 4.2476 -+ return segs; 4.2477 -+} 4.2478 -+ 4.2479 - extern void __skb_cb_too_small_for_tcp(int, int); 4.2480 - extern struct tcp_congestion_ops tcp_reno; 4.2481 - 4.2482 -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c 4.2483 -index e9a54ae..defe77a 100644 4.2484 ---- a/net/ipv4/tcp_input.c 4.2485 -+++ b/net/ipv4/tcp_input.c 4.2486 -@@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk, 4.2487 - else 4.2488 - pkt_len = (end_seq - 4.2489 - TCP_SKB_CB(skb)->seq); 4.2490 -- if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size)) 4.2491 -+ if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size)) 4.2492 - break; 4.2493 - pcount = tcp_skb_pcount(skb); 4.2494 - } 4.2495 -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c 4.2496 -index 233bdf2..b4240b4 100644 4.2497 ---- a/net/ipv4/tcp_ipv4.c 4.2498 -+++ b/net/ipv4/tcp_ipv4.c 4.2499 -@@ -495,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, 4.2500 - } 4.2501 - } 4.2502 - 4.2503 -+int tcp_v4_gso_send_check(struct sk_buff *skb) 4.2504 -+{ 4.2505 -+ struct iphdr *iph; 4.2506 -+ struct tcphdr *th; 4.2507 -+ 4.2508 -+ if (!pskb_may_pull(skb, sizeof(*th))) 4.2509 -+ return -EINVAL; 4.2510 -+ 4.2511 -+ iph = skb->nh.iph; 4.2512 -+ th = skb->h.th; 4.2513 -+ 4.2514 -+ th->check = 0; 4.2515 -+ th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); 4.2516 -+ skb->csum = offsetof(struct tcphdr, check); 4.2517 -+ skb->ip_summed = CHECKSUM_HW; 4.2518 -+ return 0; 4.2519 -+} 4.2520 -+ 4.2521 - /* 4.2522 - * This routine will send an RST to the other tcp. 4.2523 - * 4.2524 -diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c 4.2525 -index 310f2e6..ee01f69 100644 4.2526 ---- a/net/ipv4/tcp_output.c 4.2527 -+++ b/net/ipv4/tcp_output.c 4.2528 -@@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct 4.2529 - /* Avoid the costly divide in the normal 4.2530 - * non-TSO case. 
4.2531 - */ 4.2532 -- skb_shinfo(skb)->tso_segs = 1; 4.2533 -- skb_shinfo(skb)->tso_size = 0; 4.2534 -+ skb_shinfo(skb)->gso_segs = 1; 4.2535 -+ skb_shinfo(skb)->gso_size = 0; 4.2536 -+ skb_shinfo(skb)->gso_type = 0; 4.2537 - } else { 4.2538 - unsigned int factor; 4.2539 - 4.2540 - factor = skb->len + (mss_now - 1); 4.2541 - factor /= mss_now; 4.2542 -- skb_shinfo(skb)->tso_segs = factor; 4.2543 -- skb_shinfo(skb)->tso_size = mss_now; 4.2544 -+ skb_shinfo(skb)->gso_segs = factor; 4.2545 -+ skb_shinfo(skb)->gso_size = mss_now; 4.2546 -+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 4.2547 - } 4.2548 - } 4.2549 - 4.2550 -@@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock 4.2551 - 4.2552 - if (!tso_segs || 4.2553 - (tso_segs > 1 && 4.2554 -- skb_shinfo(skb)->tso_size != mss_now)) { 4.2555 -+ tcp_skb_mss(skb) != mss_now)) { 4.2556 - tcp_set_skb_tso_segs(sk, skb, mss_now); 4.2557 - tso_segs = tcp_skb_pcount(skb); 4.2558 - } 4.2559 -@@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk, 4.2560 - tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 4.2561 - if (!pskb_trim(skb, 0)) { 4.2562 - TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 4.2563 -- skb_shinfo(skb)->tso_segs = 1; 4.2564 -- skb_shinfo(skb)->tso_size = 0; 4.2565 -+ skb_shinfo(skb)->gso_segs = 1; 4.2566 -+ skb_shinfo(skb)->gso_size = 0; 4.2567 -+ skb_shinfo(skb)->gso_type = 0; 4.2568 - skb->ip_summed = CHECKSUM_NONE; 4.2569 - skb->csum = 0; 4.2570 - } 4.2571 -@@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk) 4.2572 - skb->csum = 0; 4.2573 - TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 4.2574 - TCP_SKB_CB(skb)->sacked = 0; 4.2575 -- skb_shinfo(skb)->tso_segs = 1; 4.2576 -- skb_shinfo(skb)->tso_size = 0; 4.2577 -+ skb_shinfo(skb)->gso_segs = 1; 4.2578 -+ skb_shinfo(skb)->gso_size = 0; 4.2579 -+ skb_shinfo(skb)->gso_type = 0; 4.2580 - 4.2581 - /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 4.2582 - TCP_SKB_CB(skb)->seq = tp->write_seq; 4.2583 -@@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock * 4.2584 - skb->csum = 0; 4.2585 - TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 4.2586 - TCP_SKB_CB(skb)->sacked = 0; 4.2587 -- skb_shinfo(skb)->tso_segs = 1; 4.2588 -- skb_shinfo(skb)->tso_size = 0; 4.2589 -+ skb_shinfo(skb)->gso_segs = 1; 4.2590 -+ skb_shinfo(skb)->gso_size = 0; 4.2591 -+ skb_shinfo(skb)->gso_type = 0; 4.2592 - 4.2593 - /* Send it off. 
*/ 4.2594 - TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); 4.2595 -@@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct 4.2596 - TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 4.2597 - TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 4.2598 - TCP_SKB_CB(skb)->sacked = 0; 4.2599 -- skb_shinfo(skb)->tso_segs = 1; 4.2600 -- skb_shinfo(skb)->tso_size = 0; 4.2601 -+ skb_shinfo(skb)->gso_segs = 1; 4.2602 -+ skb_shinfo(skb)->gso_size = 0; 4.2603 -+ skb_shinfo(skb)->gso_type = 0; 4.2604 - th->seq = htonl(TCP_SKB_CB(skb)->seq); 4.2605 - th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 4.2606 - if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 4.2607 -@@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk) 4.2608 - TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; 4.2609 - TCP_ECN_send_syn(sk, tp, buff); 4.2610 - TCP_SKB_CB(buff)->sacked = 0; 4.2611 -- skb_shinfo(buff)->tso_segs = 1; 4.2612 -- skb_shinfo(buff)->tso_size = 0; 4.2613 -+ skb_shinfo(buff)->gso_segs = 1; 4.2614 -+ skb_shinfo(buff)->gso_size = 0; 4.2615 -+ skb_shinfo(buff)->gso_type = 0; 4.2616 - buff->csum = 0; 4.2617 - TCP_SKB_CB(buff)->seq = tp->write_seq++; 4.2618 - TCP_SKB_CB(buff)->end_seq = tp->write_seq; 4.2619 -@@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk) 4.2620 - buff->csum = 0; 4.2621 - TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; 4.2622 - TCP_SKB_CB(buff)->sacked = 0; 4.2623 -- skb_shinfo(buff)->tso_segs = 1; 4.2624 -- skb_shinfo(buff)->tso_size = 0; 4.2625 -+ skb_shinfo(buff)->gso_segs = 1; 4.2626 -+ skb_shinfo(buff)->gso_size = 0; 4.2627 -+ skb_shinfo(buff)->gso_type = 0; 4.2628 - 4.2629 - /* Send it off, this clears delayed acks for us. */ 4.2630 - TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); 4.2631 -@@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc 4.2632 - skb->csum = 0; 4.2633 - TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 4.2634 - TCP_SKB_CB(skb)->sacked = urgent; 4.2635 -- skb_shinfo(skb)->tso_segs = 1; 4.2636 -- skb_shinfo(skb)->tso_size = 0; 4.2637 -+ skb_shinfo(skb)->gso_segs = 1; 4.2638 -+ skb_shinfo(skb)->gso_size = 0; 4.2639 -+ skb_shinfo(skb)->gso_type = 0; 4.2640 - 4.2641 - /* Use a previous sequence. This should cause the other 4.2642 - * end to send an ack. 
Don't queue or clone SKB, just 4.2643 -diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c 4.2644 -index 32ad229..62ead52 100644 4.2645 ---- a/net/ipv4/xfrm4_output.c 4.2646 -+++ b/net/ipv4/xfrm4_output.c 4.2647 -@@ -9,6 +9,8 @@ 4.2648 - */ 4.2649 - 4.2650 - #include <linux/compiler.h> 4.2651 -+#include <linux/if_ether.h> 4.2652 -+#include <linux/kernel.h> 4.2653 - #include <linux/skbuff.h> 4.2654 - #include <linux/spinlock.h> 4.2655 - #include <linux/netfilter_ipv4.h> 4.2656 -@@ -152,16 +154,10 @@ error_nolock: 4.2657 - goto out_exit; 4.2658 - } 4.2659 - 4.2660 --static int xfrm4_output_finish(struct sk_buff *skb) 4.2661 -+static int xfrm4_output_finish2(struct sk_buff *skb) 4.2662 - { 4.2663 - int err; 4.2664 - 4.2665 --#ifdef CONFIG_NETFILTER 4.2666 -- if (!skb->dst->xfrm) { 4.2667 -- IPCB(skb)->flags |= IPSKB_REROUTED; 4.2668 -- return dst_output(skb); 4.2669 -- } 4.2670 --#endif 4.2671 - while (likely((err = xfrm4_output_one(skb)) == 0)) { 4.2672 - nf_reset(skb); 4.2673 - 4.2674 -@@ -174,7 +170,7 @@ #endif 4.2675 - return dst_output(skb); 4.2676 - 4.2677 - err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL, 4.2678 -- skb->dst->dev, xfrm4_output_finish); 4.2679 -+ skb->dst->dev, xfrm4_output_finish2); 4.2680 - if (unlikely(err != 1)) 4.2681 - break; 4.2682 - } 4.2683 -@@ -182,6 +178,48 @@ #endif 4.2684 - return err; 4.2685 - } 4.2686 - 4.2687 -+static int xfrm4_output_finish(struct sk_buff *skb) 4.2688 -+{ 4.2689 -+ struct sk_buff *segs; 4.2690 -+ 4.2691 -+#ifdef CONFIG_NETFILTER 4.2692 -+ if (!skb->dst->xfrm) { 4.2693 -+ IPCB(skb)->flags |= IPSKB_REROUTED; 4.2694 -+ return dst_output(skb); 4.2695 -+ } 4.2696 -+#endif 4.2697 -+ 4.2698 -+ if (!skb_is_gso(skb)) 4.2699 -+ return xfrm4_output_finish2(skb); 4.2700 -+ 4.2701 -+ skb->protocol = htons(ETH_P_IP); 4.2702 -+ segs = skb_gso_segment(skb, 0); 4.2703 -+ kfree_skb(skb); 4.2704 -+ if (unlikely(IS_ERR(segs))) 4.2705 -+ return PTR_ERR(segs); 4.2706 -+ 4.2707 -+ do { 4.2708 -+ struct sk_buff *nskb = segs->next; 4.2709 -+ int err; 4.2710 -+ 4.2711 -+ segs->next = NULL; 4.2712 -+ err = xfrm4_output_finish2(segs); 4.2713 -+ 4.2714 -+ if (unlikely(err)) { 4.2715 -+ while ((segs = nskb)) { 4.2716 -+ nskb = segs->next; 4.2717 -+ segs->next = NULL; 4.2718 -+ kfree_skb(segs); 4.2719 -+ } 4.2720 -+ return err; 4.2721 -+ } 4.2722 -+ 4.2723 -+ segs = nskb; 4.2724 -+ } while (segs); 4.2725 -+ 4.2726 -+ return 0; 4.2727 -+} 4.2728 -+ 4.2729 - int xfrm4_output(struct sk_buff *skb) 4.2730 - { 4.2731 - return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, 4.2732 -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c 4.2733 -index 5bf70b1..33a5850 100644 4.2734 ---- a/net/ipv6/ip6_output.c 4.2735 -+++ b/net/ipv6/ip6_output.c 4.2736 -@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s 4.2737 - 4.2738 - int ip6_output(struct sk_buff *skb) 4.2739 - { 4.2740 -- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) || 4.2741 -+ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || 4.2742 - dst_allfrag(skb->dst)) 4.2743 - return ip6_fragment(skb, ip6_output2); 4.2744 - else 4.2745 -@@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st 4.2746 - struct frag_hdr fhdr; 4.2747 - 4.2748 - /* specify the length of each IP datagram fragment*/ 4.2749 -- skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - 4.2750 -- sizeof(struct frag_hdr); 4.2751 -+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 4.2752 -+ sizeof(struct frag_hdr); 4.2753 -+ skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4; 4.2754 - 
ipv6_select_ident(skb, &fhdr); 4.2755 - skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 4.2756 - __skb_queue_tail(&sk->sk_write_queue, skb); 4.2757 -diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c 4.2758 -index d511a88..ef56d5d 100644 4.2759 ---- a/net/ipv6/ipcomp6.c 4.2760 -+++ b/net/ipv6/ipcomp6.c 4.2761 -@@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list); 4.2762 - 4.2763 - static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 4.2764 - { 4.2765 -- int err = 0; 4.2766 -+ int err = -ENOMEM; 4.2767 - u8 nexthdr = 0; 4.2768 - int hdr_len = skb->h.raw - skb->nh.raw; 4.2769 - unsigned char *tmp_hdr = NULL; 4.2770 -@@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta 4.2771 - struct crypto_tfm *tfm; 4.2772 - int cpu; 4.2773 - 4.2774 -- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 4.2775 -- skb_linearize(skb, GFP_ATOMIC) != 0) { 4.2776 -- err = -ENOMEM; 4.2777 -+ if (skb_linearize_cow(skb)) 4.2778 - goto out; 4.2779 -- } 4.2780 - 4.2781 - skb->ip_summed = CHECKSUM_NONE; 4.2782 - 4.2783 -@@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st 4.2784 - goto out_ok; 4.2785 - } 4.2786 - 4.2787 -- if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && 4.2788 -- skb_linearize(skb, GFP_ATOMIC) != 0) { 4.2789 -+ if (skb_linearize_cow(skb)) 4.2790 - goto out_ok; 4.2791 -- } 4.2792 - 4.2793 - /* compression */ 4.2794 - plen = skb->len - hdr_len; 4.2795 -diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c 4.2796 -index 8024217..e9ea338 100644 4.2797 ---- a/net/ipv6/xfrm6_output.c 4.2798 -+++ b/net/ipv6/xfrm6_output.c 4.2799 -@@ -151,7 +151,7 @@ error_nolock: 4.2800 - goto out_exit; 4.2801 - } 4.2802 - 4.2803 --static int xfrm6_output_finish(struct sk_buff *skb) 4.2804 -+static int xfrm6_output_finish2(struct sk_buff *skb) 4.2805 - { 4.2806 - int err; 4.2807 - 4.2808 -@@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk 4.2809 - return dst_output(skb); 4.2810 - 4.2811 - err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL, 4.2812 -- skb->dst->dev, xfrm6_output_finish); 4.2813 -+ skb->dst->dev, xfrm6_output_finish2); 4.2814 - if (unlikely(err != 1)) 4.2815 - break; 4.2816 - } 4.2817 -@@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk 4.2818 - return err; 4.2819 - } 4.2820 - 4.2821 -+static int xfrm6_output_finish(struct sk_buff *skb) 4.2822 -+{ 4.2823 -+ struct sk_buff *segs; 4.2824 -+ 4.2825 -+ if (!skb_is_gso(skb)) 4.2826 -+ return xfrm6_output_finish2(skb); 4.2827 -+ 4.2828 -+ skb->protocol = htons(ETH_P_IP); 4.2829 -+ segs = skb_gso_segment(skb, 0); 4.2830 -+ kfree_skb(skb); 4.2831 -+ if (unlikely(IS_ERR(segs))) 4.2832 -+ return PTR_ERR(segs); 4.2833 -+ 4.2834 -+ do { 4.2835 -+ struct sk_buff *nskb = segs->next; 4.2836 -+ int err; 4.2837 -+ 4.2838 -+ segs->next = NULL; 4.2839 -+ err = xfrm6_output_finish2(segs); 4.2840 -+ 4.2841 -+ if (unlikely(err)) { 4.2842 -+ while ((segs = nskb)) { 4.2843 -+ nskb = segs->next; 4.2844 -+ segs->next = NULL; 4.2845 -+ kfree_skb(segs); 4.2846 -+ } 4.2847 -+ return err; 4.2848 -+ } 4.2849 -+ 4.2850 -+ segs = nskb; 4.2851 -+ } while (segs); 4.2852 -+ 4.2853 -+ return 0; 4.2854 -+} 4.2855 -+ 4.2856 - int xfrm6_output(struct sk_buff *skb) 4.2857 - { 4.2858 - return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev, 4.2859 -diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c 4.2860 -index 99ceb91..28c9efd 100644 4.2861 ---- a/net/sched/sch_generic.c 4.2862 -+++ b/net/sched/sch_generic.c 4.2863 -@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct 
net_device 4.2864 - dev->queue_lock serializes queue accesses for this device 4.2865 - AND dev->qdisc pointer itself. 4.2866 - 4.2867 -- dev->xmit_lock serializes accesses to device driver. 4.2868 -+ netif_tx_lock serializes accesses to device driver. 4.2869 - 4.2870 -- dev->queue_lock and dev->xmit_lock are mutually exclusive, 4.2871 -+ dev->queue_lock and netif_tx_lock are mutually exclusive, 4.2872 - if one is grabbed, another must be free. 4.2873 - */ 4.2874 - 4.2875 -@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device 4.2876 - NOTE: Called under dev->queue_lock with locally disabled BH. 4.2877 - */ 4.2878 - 4.2879 --int qdisc_restart(struct net_device *dev) 4.2880 -+static inline int qdisc_restart(struct net_device *dev) 4.2881 - { 4.2882 - struct Qdisc *q = dev->qdisc; 4.2883 - struct sk_buff *skb; 4.2884 - 4.2885 - /* Dequeue packet */ 4.2886 -- if ((skb = q->dequeue(q)) != NULL) { 4.2887 -+ if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) { 4.2888 - unsigned nolock = (dev->features & NETIF_F_LLTX); 4.2889 -+ 4.2890 -+ dev->gso_skb = NULL; 4.2891 -+ 4.2892 - /* 4.2893 - * When the driver has LLTX set it does its own locking 4.2894 - * in start_xmit. No need to add additional overhead by 4.2895 -@@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev 4.2896 - * will be requeued. 4.2897 - */ 4.2898 - if (!nolock) { 4.2899 -- if (!spin_trylock(&dev->xmit_lock)) { 4.2900 -+ if (!netif_tx_trylock(dev)) { 4.2901 - collision: 4.2902 - /* So, someone grabbed the driver. */ 4.2903 - 4.2904 -@@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev 4.2905 - __get_cpu_var(netdev_rx_stat).cpu_collision++; 4.2906 - goto requeue; 4.2907 - } 4.2908 -- /* Remember that the driver is grabbed by us. */ 4.2909 -- dev->xmit_lock_owner = smp_processor_id(); 4.2910 - } 4.2911 - 4.2912 - { 4.2913 -@@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev 4.2914 - 4.2915 - if (!netif_queue_stopped(dev)) { 4.2916 - int ret; 4.2917 -- if (netdev_nit) 4.2918 -- dev_queue_xmit_nit(skb, dev); 4.2919 - 4.2920 -- ret = dev->hard_start_xmit(skb, dev); 4.2921 -+ ret = dev_hard_start_xmit(skb, dev); 4.2922 - if (ret == NETDEV_TX_OK) { 4.2923 - if (!nolock) { 4.2924 -- dev->xmit_lock_owner = -1; 4.2925 -- spin_unlock(&dev->xmit_lock); 4.2926 -+ netif_tx_unlock(dev); 4.2927 - } 4.2928 - spin_lock(&dev->queue_lock); 4.2929 - return -1; 4.2930 -@@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev 4.2931 - /* NETDEV_TX_BUSY - we need to requeue */ 4.2932 - /* Release the driver */ 4.2933 - if (!nolock) { 4.2934 -- dev->xmit_lock_owner = -1; 4.2935 -- spin_unlock(&dev->xmit_lock); 4.2936 -+ netif_tx_unlock(dev); 4.2937 - } 4.2938 - spin_lock(&dev->queue_lock); 4.2939 - q = dev->qdisc; 4.2940 -@@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev 4.2941 - */ 4.2942 - 4.2943 - requeue: 4.2944 -- q->ops->requeue(skb, q); 4.2945 -+ if (skb->next) 4.2946 -+ dev->gso_skb = skb; 4.2947 -+ else 4.2948 -+ q->ops->requeue(skb, q); 4.2949 - netif_schedule(dev); 4.2950 - return 1; 4.2951 - } 4.2952 -@@ -183,11 +183,23 @@ requeue: 4.2953 - return q->q.qlen; 4.2954 - } 4.2955 - 4.2956 -+void __qdisc_run(struct net_device *dev) 4.2957 -+{ 4.2958 -+ if (unlikely(dev->qdisc == &noop_qdisc)) 4.2959 -+ goto out; 4.2960 -+ 4.2961 -+ while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev)) 4.2962 -+ /* NOTHING */; 4.2963 -+ 4.2964 -+out: 4.2965 -+ clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); 4.2966 -+} 4.2967 -+ 4.2968 - static void dev_watchdog(unsigned long arg) 4.2969 - { 4.2970 - 
struct net_device *dev = (struct net_device *)arg; 4.2971 - 4.2972 -- spin_lock(&dev->xmit_lock); 4.2973 -+ netif_tx_lock(dev); 4.2974 - if (dev->qdisc != &noop_qdisc) { 4.2975 - if (netif_device_present(dev) && 4.2976 - netif_running(dev) && 4.2977 -@@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a 4.2978 - dev_hold(dev); 4.2979 - } 4.2980 - } 4.2981 -- spin_unlock(&dev->xmit_lock); 4.2982 -+ netif_tx_unlock(dev); 4.2983 - 4.2984 - dev_put(dev); 4.2985 - } 4.2986 -@@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev 4.2987 - 4.2988 - static void dev_watchdog_up(struct net_device *dev) 4.2989 - { 4.2990 -- spin_lock_bh(&dev->xmit_lock); 4.2991 -+ netif_tx_lock_bh(dev); 4.2992 - __netdev_watchdog_up(dev); 4.2993 -- spin_unlock_bh(&dev->xmit_lock); 4.2994 -+ netif_tx_unlock_bh(dev); 4.2995 - } 4.2996 - 4.2997 - static void dev_watchdog_down(struct net_device *dev) 4.2998 - { 4.2999 -- spin_lock_bh(&dev->xmit_lock); 4.3000 -+ netif_tx_lock_bh(dev); 4.3001 - if (del_timer(&dev->watchdog_timer)) 4.3002 - __dev_put(dev); 4.3003 -- spin_unlock_bh(&dev->xmit_lock); 4.3004 -+ netif_tx_unlock_bh(dev); 4.3005 - } 4.3006 - 4.3007 - void netif_carrier_on(struct net_device *dev) 4.3008 -@@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d 4.3009 - 4.3010 - dev_watchdog_down(dev); 4.3011 - 4.3012 -- while (test_bit(__LINK_STATE_SCHED, &dev->state)) 4.3013 -+ /* Wait for outstanding dev_queue_xmit calls. */ 4.3014 -+ synchronize_rcu(); 4.3015 -+ 4.3016 -+ /* Wait for outstanding qdisc_run calls. */ 4.3017 -+ while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) 4.3018 - yield(); 4.3019 - 4.3020 -- spin_unlock_wait(&dev->xmit_lock); 4.3021 -+ if (dev->gso_skb) { 4.3022 -+ kfree_skb(dev->gso_skb); 4.3023 -+ dev->gso_skb = NULL; 4.3024 -+ } 4.3025 - } 4.3026 - 4.3027 - void dev_init_scheduler(struct net_device *dev) 4.3028 -@@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt); 4.3029 - EXPORT_SYMBOL(qdisc_alloc); 4.3030 - EXPORT_SYMBOL(qdisc_destroy); 4.3031 - EXPORT_SYMBOL(qdisc_reset); 4.3032 --EXPORT_SYMBOL(qdisc_restart); 4.3033 - EXPORT_SYMBOL(qdisc_lock_tree); 4.3034 - EXPORT_SYMBOL(qdisc_unlock_tree); 4.3035 -diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c 4.3036 -index 79b8ef3..4c16ad5 100644 4.3037 ---- a/net/sched/sch_teql.c 4.3038 -+++ b/net/sched/sch_teql.c 4.3039 -@@ -302,20 +302,17 @@ restart: 4.3040 - 4.3041 - switch (teql_resolve(skb, skb_res, slave)) { 4.3042 - case 0: 4.3043 -- if (spin_trylock(&slave->xmit_lock)) { 4.3044 -- slave->xmit_lock_owner = smp_processor_id(); 4.3045 -+ if (netif_tx_trylock(slave)) { 4.3046 - if (!netif_queue_stopped(slave) && 4.3047 - slave->hard_start_xmit(skb, slave) == 0) { 4.3048 -- slave->xmit_lock_owner = -1; 4.3049 -- spin_unlock(&slave->xmit_lock); 4.3050 -+ netif_tx_unlock(slave); 4.3051 - master->slaves = NEXT_SLAVE(q); 4.3052 - netif_wake_queue(dev); 4.3053 - master->stats.tx_packets++; 4.3054 - master->stats.tx_bytes += len; 4.3055 - return 0; 4.3056 - } 4.3057 -- slave->xmit_lock_owner = -1; 4.3058 -- spin_unlock(&slave->xmit_lock); 4.3059 -+ netif_tx_unlock(slave); 4.3060 - } 4.3061 - if (netif_queue_stopped(dev)) 4.3062 - busy = 1;
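Every open-coded xmit_lock/xmit_lock_owner pair removed above collapses into the netif_tx_lock() helpers. The header half of this patch falls outside the hunks shown here, so the following is a hedged user-space mock of the pattern those helpers embody, assuming the 2.6.16 field names (_xmit_lock, xmit_lock_owner); struct net_device is a two-field stand-in and sched_getcpu() plays the role of smp_processor_id():

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    struct net_device {
            pthread_mutex_t _xmit_lock;
            int xmit_lock_owner;            /* -1 while unlocked */
    };

    static void netif_tx_lock(struct net_device *dev)
    {
            pthread_mutex_lock(&dev->_xmit_lock);
            dev->xmit_lock_owner = sched_getcpu();
    }

    static void netif_tx_unlock(struct net_device *dev)
    {
            dev->xmit_lock_owner = -1;
            pthread_mutex_unlock(&dev->_xmit_lock);
    }

    int main(void)
    {
            struct net_device dev = { PTHREAD_MUTEX_INITIALIZER, -1 };

            netif_tx_lock(&dev);
            printf("tx lock held by cpu %d\n", dev.xmit_lock_owner);
            netif_tx_unlock(&dev);
            return 0;
    }

Centralizing the lock-plus-owner bookkeeping is what lets the callers above shrink from three lines to one and is a prerequisite for the gso_skb requeueing added to qdisc_restart().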
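tcp_tso_segment() above never recomputes the TCP checksum from scratch when hardware will finish it; it patches the pseudo-header length contribution with delta = htonl(oldlen + (thlen + len)), pure one's-complement arithmetic. A standalone illustration of that style of incremental update; csum_update16() is an RFC 1624-style stand-in for the kernel's csum_fold() dance, not a kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into a 16-bit one's-complement checksum. */
    static uint16_t csum_fold16(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* RFC 1624 incremental update: patch a checksum when one 16-bit
     * field of the summed data changes from old to new. */
    static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new)
    {
            uint32_t sum = (uint16_t)~check;

            sum += (uint16_t)~old;          /* remove the old value */
            sum += new;                     /* add the new value */
            return csum_fold16(sum);
    }

    int main(void)
    {
            uint16_t check = 0x1c46;        /* arbitrary example checksum */
            uint16_t patched = csum_update16(check, 4000, 1460);

            /* The inverse update restores the original checksum exactly. */
            printf("%#x -> %#x -> %#x\n", check, patched,
                   csum_update16(patched, 1460, 4000));
            return 0;
    }

Because the update is exactly invertible, the kernel can keep adjusting the same seeded checksum as each segment's length changes, rather than re-summing the payload.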
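inet_gso_segment() above walks the finished segment list and gives each segment a consecutive IP id, its own tot_len, and a freshly computed header checksum. A user-space mock of that per-segment fix-up, with a plain RFC 1071 sum standing in for ip_fast_csum() and the header laid out as raw bytes:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t ip_checksum(const uint8_t *hdr, int len)
    {
            uint32_t sum = 0;

            for (int i = 0; i < len; i += 2)
                    sum += (hdr[i] << 8) | hdr[i + 1];
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint8_t iph[20] = { 0x45 };     /* version 4, ihl 5; rest zeroed */
            uint16_t id = 0x1234;           /* id taken from the super-packet */

            for (int seg = 0; seg < 3; seg++) {
                    uint16_t tot_len = 1500; /* per-segment length, illustrative */

                    iph[2] = tot_len >> 8;  iph[3] = tot_len & 0xff;
                    iph[4] = id >> 8;       iph[5] = id & 0xff;
                    id++;
                    iph[10] = iph[11] = 0;  /* zero check before recomputing */
                    uint16_t check = ip_checksum(iph, sizeof(iph));
                    iph[10] = check >> 8;   iph[11] = check & 0xff;
                    printf("seg %d: id=%#x check=%#x\n", seg, id - 1, check);
            }
            return 0;
    }

This mirrors the loop in the hunk: id is sampled once from the original header, then handed out sequentially, which is also why ip_select_ident_more() above reserves (gso_segs - 1) extra ids per super-packet.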
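The same skb->next list protocol recurs in dev_hard_start_xmit(), xfrm4_output_finish(), and xfrm6_output_finish() above: detach the head, hand it to the transmit path, and on the first error free the untransmitted tail. A self-contained mock of that walk; struct seg and xmit() are illustrative stand-ins, not kernel types, and the mock frees each segment after handing it off where the kernel's transmit path would consume the skb itself:

    #include <stdio.h>
    #include <stdlib.h>

    struct seg { int id; struct seg *next; };

    static int xmit(struct seg *s) { printf("tx seg %d\n", s->id); return 0; }

    static int send_all(struct seg *segs)
    {
            while (segs) {
                    struct seg *nskb = segs->next;
                    int err;

                    segs->next = NULL;      /* detach before handing off */
                    err = xmit(segs);
                    free(segs);
                    if (err) {              /* free the untransmitted tail */
                            while ((segs = nskb)) {
                                    nskb = segs->next;
                                    free(segs);
                            }
                            return err;
                    }
                    segs = nskb;
            }
            return 0;
    }

    int main(void)
    {
            struct seg *head = NULL, **tail = &head;

            for (int i = 0; i < 3; i++) {
                    *tail = calloc(1, sizeof(**tail));
                    (*tail)->id = i;
                    tail = &(*tail)->next;
            }
            return send_all(head);
    }

Clearing next before transmission matters: drivers treat each segment as an ordinary standalone skb, and the qdisc_restart() changes above rely on a partially sent chain being stashed whole in dev->gso_skb for retry rather than requeued through the qdisc.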