patches/linux-2.6.16.33/net-gso-0-base.patch @ 12988:e080700efa56 (direct-io.hg)

[TOOLS] Fix the build. Clearly demarcate PPC-specific stuff.
Signed-off-by: Keir Fraser <keir@xensource.com>

author  kfraser@localhost.localdomain
date    Wed Dec 13 10:23:53 2006 +0000 (2006-12-13)
parents 6c2c7ff6265a
1 Index: tmp-xxx/Documentation/networking/netdevices.txt
2 ===================================================================
3 --- tmp-xxx.orig/Documentation/networking/netdevices.txt 2006-11-15 10:38:39.000000000 +0000
4 +++ tmp-xxx/Documentation/networking/netdevices.txt 2006-11-27 10:52:42.000000000 +0000
5 @@ -42,9 +42,9 @@
6 Context: nominally process, but don't sleep inside an rwlock
8 dev->hard_start_xmit:
9 - Synchronization: dev->xmit_lock spinlock.
10 + Synchronization: netif_tx_lock spinlock.
11 When the driver sets NETIF_F_LLTX in dev->features this will be
12 - called without holding xmit_lock. In this case the driver
13 + called without holding netif_tx_lock. In this case the driver
14 has to lock by itself when needed. It is recommended to use a try lock
15 for this and return -1 when the spin lock fails.
16 The locking there should also properly protect against
17 @@ -62,12 +62,12 @@
18 Only valid when NETIF_F_LLTX is set.
20 dev->tx_timeout:
21 - Synchronization: dev->xmit_lock spinlock.
22 + Synchronization: netif_tx_lock spinlock.
23 Context: BHs disabled
24 Notes: netif_queue_stopped() is guaranteed true
26 dev->set_multicast_list:
27 - Synchronization: dev->xmit_lock spinlock.
28 + Synchronization: netif_tx_lock spinlock.
29 Context: BHs disabled
31 dev->poll:
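
For reference, the NETIF_F_LLTX rule documented above translates into a try-lock pattern inside the driver's own transmit routine. A minimal sketch, assuming a hypothetical driver (foo_priv, foo_queue_frame and the private tx_lock are illustrative, not part of this patch); NETDEV_TX_LOCKED is the -1 the text refers to:

        static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct foo_priv *priv = netdev_priv(dev);

                /* LLTX drivers are called without netif_tx_lock held, so
                 * they lock for themselves; a try lock plus NETDEV_TX_LOCKED
                 * lets the core retry instead of spinning. */
                if (!spin_trylock(&priv->tx_lock))
                        return NETDEV_TX_LOCKED;

                foo_queue_frame(priv, skb);     /* hypothetical hw queueing */
                spin_unlock(&priv->tx_lock);
                return NETDEV_TX_OK;
        }
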
32 Index: tmp-xxx/drivers/block/aoe/aoenet.c
33 ===================================================================
34 --- tmp-xxx.orig/drivers/block/aoe/aoenet.c 2006-11-15 10:38:39.000000000 +0000
35 +++ tmp-xxx/drivers/block/aoe/aoenet.c 2006-11-27 10:52:42.000000000 +0000
36 @@ -95,9 +95,8 @@
37 static struct sk_buff *
38 skb_check(struct sk_buff *skb)
39 {
40 - if (skb_is_nonlinear(skb))
41 if ((skb = skb_share_check(skb, GFP_ATOMIC)))
42 - if (skb_linearize(skb, GFP_ATOMIC) < 0) {
43 + if (skb_linearize(skb)) {
44 dev_kfree_skb(skb);
45 return NULL;
46 }
47 Index: tmp-xxx/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
48 ===================================================================
49 --- tmp-xxx.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-11-15 10:38:39.000000000 +0000
50 +++ tmp-xxx/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-11-27 10:52:42.000000000 +0000
51 @@ -821,7 +821,8 @@
53 ipoib_mcast_stop_thread(dev, 0);
55 - spin_lock_irqsave(&dev->xmit_lock, flags);
56 + local_irq_save(flags);
57 + netif_tx_lock(dev);
58 spin_lock(&priv->lock);
60 /*
61 @@ -896,7 +897,8 @@
62 }
64 spin_unlock(&priv->lock);
65 - spin_unlock_irqrestore(&dev->xmit_lock, flags);
66 + netif_tx_unlock(dev);
67 + local_irq_restore(flags);
69 /* We have to cancel outside of the spinlock */
70 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
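
Note the two-step replacement in this hunk: the patch defines no netif_tx_lock_irqsave() helper, so a former spin_lock_irqsave(&dev->xmit_lock, flags) becomes an open-coded pair. A sketch of the equivalence:

        unsigned long flags;

        /* was: spin_lock_irqsave(&dev->xmit_lock, flags); */
        local_irq_save(flags);
        netif_tx_lock(dev);

        /* ... modify multicast state under the tx lock ... */

        /* was: spin_unlock_irqrestore(&dev->xmit_lock, flags); */
        netif_tx_unlock(dev);
        local_irq_restore(flags);
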
71 Index: tmp-xxx/drivers/media/dvb/dvb-core/dvb_net.c
72 ===================================================================
73 --- tmp-xxx.orig/drivers/media/dvb/dvb-core/dvb_net.c 2006-11-15 10:38:39.000000000 +0000
74 +++ tmp-xxx/drivers/media/dvb/dvb-core/dvb_net.c 2006-11-27 10:52:42.000000000 +0000
75 @@ -1053,7 +1053,7 @@
77 dvb_net_feed_stop(dev);
78 priv->rx_mode = RX_MODE_UNI;
79 - spin_lock_bh(&dev->xmit_lock);
80 + netif_tx_lock_bh(dev);
82 if (dev->flags & IFF_PROMISC) {
83 dprintk("%s: promiscuous mode\n", dev->name);
84 @@ -1078,7 +1078,7 @@
85 }
86 }
88 - spin_unlock_bh(&dev->xmit_lock);
89 + netif_tx_unlock_bh(dev);
90 dvb_net_feed_start(dev);
91 }
93 Index: tmp-xxx/drivers/net/8139cp.c
94 ===================================================================
95 --- tmp-xxx.orig/drivers/net/8139cp.c 2006-11-15 10:38:39.000000000 +0000
96 +++ tmp-xxx/drivers/net/8139cp.c 2006-11-27 10:52:42.000000000 +0000
97 @@ -794,7 +794,7 @@
98 entry = cp->tx_head;
99 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
100 if (dev->features & NETIF_F_TSO)
101 - mss = skb_shinfo(skb)->tso_size;
102 + mss = skb_shinfo(skb)->gso_size;
104 if (skb_shinfo(skb)->nr_frags == 0) {
105 struct cp_desc *txd = &cp->tx_ring[entry];
106 Index: tmp-xxx/drivers/net/bnx2.c
107 ===================================================================
108 --- tmp-xxx.orig/drivers/net/bnx2.c 2006-11-15 10:38:39.000000000 +0000
109 +++ tmp-xxx/drivers/net/bnx2.c 2006-11-27 10:52:42.000000000 +0000
110 @@ -1593,7 +1593,7 @@
111 skb = tx_buf->skb;
112 #ifdef BCM_TSO
113 /* partial BD completions possible with TSO packets */
114 - if (skb_shinfo(skb)->tso_size) {
115 + if (skb_shinfo(skb)->gso_size) {
116 u16 last_idx, last_ring_idx;
118 last_idx = sw_cons +
119 @@ -1948,7 +1948,7 @@
120 return 1;
121 }
123 -/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
124 +/* Called with rtnl_lock from vlan functions and also netif_tx_lock
125 * from set_multicast.
126 */
127 static void
128 @@ -4403,7 +4403,7 @@
129 }
130 #endif
132 -/* Called with dev->xmit_lock.
133 +/* Called with netif_tx_lock.
134 * hard_start_xmit is pseudo-lockless - a lock is only required when
135 * the tx queue is full. This way, we get the benefit of lockless
136 * operations most of the time without the complexities to handle
137 @@ -4441,7 +4441,7 @@
138 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
139 }
140 #ifdef BCM_TSO
141 - if ((mss = skb_shinfo(skb)->tso_size) &&
142 + if ((mss = skb_shinfo(skb)->gso_size) &&
143 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
144 u32 tcp_opt_len, ip_tcp_len;
146 Index: tmp-xxx/drivers/net/bonding/bond_main.c
147 ===================================================================
148 --- tmp-xxx.orig/drivers/net/bonding/bond_main.c 2006-11-15 10:38:39.000000000 +0000
149 +++ tmp-xxx/drivers/net/bonding/bond_main.c 2006-11-27 10:52:42.000000000 +0000
150 @@ -1145,8 +1145,7 @@
151 }
153 #define BOND_INTERSECT_FEATURES \
154 - (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
155 - NETIF_F_TSO|NETIF_F_UFO)
156 + (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
158 /*
159 * Compute the common dev->feature set available to all slaves. Some
160 @@ -1164,9 +1163,7 @@
161 features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
163 if ((features & NETIF_F_SG) &&
164 - !(features & (NETIF_F_IP_CSUM |
165 - NETIF_F_NO_CSUM |
166 - NETIF_F_HW_CSUM)))
167 + !(features & NETIF_F_ALL_CSUM))
168 features &= ~NETIF_F_SG;
170 /*
171 @@ -4147,7 +4144,7 @@
172 */
173 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
175 - /* don't acquire bond device's xmit_lock when
176 + /* don't acquire bond device's netif_tx_lock when
177 * transmitting */
178 bond_dev->features |= NETIF_F_LLTX;
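
The new NETIF_F_ALL_CSUM mask collapses the three checksum bits the old code spelled out. With the feature-flag values this tree uses, the composition (the macros themselves are added to netdevice.h later in this patch) works out as:

        #define NETIF_F_IP_CSUM   2   /* can checksum TCP/UDP over IPv4 */
        #define NETIF_F_NO_CSUM   4   /* loopback etc: no checksum needed */
        #define NETIF_F_HW_CSUM   8   /* can checksum all packets */

        #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)   /* 0x0c */
        #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)  /* 0x0e */
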
180 Index: tmp-xxx/drivers/net/chelsio/sge.c
181 ===================================================================
182 --- tmp-xxx.orig/drivers/net/chelsio/sge.c 2006-11-15 10:38:39.000000000 +0000
183 +++ tmp-xxx/drivers/net/chelsio/sge.c 2006-11-27 10:52:42.000000000 +0000
184 @@ -1419,7 +1419,7 @@
185 struct cpl_tx_pkt *cpl;
187 #ifdef NETIF_F_TSO
188 - if (skb_shinfo(skb)->tso_size) {
189 + if (skb_shinfo(skb)->gso_size) {
190 int eth_type;
191 struct cpl_tx_pkt_lso *hdr;
193 @@ -1434,7 +1434,7 @@
194 hdr->ip_hdr_words = skb->nh.iph->ihl;
195 hdr->tcp_hdr_words = skb->h.th->doff;
196 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
197 - skb_shinfo(skb)->tso_size));
198 + skb_shinfo(skb)->gso_size));
199 hdr->len = htonl(skb->len - sizeof(*hdr));
200 cpl = (struct cpl_tx_pkt *)hdr;
201 sge->stats.tx_lso_pkts++;
202 Index: tmp-xxx/drivers/net/e1000/e1000_main.c
203 ===================================================================
204 --- tmp-xxx.orig/drivers/net/e1000/e1000_main.c 2006-11-15 10:38:39.000000000 +0000
205 +++ tmp-xxx/drivers/net/e1000/e1000_main.c 2006-11-27 10:52:42.000000000 +0000
206 @@ -2526,7 +2526,7 @@
207 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
208 int err;
210 - if (skb_shinfo(skb)->tso_size) {
211 + if (skb_shinfo(skb)->gso_size) {
212 if (skb_header_cloned(skb)) {
213 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
214 if (err)
215 @@ -2534,7 +2534,7 @@
216 }
218 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
219 - mss = skb_shinfo(skb)->tso_size;
220 + mss = skb_shinfo(skb)->gso_size;
221 if (skb->protocol == ntohs(ETH_P_IP)) {
222 skb->nh.iph->tot_len = 0;
223 skb->nh.iph->check = 0;
224 @@ -2651,7 +2651,7 @@
225 * tso gets written back prematurely before the data is fully
226 * DMAd to the controller */
227 if (!skb->data_len && tx_ring->last_tx_tso &&
228 - !skb_shinfo(skb)->tso_size) {
229 + !skb_shinfo(skb)->gso_size) {
230 tx_ring->last_tx_tso = 0;
231 size -= 4;
232 }
233 @@ -2893,7 +2893,7 @@
234 }
236 #ifdef NETIF_F_TSO
237 - mss = skb_shinfo(skb)->tso_size;
238 + mss = skb_shinfo(skb)->gso_size;
239 /* The controller does a simple calculation to
240 * make sure there is enough room in the FIFO before
241 * initiating the DMA for each buffer. The calc is:
242 @@ -2935,7 +2935,7 @@
243 #ifdef NETIF_F_TSO
244 /* Controller Erratum workaround */
245 if (!skb->data_len && tx_ring->last_tx_tso &&
246 - !skb_shinfo(skb)->tso_size)
247 + !skb_shinfo(skb)->gso_size)
248 count++;
249 #endif
251 Index: tmp-xxx/drivers/net/forcedeth.c
252 ===================================================================
253 --- tmp-xxx.orig/drivers/net/forcedeth.c 2006-11-15 10:38:39.000000000 +0000
254 +++ tmp-xxx/drivers/net/forcedeth.c 2006-11-27 10:52:42.000000000 +0000
255 @@ -482,9 +482,9 @@
256 * critical parts:
257 * - rx is (pseudo-) lockless: it relies on the single-threading provided
258 * by the arch code for interrupts.
259 - * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
260 + * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
261 * needs dev->priv->lock :-(
262 - * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
263 + * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
264 */
266 /* in dev: base, irq */
267 @@ -1016,7 +1016,7 @@
269 /*
270 * nv_start_xmit: dev->hard_start_xmit function
271 - * Called with dev->xmit_lock held.
272 + * Called with netif_tx_lock held.
273 */
274 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
275 {
276 @@ -1105,8 +1105,8 @@
277 np->tx_skbuff[nr] = skb;
279 #ifdef NETIF_F_TSO
280 - if (skb_shinfo(skb)->tso_size)
281 - tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
282 + if (skb_shinfo(skb)->gso_size)
283 + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
284 else
285 #endif
286 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
287 @@ -1203,7 +1203,7 @@
289 /*
290 * nv_tx_timeout: dev->tx_timeout function
291 - * Called with dev->xmit_lock held.
292 + * Called with netif_tx_lock held.
293 */
294 static void nv_tx_timeout(struct net_device *dev)
295 {
296 @@ -1524,7 +1524,7 @@
297 * Changing the MTU is a rare event, it shouldn't matter.
298 */
299 disable_irq(dev->irq);
300 - spin_lock_bh(&dev->xmit_lock);
301 + netif_tx_lock_bh(dev);
302 spin_lock(&np->lock);
303 /* stop engines */
304 nv_stop_rx(dev);
305 @@ -1559,7 +1559,7 @@
306 nv_start_rx(dev);
307 nv_start_tx(dev);
308 spin_unlock(&np->lock);
309 - spin_unlock_bh(&dev->xmit_lock);
310 + netif_tx_unlock_bh(dev);
311 enable_irq(dev->irq);
312 }
313 return 0;
314 @@ -1594,7 +1594,7 @@
315 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
317 if (netif_running(dev)) {
318 - spin_lock_bh(&dev->xmit_lock);
319 + netif_tx_lock_bh(dev);
320 spin_lock_irq(&np->lock);
322 /* stop rx engine */
323 @@ -1606,7 +1606,7 @@
324 /* restart rx engine */
325 nv_start_rx(dev);
326 spin_unlock_irq(&np->lock);
327 - spin_unlock_bh(&dev->xmit_lock);
328 + netif_tx_unlock_bh(dev);
329 } else {
330 nv_copy_mac_to_hw(dev);
331 }
332 @@ -1615,7 +1615,7 @@
334 /*
335 * nv_set_multicast: dev->set_multicast function
336 - * Called with dev->xmit_lock held.
337 + * Called with netif_tx_lock held.
338 */
339 static void nv_set_multicast(struct net_device *dev)
340 {
341 Index: tmp-xxx/drivers/net/hamradio/6pack.c
342 ===================================================================
343 --- tmp-xxx.orig/drivers/net/hamradio/6pack.c 2006-11-15 10:38:39.000000000 +0000
344 +++ tmp-xxx/drivers/net/hamradio/6pack.c 2006-11-27 10:52:42.000000000 +0000
345 @@ -308,9 +308,9 @@
346 {
347 struct sockaddr_ax25 *sa = addr;
349 - spin_lock_irq(&dev->xmit_lock);
350 + netif_tx_lock_bh(dev);
351 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
352 - spin_unlock_irq(&dev->xmit_lock);
353 + netif_tx_unlock_bh(dev);
355 return 0;
356 }
357 @@ -767,9 +767,9 @@
358 break;
359 }
361 - spin_lock_irq(&dev->xmit_lock);
362 + netif_tx_lock_bh(dev);
363 memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
364 - spin_unlock_irq(&dev->xmit_lock);
365 + netif_tx_unlock_bh(dev);
367 err = 0;
368 break;
369 Index: tmp-xxx/drivers/net/hamradio/mkiss.c
370 ===================================================================
371 --- tmp-xxx.orig/drivers/net/hamradio/mkiss.c 2006-11-15 10:38:39.000000000 +0000
372 +++ tmp-xxx/drivers/net/hamradio/mkiss.c 2006-11-27 10:52:42.000000000 +0000
373 @@ -357,9 +357,9 @@
374 {
375 struct sockaddr_ax25 *sa = addr;
377 - spin_lock_irq(&dev->xmit_lock);
378 + netif_tx_lock_bh(dev);
379 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
380 - spin_unlock_irq(&dev->xmit_lock);
381 + netif_tx_unlock_bh(dev);
383 return 0;
384 }
385 @@ -886,9 +886,9 @@
386 break;
387 }
389 - spin_lock_irq(&dev->xmit_lock);
390 + netif_tx_lock_bh(dev);
391 memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
392 - spin_unlock_irq(&dev->xmit_lock);
393 + netif_tx_unlock_bh(dev);
395 err = 0;
396 break;
397 Index: tmp-xxx/drivers/net/ifb.c
398 ===================================================================
399 --- tmp-xxx.orig/drivers/net/ifb.c 2006-11-15 10:38:39.000000000 +0000
400 +++ tmp-xxx/drivers/net/ifb.c 2006-11-27 10:52:42.000000000 +0000
401 @@ -76,13 +76,13 @@
402 dp->st_task_enter++;
403 if ((skb = skb_peek(&dp->tq)) == NULL) {
404 dp->st_txq_refl_try++;
405 - if (spin_trylock(&_dev->xmit_lock)) {
406 + if (netif_tx_trylock(_dev)) {
407 dp->st_rxq_enter++;
408 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
409 skb_queue_tail(&dp->tq, skb);
410 dp->st_rx2tx_tran++;
411 }
412 - spin_unlock(&_dev->xmit_lock);
413 + netif_tx_unlock(_dev);
414 } else {
415 /* reschedule */
416 dp->st_rxq_notenter++;
417 @@ -110,7 +110,7 @@
418 }
419 }
421 - if (spin_trylock(&_dev->xmit_lock)) {
422 + if (netif_tx_trylock(_dev)) {
423 dp->st_rxq_check++;
424 if ((skb = skb_peek(&dp->rq)) == NULL) {
425 dp->tasklet_pending = 0;
426 @@ -118,10 +118,10 @@
427 netif_wake_queue(_dev);
428 } else {
429 dp->st_rxq_rsch++;
430 - spin_unlock(&_dev->xmit_lock);
431 + netif_tx_unlock(_dev);
432 goto resched;
433 }
434 - spin_unlock(&_dev->xmit_lock);
435 + netif_tx_unlock(_dev);
436 } else {
437 resched:
438 dp->tasklet_pending = 1;
439 Index: tmp-xxx/drivers/net/irda/vlsi_ir.c
440 ===================================================================
441 --- tmp-xxx.orig/drivers/net/irda/vlsi_ir.c 2006-11-15 10:38:39.000000000 +0000
442 +++ tmp-xxx/drivers/net/irda/vlsi_ir.c 2006-11-27 10:52:42.000000000 +0000
443 @@ -959,7 +959,7 @@
444 || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
445 break;
446 udelay(100);
447 - /* must not sleep here - we are called under xmit_lock! */
448 + /* must not sleep here - called under netif_tx_lock! */
449 }
450 }
452 Index: tmp-xxx/drivers/net/ixgb/ixgb_main.c
453 ===================================================================
454 --- tmp-xxx.orig/drivers/net/ixgb/ixgb_main.c 2006-11-15 10:38:39.000000000 +0000
455 +++ tmp-xxx/drivers/net/ixgb/ixgb_main.c 2006-11-27 10:52:42.000000000 +0000
456 @@ -1163,7 +1163,7 @@
457 uint16_t ipcse, tucse, mss;
458 int err;
460 - if(likely(skb_shinfo(skb)->tso_size)) {
461 + if(likely(skb_shinfo(skb)->gso_size)) {
462 if (skb_header_cloned(skb)) {
463 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
464 if (err)
465 @@ -1171,7 +1171,7 @@
466 }
468 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
469 - mss = skb_shinfo(skb)->tso_size;
470 + mss = skb_shinfo(skb)->gso_size;
471 skb->nh.iph->tot_len = 0;
472 skb->nh.iph->check = 0;
473 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
474 Index: tmp-xxx/drivers/net/loopback.c
475 ===================================================================
476 --- tmp-xxx.orig/drivers/net/loopback.c 2006-11-15 10:38:39.000000000 +0000
477 +++ tmp-xxx/drivers/net/loopback.c 2006-11-27 10:52:42.000000000 +0000
478 @@ -74,7 +74,7 @@
479 struct iphdr *iph = skb->nh.iph;
480 struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
481 unsigned int doffset = (iph->ihl + th->doff) * 4;
482 - unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
483 + unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
484 unsigned int offset = 0;
485 u32 seq = ntohl(th->seq);
486 u16 id = ntohs(iph->id);
487 @@ -139,7 +139,7 @@
488 #endif
490 #ifdef LOOPBACK_TSO
491 - if (skb_shinfo(skb)->tso_size) {
492 + if (skb_shinfo(skb)->gso_size) {
493 BUG_ON(skb->protocol != htons(ETH_P_IP));
494 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
496 Index: tmp-xxx/drivers/net/mv643xx_eth.c
497 ===================================================================
498 --- tmp-xxx.orig/drivers/net/mv643xx_eth.c 2006-11-15 10:38:39.000000000 +0000
499 +++ tmp-xxx/drivers/net/mv643xx_eth.c 2006-11-27 10:52:42.000000000 +0000
500 @@ -1107,7 +1107,7 @@
502 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
503 if (has_tiny_unaligned_frags(skb)) {
504 - if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
505 + if (__skb_linearize(skb)) {
506 stats->tx_dropped++;
507 printk(KERN_DEBUG "%s: failed to linearize tiny "
508 "unaligned fragment\n", dev->name);
509 Index: tmp-xxx/drivers/net/natsemi.c
510 ===================================================================
511 --- tmp-xxx.orig/drivers/net/natsemi.c 2006-11-15 10:38:39.000000000 +0000
512 +++ tmp-xxx/drivers/net/natsemi.c 2006-11-27 10:52:42.000000000 +0000
513 @@ -323,12 +323,12 @@
514 The rx process only runs in the interrupt handler. Access from outside
515 the interrupt handler is only permitted after disable_irq().
517 -The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
518 +The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
519 is set, then access is permitted under spin_lock_irq(&np->lock).
521 Thus configuration functions that want to access everything must call
522 disable_irq(dev->irq);
523 - spin_lock_bh(dev->xmit_lock);
524 + netif_tx_lock_bh(dev);
525 spin_lock_irq(&np->lock);
527 IV. Notes
528 Index: tmp-xxx/drivers/net/r8169.c
529 ===================================================================
530 --- tmp-xxx.orig/drivers/net/r8169.c 2006-11-15 10:38:39.000000000 +0000
531 +++ tmp-xxx/drivers/net/r8169.c 2006-11-27 10:52:42.000000000 +0000
532 @@ -2171,7 +2171,7 @@
533 static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
534 {
535 if (dev->features & NETIF_F_TSO) {
536 - u32 mss = skb_shinfo(skb)->tso_size;
537 + u32 mss = skb_shinfo(skb)->gso_size;
539 if (mss)
540 return LargeSend | ((mss & MSSMask) << MSSShift);
541 Index: tmp-xxx/drivers/net/s2io.c
542 ===================================================================
543 --- tmp-xxx.orig/drivers/net/s2io.c 2006-11-15 10:38:39.000000000 +0000
544 +++ tmp-xxx/drivers/net/s2io.c 2006-11-27 10:52:42.000000000 +0000
545 @@ -3522,8 +3522,8 @@
546 txdp->Control_1 = 0;
547 txdp->Control_2 = 0;
548 #ifdef NETIF_F_TSO
549 - mss = skb_shinfo(skb)->tso_size;
550 - if (mss) {
551 + mss = skb_shinfo(skb)->gso_size;
552 + if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
553 txdp->Control_1 |= TXD_TCP_LSO_EN;
554 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
555 }
556 @@ -3543,10 +3543,10 @@
557 }
559 frg_len = skb->len - skb->data_len;
560 - if (skb_shinfo(skb)->ufo_size) {
561 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
562 int ufo_size;
564 - ufo_size = skb_shinfo(skb)->ufo_size;
565 + ufo_size = skb_shinfo(skb)->gso_size;
566 ufo_size &= ~7;
567 txdp->Control_1 |= TXD_UFO_EN;
568 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
569 @@ -3572,7 +3572,7 @@
570 txdp->Host_Control = (unsigned long) skb;
571 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
573 - if (skb_shinfo(skb)->ufo_size)
574 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
575 txdp->Control_1 |= TXD_UFO_EN;
577 frg_cnt = skb_shinfo(skb)->nr_frags;
578 @@ -3587,12 +3587,12 @@
579 (sp->pdev, frag->page, frag->page_offset,
580 frag->size, PCI_DMA_TODEVICE);
581 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
582 - if (skb_shinfo(skb)->ufo_size)
583 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
584 txdp->Control_1 |= TXD_UFO_EN;
585 }
586 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
588 - if (skb_shinfo(skb)->ufo_size)
589 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
590 frg_cnt++; /* as Txd0 was used for inband header */
592 tx_fifo = mac_control->tx_FIFO_start[queue];
593 @@ -3606,7 +3606,7 @@
594 if (mss)
595 val64 |= TX_FIFO_SPECIAL_FUNC;
596 #endif
597 - if (skb_shinfo(skb)->ufo_size)
598 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
599 val64 |= TX_FIFO_SPECIAL_FUNC;
600 writeq(val64, &tx_fifo->List_Control);
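
The switch from "if (mss)" to an explicit gso_type test in this file is not cosmetic: with the unified fields, UFO packets also carry a nonzero gso_size, so the driver must key off gso_type to tell TCP segmentation from UDP fragmentation. In outline (txdp and the TXD_* flags as in the driver above):

        /* gso_size alone no longer implies TCP segmentation: */
        if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4)         /* TSO */
                txdp->Control_1 |= TXD_TCP_LSO_EN;
        else if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)    /* UFO */
                txdp->Control_1 |= TXD_UFO_EN;
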
602 Index: tmp-xxx/drivers/net/sky2.c
603 ===================================================================
604 --- tmp-xxx.orig/drivers/net/sky2.c 2006-11-15 10:38:39.000000000 +0000
605 +++ tmp-xxx/drivers/net/sky2.c 2006-11-27 10:52:42.000000000 +0000
606 @@ -1141,7 +1141,7 @@
607 count = sizeof(dma_addr_t) / sizeof(u32);
608 count += skb_shinfo(skb)->nr_frags * count;
610 - if (skb_shinfo(skb)->tso_size)
611 + if (skb_shinfo(skb)->gso_size)
612 ++count;
614 if (skb->ip_summed == CHECKSUM_HW)
615 @@ -1213,7 +1213,7 @@
616 }
618 /* Check for TCP Segmentation Offload */
619 - mss = skb_shinfo(skb)->tso_size;
620 + mss = skb_shinfo(skb)->gso_size;
621 if (mss != 0) {
622 /* just drop the packet if non-linear expansion fails */
623 if (skb_header_cloned(skb) &&
624 Index: tmp-xxx/drivers/net/tg3.c
625 ===================================================================
626 --- tmp-xxx.orig/drivers/net/tg3.c 2006-11-15 10:38:39.000000000 +0000
627 +++ tmp-xxx/drivers/net/tg3.c 2006-11-27 10:52:42.000000000 +0000
628 @@ -3664,7 +3664,7 @@
629 #if TG3_TSO_SUPPORT != 0
630 mss = 0;
631 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
632 - (mss = skb_shinfo(skb)->tso_size) != 0) {
633 + (mss = skb_shinfo(skb)->gso_size) != 0) {
634 int tcp_opt_len, ip_tcp_len;
636 if (skb_header_cloned(skb) &&
637 Index: tmp-xxx/drivers/net/tulip/winbond-840.c
638 ===================================================================
639 --- tmp-xxx.orig/drivers/net/tulip/winbond-840.c 2006-11-15 10:38:39.000000000 +0000
640 +++ tmp-xxx/drivers/net/tulip/winbond-840.c 2006-11-27 10:52:42.000000000 +0000
641 @@ -1605,11 +1605,11 @@
642 * - get_stats:
643 * spin_lock_irq(np->lock), doesn't touch hw if not present
644 * - hard_start_xmit:
645 - * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
646 + * synchronize_irq + netif_tx_disable;
647 * - tx_timeout:
648 - * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
649 + * netif_device_detach + netif_tx_disable;
650 * - set_multicast_list
651 - * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
652 + * netif_device_detach + netif_tx_disable;
653 * - interrupt handler
654 * doesn't touch hw if not present, synchronize_irq waits for
655 * running instances of the interrupt handler.
656 @@ -1635,11 +1635,10 @@
657 netif_device_detach(dev);
658 update_csr6(dev, 0);
659 iowrite32(0, ioaddr + IntrEnable);
660 - netif_stop_queue(dev);
661 spin_unlock_irq(&np->lock);
663 - spin_unlock_wait(&dev->xmit_lock);
664 synchronize_irq(dev->irq);
665 + netif_tx_disable(dev);
667 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
669 Index: tmp-xxx/drivers/net/typhoon.c
670 ===================================================================
671 --- tmp-xxx.orig/drivers/net/typhoon.c 2006-11-15 10:38:39.000000000 +0000
672 +++ tmp-xxx/drivers/net/typhoon.c 2006-11-27 10:52:42.000000000 +0000
673 @@ -340,7 +340,7 @@
674 #endif
676 #if defined(NETIF_F_TSO)
677 -#define skb_tso_size(x) (skb_shinfo(x)->tso_size)
678 +#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
679 #define TSO_NUM_DESCRIPTORS 2
680 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
681 #else
682 Index: tmp-xxx/drivers/net/via-velocity.c
683 ===================================================================
684 --- tmp-xxx.orig/drivers/net/via-velocity.c 2006-11-15 10:38:39.000000000 +0000
685 +++ tmp-xxx/drivers/net/via-velocity.c 2006-11-27 10:52:42.000000000 +0000
686 @@ -1905,6 +1905,13 @@
688 int pktlen = skb->len;
690 +#ifdef VELOCITY_ZERO_COPY_SUPPORT
691 + if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
692 + kfree_skb(skb);
693 + return 0;
694 + }
695 +#endif
696 +
697 spin_lock_irqsave(&vptr->lock, flags);
699 index = vptr->td_curr[qnum];
700 @@ -1920,8 +1927,6 @@
701 */
702 if (pktlen < ETH_ZLEN) {
703 /* Cannot occur until ZC support */
704 - if(skb_linearize(skb, GFP_ATOMIC))
705 - return 0;
706 pktlen = ETH_ZLEN;
707 memcpy(tdinfo->buf, skb->data, skb->len);
708 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
709 @@ -1939,7 +1944,6 @@
710 int nfrags = skb_shinfo(skb)->nr_frags;
711 tdinfo->skb = skb;
712 if (nfrags > 6) {
713 - skb_linearize(skb, GFP_ATOMIC);
714 memcpy(tdinfo->buf, skb->data, skb->len);
715 tdinfo->skb_dma[0] = tdinfo->buf_dma;
716 td_ptr->tdesc0.pktsize =
717 Index: tmp-xxx/drivers/net/wireless/orinoco.c
718 ===================================================================
719 --- tmp-xxx.orig/drivers/net/wireless/orinoco.c 2006-11-15 10:38:39.000000000 +0000
720 +++ tmp-xxx/drivers/net/wireless/orinoco.c 2006-11-27 10:52:42.000000000 +0000
721 @@ -1835,7 +1835,9 @@
722 /* Set promiscuity / multicast*/
723 priv->promiscuous = 0;
724 priv->mc_count = 0;
725 - __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
726 +
727 + /* FIXME: what about netif_tx_lock */
728 + __orinoco_set_multicast_list(dev);
730 return 0;
731 }
732 Index: tmp-xxx/drivers/s390/net/qeth_eddp.c
733 ===================================================================
734 --- tmp-xxx.orig/drivers/s390/net/qeth_eddp.c 2006-11-15 10:38:39.000000000 +0000
735 +++ tmp-xxx/drivers/s390/net/qeth_eddp.c 2006-11-27 10:52:42.000000000 +0000
736 @@ -421,7 +421,7 @@
737 }
738 tcph = eddp->skb->h.th;
739 while (eddp->skb_offset < eddp->skb->len) {
740 - data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
741 + data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
742 (int)(eddp->skb->len - eddp->skb_offset));
743 /* prepare qdio hdr */
744 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
745 @@ -516,20 +516,20 @@
747 QETH_DBF_TEXT(trace, 5, "eddpcanp");
748 /* can we put multiple skbs in one page? */
749 - skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
750 + skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
751 if (skbs_per_page > 1){
752 - ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
753 + ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
754 skbs_per_page + 1;
755 ctx->elements_per_skb = 1;
756 } else {
757 /* no -> how many elements per skb? */
758 - ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
759 + ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
760 PAGE_SIZE) >> PAGE_SHIFT;
761 ctx->num_pages = ctx->elements_per_skb *
762 - (skb_shinfo(skb)->tso_segs + 1);
763 + (skb_shinfo(skb)->gso_segs + 1);
764 }
765 ctx->num_elements = ctx->elements_per_skb *
766 - (skb_shinfo(skb)->tso_segs + 1);
767 + (skb_shinfo(skb)->gso_segs + 1);
768 }
770 static inline struct qeth_eddp_context *
771 Index: tmp-xxx/drivers/s390/net/qeth_main.c
772 ===================================================================
773 --- tmp-xxx.orig/drivers/s390/net/qeth_main.c 2006-11-15 10:38:39.000000000 +0000
774 +++ tmp-xxx/drivers/s390/net/qeth_main.c 2006-11-27 10:52:42.000000000 +0000
775 @@ -4454,7 +4454,7 @@
776 queue = card->qdio.out_qs
777 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
779 - if (skb_shinfo(skb)->tso_size)
780 + if (skb_shinfo(skb)->gso_size)
781 large_send = card->options.large_send;
783 /*are we able to do TSO ? If so ,prepare and send it from here */
784 @@ -4501,7 +4501,7 @@
785 card->stats.tx_packets++;
786 card->stats.tx_bytes += skb->len;
787 #ifdef CONFIG_QETH_PERF_STATS
788 - if (skb_shinfo(skb)->tso_size &&
789 + if (skb_shinfo(skb)->gso_size &&
790 !(large_send == QETH_LARGE_SEND_NO)) {
791 card->perf_stats.large_send_bytes += skb->len;
792 card->perf_stats.large_send_cnt++;
793 Index: tmp-xxx/drivers/s390/net/qeth_tso.h
794 ===================================================================
795 --- tmp-xxx.orig/drivers/s390/net/qeth_tso.h 2006-11-15 10:38:39.000000000 +0000
796 +++ tmp-xxx/drivers/s390/net/qeth_tso.h 2006-11-27 10:52:42.000000000 +0000
797 @@ -51,7 +51,7 @@
798 hdr->ext.hdr_version = 1;
799 hdr->ext.hdr_len = 28;
800 /*insert non-fix values */
801 - hdr->ext.mss = skb_shinfo(skb)->tso_size;
802 + hdr->ext.mss = skb_shinfo(skb)->gso_size;
803 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
804 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
805 sizeof(struct qeth_hdr_tso));
806 Index: tmp-xxx/include/linux/ethtool.h
807 ===================================================================
808 --- tmp-xxx.orig/include/linux/ethtool.h 2006-11-15 10:38:39.000000000 +0000
809 +++ tmp-xxx/include/linux/ethtool.h 2006-11-27 10:52:42.000000000 +0000
810 @@ -408,6 +408,8 @@
811 #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */
812 #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */
813 #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */
814 +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */
815 +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
817 /* compatibility with older code */
818 #define SPARC_ETH_GSET ETHTOOL_GSET
819 Index: tmp-xxx/include/linux/netdevice.h
820 ===================================================================
821 --- tmp-xxx.orig/include/linux/netdevice.h 2006-11-15 10:38:39.000000000 +0000
822 +++ tmp-xxx/include/linux/netdevice.h 2006-11-27 10:52:42.000000000 +0000
823 @@ -230,7 +230,8 @@
824 __LINK_STATE_SCHED,
825 __LINK_STATE_NOCARRIER,
826 __LINK_STATE_RX_SCHED,
827 - __LINK_STATE_LINKWATCH_PENDING
828 + __LINK_STATE_LINKWATCH_PENDING,
829 + __LINK_STATE_QDISC_RUNNING,
830 };
833 @@ -306,9 +307,17 @@
834 #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
835 #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
836 #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
837 -#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
838 +#define NETIF_F_GSO 2048 /* Enable software GSO. */
839 #define NETIF_F_LLTX 4096 /* LockLess TX */
840 -#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
841 +
842 + /* Segmentation offload features */
843 +#define NETIF_F_GSO_SHIFT 16
844 +#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
845 +#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
846 +#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
847 +
848 +#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
849 +#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
851 struct net_device *next_sched;
853 @@ -394,6 +403,9 @@
854 struct list_head qdisc_list;
855 unsigned long tx_queue_len; /* Max frames per queue allowed */
857 + /* Partially transmitted GSO packet. */
858 + struct sk_buff *gso_skb;
859 +
860 /* ingress path synchronizer */
861 spinlock_t ingress_lock;
862 struct Qdisc *qdisc_ingress;
863 @@ -402,7 +414,7 @@
864 * One part is mostly used on xmit path (device)
865 */
866 /* hard_start_xmit synchronizer */
867 - spinlock_t xmit_lock ____cacheline_aligned_in_smp;
868 + spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
869 /* cpu id of processor entered to hard_start_xmit or -1,
870 if nobody entered there.
871 */
872 @@ -527,6 +539,8 @@
873 struct net_device *,
874 struct packet_type *,
875 struct net_device *);
876 + struct sk_buff *(*gso_segment)(struct sk_buff *skb,
877 + int features);
878 void *af_packet_priv;
879 struct list_head list;
880 };
881 @@ -693,7 +707,8 @@
882 extern int dev_set_mtu(struct net_device *, int);
883 extern int dev_set_mac_address(struct net_device *,
884 struct sockaddr *);
885 -extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
886 +extern int dev_hard_start_xmit(struct sk_buff *skb,
887 + struct net_device *dev);
889 extern void dev_init(void);
891 @@ -900,11 +915,43 @@
892 clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
893 }
895 +static inline void netif_tx_lock(struct net_device *dev)
896 +{
897 + spin_lock(&dev->_xmit_lock);
898 + dev->xmit_lock_owner = smp_processor_id();
899 +}
900 +
901 +static inline void netif_tx_lock_bh(struct net_device *dev)
902 +{
903 + spin_lock_bh(&dev->_xmit_lock);
904 + dev->xmit_lock_owner = smp_processor_id();
905 +}
906 +
907 +static inline int netif_tx_trylock(struct net_device *dev)
908 +{
909 + int err = spin_trylock(&dev->_xmit_lock);
910 + if (!err)
911 + dev->xmit_lock_owner = smp_processor_id();
912 + return err;
913 +}
914 +
915 +static inline void netif_tx_unlock(struct net_device *dev)
916 +{
917 + dev->xmit_lock_owner = -1;
918 + spin_unlock(&dev->_xmit_lock);
919 +}
920 +
921 +static inline void netif_tx_unlock_bh(struct net_device *dev)
922 +{
923 + dev->xmit_lock_owner = -1;
924 + spin_unlock_bh(&dev->_xmit_lock);
925 +}
926 +
927 static inline void netif_tx_disable(struct net_device *dev)
928 {
929 - spin_lock_bh(&dev->xmit_lock);
930 + netif_tx_lock_bh(dev);
931 netif_stop_queue(dev);
932 - spin_unlock_bh(&dev->xmit_lock);
933 + netif_tx_unlock_bh(dev);
934 }
936 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
937 @@ -932,6 +979,7 @@
938 extern int weight_p;
939 extern int netdev_set_master(struct net_device *dev, struct net_device *master);
940 extern int skb_checksum_help(struct sk_buff *skb, int inward);
941 +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
942 #ifdef CONFIG_BUG
943 extern void netdev_rx_csum_fault(struct net_device *dev);
944 #else
945 @@ -951,6 +999,18 @@
947 extern void linkwatch_run_queue(void);
949 +static inline int skb_gso_ok(struct sk_buff *skb, int features)
950 +{
951 + int feature = skb_shinfo(skb)->gso_size ?
952 + skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
953 + return (features & feature) == feature;
954 +}
955 +
956 +static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
957 +{
958 + return !skb_gso_ok(skb, dev->features);
959 +}
960 +
961 #endif /* __KERNEL__ */
963 #endif /* _LINUX_DEV_H */
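
skb_gso_ok() and netif_needs_gso() are plain bit arithmetic over the definitions above. A worked example as a sketch (the function name is illustrative):

        /* Sketch: decide whether a TCP skb needs software GSO. */
        static int example_needs_sw_gso(struct sk_buff *skb, struct net_device *dev)
        {
                /* dev->features = NETIF_F_SG | NETIF_F_TSO (NETIF_F_TSO = 1 << 16)
                 * skb: gso_size = 1448, gso_type = SKB_GSO_TCPV4 (1 << 0)
                 *
                 * feature = SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT = NETIF_F_TSO,
                 * a subset of dev->features, so skb_gso_ok() is true and the
                 * skb reaches the hardware unsegmented.  Set SKB_GSO_DODGY as
                 * well and NETIF_F_GSO_ROBUST must also be advertised, else
                 * the core falls back to software GSO. */
                return netif_needs_gso(dev, skb);
        }
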
964 Index: tmp-xxx/include/linux/skbuff.h
965 ===================================================================
966 --- tmp-xxx.orig/include/linux/skbuff.h 2006-11-15 10:38:39.000000000 +0000
967 +++ tmp-xxx/include/linux/skbuff.h 2006-11-27 10:52:42.000000000 +0000
968 @@ -134,9 +134,10 @@
969 struct skb_shared_info {
970 atomic_t dataref;
971 unsigned short nr_frags;
972 - unsigned short tso_size;
973 - unsigned short tso_segs;
974 - unsigned short ufo_size;
975 + unsigned short gso_size;
976 + /* Warning: this field is not always filled in (UFO)! */
977 + unsigned short gso_segs;
978 + unsigned short gso_type;
979 unsigned int ip6_frag_id;
980 struct sk_buff *frag_list;
981 skb_frag_t frags[MAX_SKB_FRAGS];
982 @@ -168,6 +169,14 @@
983 SKB_FCLONE_CLONE,
984 };
986 +enum {
987 + SKB_GSO_TCPV4 = 1 << 0,
988 + SKB_GSO_UDPV4 = 1 << 1,
989 +
990 + /* This indicates the skb is from an untrusted source. */
991 + SKB_GSO_DODGY = 1 << 2,
992 +};
993 +
994 /**
995 * struct sk_buff - socket buffer
996 * @next: Next buffer in list
997 @@ -1148,18 +1157,34 @@
998 return 0;
999 }
1001 +static inline int __skb_linearize(struct sk_buff *skb)
1002 +{
1003 + return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1004 +}
1006 /**
1007 * skb_linearize - convert paged skb to linear one
1008 * @skb: buffer to linarize
1009 - * @gfp: allocation mode
1011 * If there is no free memory -ENOMEM is returned, otherwise zero
1012 * is returned and the old skb data released.
1013 */
1014 -extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
1015 -static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
1016 +static inline int skb_linearize(struct sk_buff *skb)
1017 +{
1018 + return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1019 +}
1021 +/**
1022 + * skb_linearize_cow - make sure skb is linear and writable
1023 + * @skb: buffer to process
1024 + *
1025 + * If there is no free memory -ENOMEM is returned, otherwise zero
1026 + * is returned and the old skb data released.
1027 + */
1028 +static inline int skb_linearize_cow(struct sk_buff *skb)
1029 {
1030 - return __skb_linearize(skb, gfp);
1031 + return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1032 + __skb_linearize(skb) : 0;
1033 }
1035 /**
1036 @@ -1254,6 +1279,7 @@
1037 struct sk_buff *skb1, const u32 len);
1039 extern void skb_release_data(struct sk_buff *skb);
1040 +extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1042 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1043 int len, void *buffer)
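
The rest of this patch converts callers (aoenet, mv643xx_eth, via-velocity, net/core/dev.c) to these new signatures. Schematically, with an illustrative helper name:

        static int example_xmit_prepare(struct sk_buff *skb)
        {
                /* old API: skb_linearize(skb, GFP_ATOMIC), callers checked < 0 */

                /* new API: no gfp argument, returns 0 or -ENOMEM, and the
                 * skb_is_nonlinear() test is folded into the helper */
                if (skb_linearize(skb))
                        return -ENOMEM;

                /* when the payload will also be written, the COW variant
                 * un-clones the data first */
                if (skb_linearize_cow(skb))
                        return -ENOMEM;

                return 0;
        }
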
1044 Index: tmp-xxx/include/net/pkt_sched.h
1045 ===================================================================
1046 --- tmp-xxx.orig/include/net/pkt_sched.h 2006-11-15 10:38:39.000000000 +0000
1047 +++ tmp-xxx/include/net/pkt_sched.h 2006-11-27 10:52:42.000000000 +0000
1048 @@ -218,12 +218,13 @@
1049 struct rtattr *tab);
1050 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
1052 -extern int qdisc_restart(struct net_device *dev);
1053 +extern void __qdisc_run(struct net_device *dev);
1055 static inline void qdisc_run(struct net_device *dev)
1056 {
1057 - while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
1058 - /* NOTHING */;
1059 + if (!netif_queue_stopped(dev) &&
1060 + !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
1061 + __qdisc_run(dev);
1062 }
1064 extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
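
The __LINK_STATE_QDISC_RUNNING bit turns qdisc_run() into a single-dequeuer construct: the CPU that wins test_and_set_bit() drains the queue while the others merely enqueue. The matching __qdisc_run() lives in net/sched/sch_generic.c, which this excerpt does not show; roughly, it loops on the (now static) qdisc_restart() and then clears the bit:

        void __qdisc_run(struct net_device *dev)
        {
                /* dequeue until the qdisc is empty or the driver stops the queue */
                while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
                        /* NOTHING */;

                clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
        }
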
1065 Index: tmp-xxx/include/net/protocol.h
1066 ===================================================================
1067 --- tmp-xxx.orig/include/net/protocol.h 2006-11-15 10:38:39.000000000 +0000
1068 +++ tmp-xxx/include/net/protocol.h 2006-11-27 10:52:42.000000000 +0000
1069 @@ -37,6 +37,8 @@
1070 struct net_protocol {
1071 int (*handler)(struct sk_buff *skb);
1072 void (*err_handler)(struct sk_buff *skb, u32 info);
1073 + struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1074 + int features);
1075 int no_policy;
1076 };
1078 Index: tmp-xxx/include/net/sock.h
1079 ===================================================================
1080 --- tmp-xxx.orig/include/net/sock.h 2006-11-15 10:38:39.000000000 +0000
1081 +++ tmp-xxx/include/net/sock.h 2006-11-27 10:52:42.000000000 +0000
1082 @@ -1064,9 +1064,13 @@
1084 __sk_dst_set(sk, dst);
1085 sk->sk_route_caps = dst->dev->features;
1086 + if (sk->sk_route_caps & NETIF_F_GSO)
1087 + sk->sk_route_caps |= NETIF_F_TSO;
1088 if (sk->sk_route_caps & NETIF_F_TSO) {
1089 if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
1090 sk->sk_route_caps &= ~NETIF_F_TSO;
1091 + else
1092 + sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1093 }
1094 }
1096 Index: tmp-xxx/include/net/tcp.h
1097 ===================================================================
1098 --- tmp-xxx.orig/include/net/tcp.h 2006-11-15 10:38:39.000000000 +0000
1099 +++ tmp-xxx/include/net/tcp.h 2006-11-27 10:52:42.000000000 +0000
1100 @@ -552,13 +552,13 @@
1101 */
1102 static inline int tcp_skb_pcount(const struct sk_buff *skb)
1103 {
1104 - return skb_shinfo(skb)->tso_segs;
1105 + return skb_shinfo(skb)->gso_segs;
1106 }
1108 /* This is valid iff tcp_skb_pcount() > 1. */
1109 static inline int tcp_skb_mss(const struct sk_buff *skb)
1110 {
1111 - return skb_shinfo(skb)->tso_size;
1112 + return skb_shinfo(skb)->gso_size;
1113 }
1115 static inline void tcp_dec_pcount_approx(__u32 *count,
1116 @@ -1063,6 +1063,8 @@
1118 extern int tcp_v4_destroy_sock(struct sock *sk);
1120 +extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
1122 #ifdef CONFIG_PROC_FS
1123 extern int tcp4_proc_init(void);
1124 extern void tcp4_proc_exit(void);
1125 Index: tmp-xxx/net/atm/clip.c
1126 ===================================================================
1127 --- tmp-xxx.orig/net/atm/clip.c 2006-11-15 10:38:39.000000000 +0000
1128 +++ tmp-xxx/net/atm/clip.c 2006-11-27 10:52:42.000000000 +0000
1129 @@ -101,7 +101,7 @@
1130 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
1131 return;
1132 }
1133 - spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
1134 + netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
1135 entry->neigh->used = jiffies;
1136 for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
1137 if (*walk == clip_vcc) {
1138 @@ -125,7 +125,7 @@
1139 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
1140 "0x%p)\n",entry,clip_vcc);
1141 out:
1142 - spin_unlock_bh(&entry->neigh->dev->xmit_lock);
1143 + netif_tx_unlock_bh(entry->neigh->dev);
1146 /* The neighbour entry n->lock is held. */
1147 Index: tmp-xxx/net/bridge/br_device.c
1148 ===================================================================
1149 --- tmp-xxx.orig/net/bridge/br_device.c 2006-11-15 10:38:39.000000000 +0000
1150 +++ tmp-xxx/net/bridge/br_device.c 2006-11-27 10:52:42.000000000 +0000
1151 @@ -146,9 +146,9 @@
1152 struct net_bridge *br = netdev_priv(dev);
1154 if (data)
1155 - br->feature_mask |= NETIF_F_IP_CSUM;
1156 + br->feature_mask |= NETIF_F_NO_CSUM;
1157 else
1158 - br->feature_mask &= ~NETIF_F_IP_CSUM;
1159 + br->feature_mask &= ~NETIF_F_ALL_CSUM;
1161 br_features_recompute(br);
1162 return 0;
1163 @@ -185,6 +185,6 @@
1164 dev->set_mac_address = br_set_mac_address;
1165 dev->priv_flags = IFF_EBRIDGE;
1167 - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
1168 - | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
1169 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
1170 + NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
1172 Index: tmp-xxx/net/bridge/br_forward.c
1173 ===================================================================
1174 --- tmp-xxx.orig/net/bridge/br_forward.c 2006-11-15 10:38:39.000000000 +0000
1175 +++ tmp-xxx/net/bridge/br_forward.c 2006-11-27 10:52:42.000000000 +0000
1176 @@ -32,7 +32,7 @@
1177 int br_dev_queue_push_xmit(struct sk_buff *skb)
1178 {
1179 /* drop mtu oversized packets except tso */
1180 - if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
1181 + if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
1182 kfree_skb(skb);
1183 else {
1184 #ifdef CONFIG_BRIDGE_NETFILTER
1185 Index: tmp-xxx/net/bridge/br_if.c
1186 ===================================================================
1187 --- tmp-xxx.orig/net/bridge/br_if.c 2006-11-15 10:38:39.000000000 +0000
1188 +++ tmp-xxx/net/bridge/br_if.c 2006-11-27 10:52:42.000000000 +0000
1189 @@ -385,17 +385,28 @@
1190 struct net_bridge_port *p;
1191 unsigned long features, checksum;
1193 - features = br->feature_mask &~ NETIF_F_IP_CSUM;
1194 - checksum = br->feature_mask & NETIF_F_IP_CSUM;
1195 + checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
1196 + features = br->feature_mask & ~NETIF_F_ALL_CSUM;
1198 list_for_each_entry(p, &br->port_list, list) {
1199 - if (!(p->dev->features
1200 - & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
1201 + unsigned long feature = p->dev->features;
1203 + if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
1204 + checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
1205 + if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
1206 + checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
1207 + if (!(feature & NETIF_F_IP_CSUM))
1208 checksum = 0;
1209 - features &= p->dev->features;
1211 + if (feature & NETIF_F_GSO)
1212 + feature |= NETIF_F_TSO;
1213 + feature |= NETIF_F_GSO;
1215 + features &= feature;
1218 - br->dev->features = features | checksum | NETIF_F_LLTX;
1219 + br->dev->features = features | checksum | NETIF_F_LLTX |
1220 + NETIF_F_GSO_ROBUST;
1223 /* called with RTNL */
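
The rewritten recomputation steps the bridge's advertised checksum capability down a ladder rather than using the old all-or-nothing test. The XOR idiom swaps one rung for the next in a single operation:

        /* checksum decays as ports are examined:
         *   NETIF_F_NO_CSUM -> NETIF_F_HW_CSUM -> NETIF_F_IP_CSUM -> 0
         *
         * e.g. checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM clears the
         * NO_CSUM bit (it was set) and sets HW_CSUM (it was clear):
         * "this port cannot skip checksums, but maybe it can compute them
         * in hardware".  A port without even NETIF_F_IP_CSUM forces
         * checksum = 0 and the bridge checksums in software. */
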
1224 Index: tmp-xxx/net/bridge/br_netfilter.c
1225 ===================================================================
1226 --- tmp-xxx.orig/net/bridge/br_netfilter.c 2006-11-15 10:38:39.000000000 +0000
1227 +++ tmp-xxx/net/bridge/br_netfilter.c 2006-11-27 10:52:42.000000000 +0000
1228 @@ -743,7 +743,7 @@
1230 if (skb->protocol == htons(ETH_P_IP) &&
1231 skb->len > skb->dev->mtu &&
1232 - !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
1233 + !skb_shinfo(skb)->gso_size)
1234 return ip_fragment(skb, br_dev_queue_push_xmit);
1235 else
1236 return br_dev_queue_push_xmit(skb);
1237 Index: tmp-xxx/net/core/dev.c
1238 ===================================================================
1239 --- tmp-xxx.orig/net/core/dev.c 2006-11-15 10:38:39.000000000 +0000
1240 +++ tmp-xxx/net/core/dev.c 2006-11-27 10:57:31.000000000 +0000
1241 @@ -115,6 +115,7 @@
1242 #include <net/iw_handler.h>
1243 #endif /* CONFIG_NET_RADIO */
1244 #include <asm/current.h>
1245 +#include <linux/err.h>
1247 /*
1248 * The list of packet types we will receive (as opposed to discard)
1249 @@ -1032,7 +1033,7 @@
1250 * taps currently in use.
1251 */
1253 -void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1254 +static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1255 {
1256 struct packet_type *ptype;
1258 @@ -1106,6 +1107,45 @@
1259 return ret;
1262 +/**
1263 + * skb_gso_segment - Perform segmentation on skb.
1264 + * @skb: buffer to segment
1265 + * @features: features for the output path (see dev->features)
1266 + *
1267 + * This function segments the given skb and returns a list of segments.
1268 + *
1269 + * It may return NULL if the skb requires no segmentation. This is
1270 + * only possible when GSO is used for verifying header integrity.
1271 + */
1272 +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1273 +{
1274 + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1275 + struct packet_type *ptype;
1276 + int type = skb->protocol;
1278 + BUG_ON(skb_shinfo(skb)->frag_list);
1279 + BUG_ON(skb->ip_summed != CHECKSUM_HW);
1281 + skb->mac.raw = skb->data;
1282 + skb->mac_len = skb->nh.raw - skb->data;
1283 + __skb_pull(skb, skb->mac_len);
1285 + rcu_read_lock();
1286 + list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1287 + if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1288 + segs = ptype->gso_segment(skb, features);
1289 + break;
1290 + }
1291 + }
1292 + rcu_read_unlock();
1294 + __skb_push(skb, skb->data - skb->mac.raw);
1296 + return segs;
1297 +}
1299 +EXPORT_SYMBOL(skb_gso_segment);
1301 /* Take action when hardware reception checksum errors are detected. */
1302 #ifdef CONFIG_BUG
1303 void netdev_rx_csum_fault(struct net_device *dev)
1304 @@ -1142,76 +1182,107 @@
1305 #define illegal_highdma(dev, skb) (0)
1306 #endif
1308 -/* Keep head the same: replace data */
1309 -int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
1310 +struct dev_gso_cb {
1311 + void (*destructor)(struct sk_buff *skb);
1312 +};
1314 +#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1316 +static void dev_gso_skb_destructor(struct sk_buff *skb)
1317 {
1318 - unsigned int size;
1319 - u8 *data;
1320 - long offset;
1321 - struct skb_shared_info *ninfo;
1322 - int headerlen = skb->data - skb->head;
1323 - int expand = (skb->tail + skb->data_len) - skb->end;
1325 - if (skb_shared(skb))
1326 - BUG();
1328 - if (expand <= 0)
1329 - expand = 0;
1331 - size = skb->end - skb->head + expand;
1332 - size = SKB_DATA_ALIGN(size);
1333 - data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
1334 - if (!data)
1335 - return -ENOMEM;
1337 - /* Copy entire thing */
1338 - if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
1339 - BUG();
1341 - /* Set up shinfo */
1342 - ninfo = (struct skb_shared_info*)(data + size);
1343 - atomic_set(&ninfo->dataref, 1);
1344 - ninfo->tso_size = skb_shinfo(skb)->tso_size;
1345 - ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
1346 - ninfo->ufo_size = skb_shinfo(skb)->ufo_size;
1347 - ninfo->nr_frags = 0;
1348 - ninfo->frag_list = NULL;
1350 - /* Offset between the two in bytes */
1351 - offset = data - skb->head;
1353 - /* Free old data. */
1354 - skb_release_data(skb);
1356 - skb->head = data;
1357 - skb->end = data + size;
1359 - /* Set up new pointers */
1360 - skb->h.raw += offset;
1361 - skb->nh.raw += offset;
1362 - skb->mac.raw += offset;
1363 - skb->tail += offset;
1364 - skb->data += offset;
1365 + struct dev_gso_cb *cb;
1367 - /* We are no longer a clone, even if we were. */
1368 - skb->cloned = 0;
1369 + do {
1370 + struct sk_buff *nskb = skb->next;
1372 - skb->tail += skb->data_len;
1373 - skb->data_len = 0;
1374 + skb->next = nskb->next;
1375 + nskb->next = NULL;
1376 + kfree_skb(nskb);
1377 + } while (skb->next);
1379 + cb = DEV_GSO_CB(skb);
1380 + if (cb->destructor)
1381 + cb->destructor(skb);
1382 +}
1384 +/**
1385 + * dev_gso_segment - Perform emulated hardware segmentation on skb.
1386 + * @skb: buffer to segment
1387 + *
1388 + * This function segments the given skb and stores the list of segments
1389 + * in skb->next.
1390 + */
1391 +static int dev_gso_segment(struct sk_buff *skb)
1392 +{
1393 + struct net_device *dev = skb->dev;
1394 + struct sk_buff *segs;
1395 + int features = dev->features & ~(illegal_highdma(dev, skb) ?
1396 + NETIF_F_SG : 0);
1398 + segs = skb_gso_segment(skb, features);
1400 + /* Verifying header integrity only. */
1401 + if (!segs)
1402 + return 0;
1404 + if (unlikely(IS_ERR(segs)))
1405 + return PTR_ERR(segs);
1407 + skb->next = segs;
1408 + DEV_GSO_CB(skb)->destructor = skb->destructor;
1409 + skb->destructor = dev_gso_skb_destructor;
1410 + return 0;
1411 +}
1413 +int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414 +{
1415 + if (likely(!skb->next)) {
1416 + if (netdev_nit)
1417 + dev_queue_xmit_nit(skb, dev);
1419 + if (netif_needs_gso(dev, skb)) {
1420 + if (unlikely(dev_gso_segment(skb)))
1421 + goto out_kfree_skb;
1422 + if (skb->next)
1423 + goto gso;
1424 + }
1426 + return dev->hard_start_xmit(skb, dev);
1427 + }
1429 +gso:
1430 + do {
1431 + struct sk_buff *nskb = skb->next;
1432 + int rc;
1434 + skb->next = nskb->next;
1435 + nskb->next = NULL;
1436 + rc = dev->hard_start_xmit(nskb, dev);
1437 + if (unlikely(rc)) {
1438 + nskb->next = skb->next;
1439 + skb->next = nskb;
1440 + return rc;
1441 + }
1442 + if (unlikely(netif_queue_stopped(dev) && skb->next))
1443 + return NETDEV_TX_BUSY;
1444 + } while (skb->next);
1446 + skb->destructor = DEV_GSO_CB(skb)->destructor;
1448 +out_kfree_skb:
1449 + kfree_skb(skb);
1450 return 0;
1451 }
1453 #define HARD_TX_LOCK(dev, cpu) { \
1454 if ((dev->features & NETIF_F_LLTX) == 0) { \
1455 - spin_lock(&dev->xmit_lock); \
1456 - dev->xmit_lock_owner = cpu; \
1457 + netif_tx_lock(dev); \
1458 } \
1461 #define HARD_TX_UNLOCK(dev) { \
1462 if ((dev->features & NETIF_F_LLTX) == 0) { \
1463 - dev->xmit_lock_owner = -1; \
1464 - spin_unlock(&dev->xmit_lock); \
1465 + netif_tx_unlock(dev); \
1466 } \
1469 @@ -1247,9 +1318,13 @@
1470 struct Qdisc *q;
1471 int rc = -ENOMEM;
1473 + /* GSO will handle the following emulations directly. */
1474 + if (netif_needs_gso(dev, skb))
1475 + goto gso;
1477 if (skb_shinfo(skb)->frag_list &&
1478 !(dev->features & NETIF_F_FRAGLIST) &&
1479 - __skb_linearize(skb, GFP_ATOMIC))
1480 + __skb_linearize(skb))
1481 goto out_kfree_skb;
1483 /* Fragmented skb is linearized if device does not support SG,
1484 @@ -1258,25 +1333,26 @@
1485 */
1486 if (skb_shinfo(skb)->nr_frags &&
1487 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1488 - __skb_linearize(skb, GFP_ATOMIC))
1489 + __skb_linearize(skb))
1490 goto out_kfree_skb;
1492 /* If packet is not checksummed and device does not support
1493 * checksumming for this protocol, complete checksumming here.
1494 */
1495 if (skb->ip_summed == CHECKSUM_HW &&
1496 - (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
1497 + (!(dev->features & NETIF_F_GEN_CSUM) &&
1498 (!(dev->features & NETIF_F_IP_CSUM) ||
1499 skb->protocol != htons(ETH_P_IP))))
1500 if (skb_checksum_help(skb, 0))
1501 goto out_kfree_skb;
1503 +gso:
1504 spin_lock_prefetch(&dev->queue_lock);
1506 /* Disable soft irqs for various locks below. Also
1507 * stops preemption for RCU.
1508 */
1509 - local_bh_disable();
1510 + rcu_read_lock_bh();
1512 /* Updates of qdisc are serialized by queue_lock.
1513 * The struct Qdisc which is pointed to by qdisc is now a
1514 @@ -1310,8 +1386,8 @@
1515 /* The device has no queue. Common case for software devices:
1516 loopback, all the sorts of tunnels...
1518 - Really, it is unlikely that xmit_lock protection is necessary here.
1519 - (f.e. loopback and IP tunnels are clean ignoring statistics
1520 + Really, it is unlikely that netif_tx_lock protection is necessary
1521 + here. (f.e. loopback and IP tunnels are clean ignoring statistics
1522 counters.)
1523 However, it is possible, that they rely on protection
1524 made by us here.
1525 @@ -1327,11 +1403,8 @@
1526 HARD_TX_LOCK(dev, cpu);
1528 if (!netif_queue_stopped(dev)) {
1529 - if (netdev_nit)
1530 - dev_queue_xmit_nit(skb, dev);
1532 rc = 0;
1533 - if (!dev->hard_start_xmit(skb, dev)) {
1534 + if (!dev_hard_start_xmit(skb, dev)) {
1535 HARD_TX_UNLOCK(dev);
1536 goto out;
1538 @@ -1350,13 +1423,13 @@
1541 rc = -ENETDOWN;
1542 - local_bh_enable();
1543 + rcu_read_unlock_bh();
1545 out_kfree_skb:
1546 kfree_skb(skb);
1547 return rc;
1548 out:
1549 - local_bh_enable();
1550 + rcu_read_unlock_bh();
1551 return rc;
1552 }
1554 @@ -2671,7 +2744,7 @@
1555 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
1557 spin_lock_init(&dev->queue_lock);
1558 - spin_lock_init(&dev->xmit_lock);
1559 + spin_lock_init(&dev->_xmit_lock);
1560 dev->xmit_lock_owner = -1;
1561 #ifdef CONFIG_NET_CLS_ACT
1562 spin_lock_init(&dev->ingress_lock);
1563 @@ -2715,9 +2788,7 @@
1565 /* Fix illegal SG+CSUM combinations. */
1566 if ((dev->features & NETIF_F_SG) &&
1567 - !(dev->features & (NETIF_F_IP_CSUM |
1568 - NETIF_F_NO_CSUM |
1569 - NETIF_F_HW_CSUM))) {
1570 + !(dev->features & NETIF_F_ALL_CSUM)) {
1571 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
1572 dev->name);
1573 dev->features &= ~NETIF_F_SG;
1574 @@ -3269,7 +3340,6 @@
1575 EXPORT_SYMBOL(__dev_get_by_index);
1576 EXPORT_SYMBOL(__dev_get_by_name);
1577 EXPORT_SYMBOL(__dev_remove_pack);
1578 -EXPORT_SYMBOL(__skb_linearize);
1579 EXPORT_SYMBOL(dev_valid_name);
1580 EXPORT_SYMBOL(dev_add_pack);
1581 EXPORT_SYMBOL(dev_alloc_name);
1582 Index: tmp-xxx/net/core/dev_mcast.c
1583 ===================================================================
1584 --- tmp-xxx.orig/net/core/dev_mcast.c 2006-11-15 10:38:39.000000000 +0000
1585 +++ tmp-xxx/net/core/dev_mcast.c 2006-11-27 10:52:42.000000000 +0000
1586 @@ -62,7 +62,7 @@
1587 * Device mc lists are changed by bh at least if IPv6 is enabled,
1588 * so that it must be bh protected.
1590 - * We block accesses to device mc filters with dev->xmit_lock.
1591 + * We block accesses to device mc filters with netif_tx_lock.
1592 */
1594 /*
1595 @@ -93,9 +93,9 @@
1597 void dev_mc_upload(struct net_device *dev)
1598 {
1599 - spin_lock_bh(&dev->xmit_lock);
1600 + netif_tx_lock_bh(dev);
1601 __dev_mc_upload(dev);
1602 - spin_unlock_bh(&dev->xmit_lock);
1603 + netif_tx_unlock_bh(dev);
1606 /*
1607 @@ -107,7 +107,7 @@
1608 int err = 0;
1609 struct dev_mc_list *dmi, **dmip;
1611 - spin_lock_bh(&dev->xmit_lock);
1612 + netif_tx_lock_bh(dev);
1614 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
1615 /*
1616 @@ -139,13 +139,13 @@
1617 */
1618 __dev_mc_upload(dev);
1620 - spin_unlock_bh(&dev->xmit_lock);
1621 + netif_tx_unlock_bh(dev);
1622 return 0;
1625 err = -ENOENT;
1626 done:
1627 - spin_unlock_bh(&dev->xmit_lock);
1628 + netif_tx_unlock_bh(dev);
1629 return err;
1632 @@ -160,7 +160,7 @@
1634 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
1636 - spin_lock_bh(&dev->xmit_lock);
1637 + netif_tx_lock_bh(dev);
1638 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
1639 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
1640 dmi->dmi_addrlen == alen) {
1641 @@ -176,7 +176,7 @@
1644 if ((dmi = dmi1) == NULL) {
1645 - spin_unlock_bh(&dev->xmit_lock);
1646 + netif_tx_unlock_bh(dev);
1647 return -ENOMEM;
1649 memcpy(dmi->dmi_addr, addr, alen);
1650 @@ -189,11 +189,11 @@
1652 __dev_mc_upload(dev);
1654 - spin_unlock_bh(&dev->xmit_lock);
1655 + netif_tx_unlock_bh(dev);
1656 return 0;
1658 done:
1659 - spin_unlock_bh(&dev->xmit_lock);
1660 + netif_tx_unlock_bh(dev);
1661 kfree(dmi1);
1662 return err;
1664 @@ -204,7 +204,7 @@
1666 void dev_mc_discard(struct net_device *dev)
1668 - spin_lock_bh(&dev->xmit_lock);
1669 + netif_tx_lock_bh(dev);
1671 while (dev->mc_list != NULL) {
1672 struct dev_mc_list *tmp = dev->mc_list;
1673 @@ -215,7 +215,7 @@
1675 dev->mc_count = 0;
1677 - spin_unlock_bh(&dev->xmit_lock);
1678 + netif_tx_unlock_bh(dev);
1681 #ifdef CONFIG_PROC_FS
1682 @@ -250,7 +250,7 @@
1683 struct dev_mc_list *m;
1684 struct net_device *dev = v;
1686 - spin_lock_bh(&dev->xmit_lock);
1687 + netif_tx_lock_bh(dev);
1688 for (m = dev->mc_list; m; m = m->next) {
1689 int i;
1691 @@ -262,7 +262,7 @@
1693 seq_putc(seq, '\n');
1695 - spin_unlock_bh(&dev->xmit_lock);
1696 + netif_tx_unlock_bh(dev);
1697 return 0;
1700 Index: tmp-xxx/net/core/ethtool.c
1701 ===================================================================
1702 --- tmp-xxx.orig/net/core/ethtool.c 2006-11-15 10:38:39.000000000 +0000
1703 +++ tmp-xxx/net/core/ethtool.c 2006-11-27 10:52:42.000000000 +0000
1704 @@ -30,7 +30,7 @@
1706 u32 ethtool_op_get_tx_csum(struct net_device *dev)
1708 - return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
1709 + return (dev->features & NETIF_F_ALL_CSUM) != 0;
1712 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1713 @@ -551,9 +551,7 @@
1714 return -EFAULT;
1716 if (edata.data &&
1717 - !(dev->features & (NETIF_F_IP_CSUM |
1718 - NETIF_F_NO_CSUM |
1719 - NETIF_F_HW_CSUM)))
1720 + !(dev->features & NETIF_F_ALL_CSUM))
1721 return -EINVAL;
1723 return __ethtool_set_sg(dev, edata.data);
1724 @@ -561,7 +559,7 @@
1726 static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
1728 - struct ethtool_value edata = { ETHTOOL_GTSO };
1729 + struct ethtool_value edata = { ETHTOOL_GUFO };
1731 if (!dev->ethtool_ops->get_ufo)
1732 return -EOPNOTSUPP;
1733 @@ -616,6 +614,29 @@
1734 return dev->ethtool_ops->set_ufo(dev, edata.data);
1737 +static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
1738 +{
1739 + struct ethtool_value edata = { ETHTOOL_GGSO };
1741 + edata.data = dev->features & NETIF_F_GSO;
1742 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
1743 + return -EFAULT;
1744 + return 0;
1745 +}
1747 +static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
1748 +{
1749 + struct ethtool_value edata;
1751 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
1752 + return -EFAULT;
1753 + if (edata.data)
1754 + dev->features |= NETIF_F_GSO;
1755 + else
1756 + dev->features &= ~NETIF_F_GSO;
1757 + return 0;
1758 +}
1760 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
1762 struct ethtool_test test;
1763 @@ -907,6 +928,12 @@
1764 case ETHTOOL_SUFO:
1765 rc = ethtool_set_ufo(dev, useraddr);
1766 break;
1767 + case ETHTOOL_GGSO:
1768 + rc = ethtool_get_gso(dev, useraddr);
1769 + break;
1770 + case ETHTOOL_SGSO:
1771 + rc = ethtool_set_gso(dev, useraddr);
1772 + break;
1773 default:
1774 rc = -EOPNOTSUPP;
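For context, a hedged sketch of how userspace could exercise the new ETHTOOL_GGSO/ETHTOOL_SGSO commands through the SIOCETHTOOL ioctl; the helper name is illustrative and assumes headers that already carry the new command numbers:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Toggle generic segmentation offload on an interface; 'fd' is any
 * AF_INET datagram socket, and the return value is the raw ioctl() rc. */
static int gso_set(int fd, const char *ifname, __u32 on)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_SGSO, .data = on };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}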
1776 Index: tmp-xxx/net/core/netpoll.c
1777 ===================================================================
1778 --- tmp-xxx.orig/net/core/netpoll.c 2006-11-15 10:38:39.000000000 +0000
1779 +++ tmp-xxx/net/core/netpoll.c 2006-11-27 10:52:42.000000000 +0000
1780 @@ -273,24 +273,21 @@
1782 do {
1783 npinfo->tries--;
1784 - spin_lock(&np->dev->xmit_lock);
1785 - np->dev->xmit_lock_owner = smp_processor_id();
1786 + netif_tx_lock(np->dev);
1788 /*
1789 * network drivers do not expect to be called if the queue is
1790 * stopped.
1791 */
1792 if (netif_queue_stopped(np->dev)) {
1793 - np->dev->xmit_lock_owner = -1;
1794 - spin_unlock(&np->dev->xmit_lock);
1795 + netif_tx_unlock(np->dev);
1796 netpoll_poll(np);
1797 udelay(50);
1798 continue;
1801 status = np->dev->hard_start_xmit(skb, np->dev);
1802 - np->dev->xmit_lock_owner = -1;
1803 - spin_unlock(&np->dev->xmit_lock);
1804 + netif_tx_unlock(np->dev);
1806 /* success */
1807 if(!status) {
1808 Index: tmp-xxx/net/core/pktgen.c
1809 ===================================================================
1810 --- tmp-xxx.orig/net/core/pktgen.c 2006-11-15 10:38:39.000000000 +0000
1811 +++ tmp-xxx/net/core/pktgen.c 2006-11-27 10:52:42.000000000 +0000
1812 @@ -2586,7 +2586,7 @@
1816 - spin_lock_bh(&odev->xmit_lock);
1817 + netif_tx_lock_bh(odev);
1818 if (!netif_queue_stopped(odev)) {
1820 atomic_inc(&(pkt_dev->skb->users));
1821 @@ -2631,7 +2631,7 @@
1822 pkt_dev->next_tx_ns = 0;
1825 - spin_unlock_bh(&odev->xmit_lock);
1826 + netif_tx_unlock_bh(odev);
1828 /* If pkt_dev->count is zero, then run forever */
1829 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
1830 Index: tmp-xxx/net/core/skbuff.c
1831 ===================================================================
1832 --- tmp-xxx.orig/net/core/skbuff.c 2006-11-15 10:38:39.000000000 +0000
1833 +++ tmp-xxx/net/core/skbuff.c 2006-11-27 10:58:31.000000000 +0000
1834 @@ -164,9 +164,9 @@
1835 shinfo = skb_shinfo(skb);
1836 atomic_set(&shinfo->dataref, 1);
1837 shinfo->nr_frags = 0;
1838 - shinfo->tso_size = 0;
1839 - shinfo->tso_segs = 0;
1840 - shinfo->ufo_size = 0;
1841 + shinfo->gso_size = 0;
1842 + shinfo->gso_segs = 0;
1843 + shinfo->gso_type = 0;
1844 shinfo->ip6_frag_id = 0;
1845 shinfo->frag_list = NULL;
1847 @@ -230,9 +230,9 @@
1849 atomic_set(&(skb_shinfo(skb)->dataref), 1);
1850 skb_shinfo(skb)->nr_frags = 0;
1851 - skb_shinfo(skb)->tso_size = 0;
1852 - skb_shinfo(skb)->tso_segs = 0;
1853 - skb_shinfo(skb)->ufo_size = 0;
1854 + skb_shinfo(skb)->gso_size = 0;
1855 + skb_shinfo(skb)->gso_segs = 0;
1856 + skb_shinfo(skb)->gso_type = 0;
1857 skb_shinfo(skb)->frag_list = NULL;
1858 out:
1859 return skb;
1860 @@ -507,9 +507,9 @@
1861 new->tc_index = old->tc_index;
1862 #endif
1863 atomic_set(&new->users, 1);
1864 - skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
1865 - skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
1866 - skb_shinfo(new)->ufo_size = skb_shinfo(old)->ufo_size;
1867 + skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1868 + skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1869 + skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1872 /**
1873 @@ -1822,6 +1822,133 @@
1874 return 0;
1877 +/**
1878 + * skb_segment - Perform protocol segmentation on skb.
1879 + * @skb: buffer to segment
1880 + * @features: features for the output path (see dev->features)
1881 + *
1882 + * This function performs segmentation on the given skb. It returns
1883 + * a pointer to the first of the generated segments, linked through
1884 + * skb->next, or an ERR_PTR-encoded error when segmentation fails.
1885 + */
1886 +struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1887 +{
1888 + struct sk_buff *segs = NULL;
1889 + struct sk_buff *tail = NULL;
1890 + unsigned int mss = skb_shinfo(skb)->gso_size;
1891 + unsigned int doffset = skb->data - skb->mac.raw;
1892 + unsigned int offset = doffset;
1893 + unsigned int headroom;
1894 + unsigned int len;
1895 + int sg = features & NETIF_F_SG;
1896 + int nfrags = skb_shinfo(skb)->nr_frags;
1897 + int err = -ENOMEM;
1898 + int i = 0;
1899 + int pos;
1901 + __skb_push(skb, doffset);
1902 + headroom = skb_headroom(skb);
1903 + pos = skb_headlen(skb);
1905 + do {
1906 + struct sk_buff *nskb;
1907 + skb_frag_t *frag;
1908 + int hsize, nsize;
1909 + int k;
1910 + int size;
1912 + len = skb->len - offset;
1913 + if (len > mss)
1914 + len = mss;
1916 + hsize = skb_headlen(skb) - offset;
1917 + if (hsize < 0)
1918 + hsize = 0;
1919 + nsize = hsize + doffset;
1920 + if (nsize > len + doffset || !sg)
1921 + nsize = len + doffset;
1923 + nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
1924 + if (unlikely(!nskb))
1925 + goto err;
1927 + if (segs)
1928 + tail->next = nskb;
1929 + else
1930 + segs = nskb;
1931 + tail = nskb;
1933 + nskb->dev = skb->dev;
1934 + nskb->priority = skb->priority;
1935 + nskb->protocol = skb->protocol;
1936 + nskb->dst = dst_clone(skb->dst);
1937 + memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
1938 + nskb->pkt_type = skb->pkt_type;
1939 + nskb->mac_len = skb->mac_len;
1941 + skb_reserve(nskb, headroom);
1942 + nskb->mac.raw = nskb->data;
1943 + nskb->nh.raw = nskb->data + skb->mac_len;
1944 + nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
1945 + memcpy(skb_put(nskb, doffset), skb->data, doffset);
1947 + if (!sg) {
1948 + nskb->csum = skb_copy_and_csum_bits(skb, offset,
1949 + skb_put(nskb, len),
1950 + len, 0);
1951 + continue;
1952 + }
1954 + frag = skb_shinfo(nskb)->frags;
1955 + k = 0;
1957 + nskb->ip_summed = CHECKSUM_HW;
1958 + nskb->csum = skb->csum;
1959 + memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
1961 + while (pos < offset + len) {
1962 + BUG_ON(i >= nfrags);
1964 + *frag = skb_shinfo(skb)->frags[i];
1965 + get_page(frag->page);
1966 + size = frag->size;
1968 + if (pos < offset) {
1969 + frag->page_offset += offset - pos;
1970 + frag->size -= offset - pos;
1971 + }
1973 + k++;
1975 + if (pos + size <= offset + len) {
1976 + i++;
1977 + pos += size;
1978 + } else {
1979 + frag->size -= pos + size - (offset + len);
1980 + break;
1981 + }
1983 + frag++;
1984 + }
1986 + skb_shinfo(nskb)->nr_frags = k;
1987 + nskb->data_len = len - hsize;
1988 + nskb->len += nskb->data_len;
1989 + nskb->truesize += nskb->data_len;
1990 + } while ((offset += len) < skb->len);
1992 + return segs;
1994 +err:
1995 + while ((skb = segs)) {
1996 + segs = skb->next;
1997 + kfree_skb(skb);
1998 + }
1999 + return ERR_PTR(err);
2000 +}
2002 +EXPORT_SYMBOL_GPL(skb_segment);
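A minimal sketch of consuming skb_segment()'s result, assuming only what the function above guarantees: segments come back as a singly linked list through skb->next, or as an ERR_PTR on failure. (In this patch the real consumer is dev_hard_start_xmit() in net/core/dev.c; the helper below is illustrative.)

static int xmit_segments(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs = skb_segment(skb, dev->features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;	/* drivers expect single skbs */
		dev->hard_start_xmit(segs, dev);
		segs = next;
	}
	return 0;
}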
2004 void __init skb_init(void)
2006 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2007 Index: tmp-xxx/net/decnet/dn_nsp_in.c
2008 ===================================================================
2009 --- tmp-xxx.orig/net/decnet/dn_nsp_in.c 2006-11-15 10:38:39.000000000 +0000
2010 +++ tmp-xxx/net/decnet/dn_nsp_in.c 2006-11-27 10:52:42.000000000 +0000
2011 @@ -801,8 +801,7 @@
2012 * We linearize everything except data segments here.
2013 */
2014 if (cb->nsp_flags & ~0x60) {
2015 - if (unlikely(skb_is_nonlinear(skb)) &&
2016 - skb_linearize(skb, GFP_ATOMIC) != 0)
2017 + if (unlikely(skb_linearize(skb)))
2018 goto free_out;
2021 Index: tmp-xxx/net/decnet/dn_route.c
2022 ===================================================================
2023 --- tmp-xxx.orig/net/decnet/dn_route.c 2006-11-15 10:38:39.000000000 +0000
2024 +++ tmp-xxx/net/decnet/dn_route.c 2006-11-27 10:52:42.000000000 +0000
2025 @@ -629,8 +629,7 @@
2026 padlen);
2028 if (flags & DN_RT_PKT_CNTL) {
2029 - if (unlikely(skb_is_nonlinear(skb)) &&
2030 - skb_linearize(skb, GFP_ATOMIC) != 0)
2031 + if (unlikely(skb_linearize(skb)))
2032 goto dump_it;
2034 switch(flags & DN_RT_CNTL_MSK) {
2035 Index: tmp-xxx/net/ipv4/af_inet.c
2036 ===================================================================
2037 --- tmp-xxx.orig/net/ipv4/af_inet.c 2006-11-15 10:38:39.000000000 +0000
2038 +++ tmp-xxx/net/ipv4/af_inet.c 2006-11-27 10:52:42.000000000 +0000
2039 @@ -68,6 +68,7 @@
2040 */
2042 #include <linux/config.h>
2043 +#include <linux/err.h>
2044 #include <linux/errno.h>
2045 #include <linux/types.h>
2046 #include <linux/socket.h>
2047 @@ -1084,6 +1085,54 @@
2049 EXPORT_SYMBOL(inet_sk_rebuild_header);
2051 +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
2052 +{
2053 + struct sk_buff *segs = ERR_PTR(-EINVAL);
2054 + struct iphdr *iph;
2055 + struct net_protocol *ops;
2056 + int proto;
2057 + int ihl;
2058 + int id;
2060 + if (!pskb_may_pull(skb, sizeof(*iph)))
2061 + goto out;
2063 + iph = skb->nh.iph;
2064 + ihl = iph->ihl * 4;
2065 + if (ihl < sizeof(*iph))
2066 + goto out;
2068 + if (!pskb_may_pull(skb, ihl))
2069 + goto out;
2071 + skb->h.raw = __skb_pull(skb, ihl);
2072 + iph = skb->nh.iph;
2073 + id = ntohs(iph->id);
2074 + proto = iph->protocol & (MAX_INET_PROTOS - 1);
2075 + segs = ERR_PTR(-EPROTONOSUPPORT);
2077 + rcu_read_lock();
2078 + ops = rcu_dereference(inet_protos[proto]);
2079 + if (ops && ops->gso_segment)
2080 + segs = ops->gso_segment(skb, features);
2081 + rcu_read_unlock();
2083 + if (!segs || unlikely(IS_ERR(segs)))
2084 + goto out;
2086 + skb = segs;
2087 + do {
2088 + iph = skb->nh.iph;
2089 + iph->id = htons(id++);
2090 + iph->tot_len = htons(skb->len - skb->mac_len);
2091 + iph->check = 0;
2092 + iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
2093 + } while ((skb = skb->next));
2095 +out:
2096 + return segs;
2097 +}
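inet_gso_segment() dispatches through a gso_segment method that the include/net/protocol.h hunk of this patch (not shown in this excerpt) is assumed to add to the 2.6.16 structure, roughly:

struct net_protocol {
	int		(*handler)(struct sk_buff *skb);
	void		(*err_handler)(struct sk_buff *skb, u32 info);
	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
				       int features);	/* new in this patch */
	int		no_policy;
};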
2099 #ifdef CONFIG_IP_MULTICAST
2100 static struct net_protocol igmp_protocol = {
2101 .handler = igmp_rcv,
2102 @@ -1093,6 +1142,7 @@
2103 static struct net_protocol tcp_protocol = {
2104 .handler = tcp_v4_rcv,
2105 .err_handler = tcp_v4_err,
2106 + .gso_segment = tcp_tso_segment,
2107 .no_policy = 1,
2108 };
2110 @@ -1138,6 +1188,7 @@
2111 static struct packet_type ip_packet_type = {
2112 .type = __constant_htons(ETH_P_IP),
2113 .func = ip_rcv,
2114 + .gso_segment = inet_gso_segment,
2115 };
2117 static int __init inet_init(void)
2118 Index: tmp-xxx/net/ipv4/ip_output.c
2119 ===================================================================
2120 --- tmp-xxx.orig/net/ipv4/ip_output.c 2006-11-15 10:38:39.000000000 +0000
2121 +++ tmp-xxx/net/ipv4/ip_output.c 2006-11-27 10:52:42.000000000 +0000
2122 @@ -210,8 +210,7 @@
2123 return dst_output(skb);
2125 #endif
2126 - if (skb->len > dst_mtu(skb->dst) &&
2127 - !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
2128 + if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
2129 return ip_fragment(skb, ip_finish_output2);
2130 else
2131 return ip_finish_output2(skb);
2132 @@ -362,7 +361,7 @@
2135 ip_select_ident_more(iph, &rt->u.dst, sk,
2136 - (skb_shinfo(skb)->tso_segs ?: 1) - 1);
2137 + (skb_shinfo(skb)->gso_segs ?: 1) - 1);
2139 /* Add an IP checksum. */
2140 ip_send_check(iph);
2141 @@ -743,7 +742,8 @@
2142 (length - transhdrlen));
2143 if (!err) {
2144 /* specify the length of each IP datagram fragment*/
2145 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
2146 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
2147 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2148 __skb_queue_tail(&sk->sk_write_queue, skb);
2150 return 0;
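A worked example of the sizing above, under the usual assumption of a 20-byte IPv4 header: with a 1500-byte MTU, fragheaderlen is 20, so gso_size becomes 1480 and the UFO-capable device emits fragments on exactly the boundaries that software fragmentation would have chosen.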
2151 @@ -839,7 +839,7 @@
2152 */
2153 if (transhdrlen &&
2154 length + fragheaderlen <= mtu &&
2155 - rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
2156 + rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
2157 !exthdrlen)
2158 csummode = CHECKSUM_HW;
2160 @@ -1086,14 +1086,16 @@
2162 inet->cork.length += size;
2163 if ((sk->sk_protocol == IPPROTO_UDP) &&
2164 - (rt->u.dst.dev->features & NETIF_F_UFO))
2165 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
2166 + (rt->u.dst.dev->features & NETIF_F_UFO)) {
2167 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
2168 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2169 + }
2172 while (size > 0) {
2173 int i;
2175 - if (skb_shinfo(skb)->ufo_size)
2176 + if (skb_shinfo(skb)->gso_size)
2177 len = size;
2178 else {
2180 Index: tmp-xxx/net/ipv4/ipcomp.c
2181 ===================================================================
2182 --- tmp-xxx.orig/net/ipv4/ipcomp.c 2006-11-15 10:38:39.000000000 +0000
2183 +++ tmp-xxx/net/ipv4/ipcomp.c 2006-11-27 10:52:42.000000000 +0000
2184 @@ -84,7 +84,7 @@
2185 struct xfrm_decap_state *decap, struct sk_buff *skb)
2187 u8 nexthdr;
2188 - int err = 0;
2189 + int err = -ENOMEM;
2190 struct iphdr *iph;
2191 union {
2192 struct iphdr iph;
2193 @@ -92,11 +92,8 @@
2194 } tmp_iph;
2197 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2198 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2199 - err = -ENOMEM;
2200 + if (skb_linearize_cow(skb))
2201 goto out;
2202 - }
2204 skb->ip_summed = CHECKSUM_NONE;
2206 @@ -171,10 +168,8 @@
2207 goto out_ok;
2210 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2211 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2212 + if (skb_linearize_cow(skb))
2213 goto out_ok;
2214 - }
2216 err = ipcomp_compress(x, skb);
2217 iph = skb->nh.iph;
2218 Index: tmp-xxx/net/ipv4/tcp.c
2219 ===================================================================
2220 --- tmp-xxx.orig/net/ipv4/tcp.c 2006-11-15 10:38:39.000000000 +0000
2221 +++ tmp-xxx/net/ipv4/tcp.c 2006-11-27 10:52:42.000000000 +0000
2222 @@ -257,6 +257,7 @@
2223 #include <linux/fs.h>
2224 #include <linux/random.h>
2225 #include <linux/bootmem.h>
2226 +#include <linux/err.h>
2228 #include <net/icmp.h>
2229 #include <net/tcp.h>
2230 @@ -570,7 +571,7 @@
2231 skb->ip_summed = CHECKSUM_HW;
2232 tp->write_seq += copy;
2233 TCP_SKB_CB(skb)->end_seq += copy;
2234 - skb_shinfo(skb)->tso_segs = 0;
2235 + skb_shinfo(skb)->gso_segs = 0;
2237 if (!copied)
2238 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
2239 @@ -621,14 +622,10 @@
2240 ssize_t res;
2241 struct sock *sk = sock->sk;
2243 -#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
2245 if (!(sk->sk_route_caps & NETIF_F_SG) ||
2246 - !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
2247 + !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
2248 return sock_no_sendpage(sock, page, offset, size, flags);
2250 -#undef TCP_ZC_CSUM_FLAGS
2252 lock_sock(sk);
2253 TCP_CHECK_TIMER(sk);
2254 res = do_tcp_sendpages(sk, &page, offset, size, flags);
2255 @@ -725,9 +722,7 @@
2256 /*
2257 * Check whether we can use HW checksum.
2258 */
2259 - if (sk->sk_route_caps &
2260 - (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
2261 - NETIF_F_HW_CSUM))
2262 + if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
2263 skb->ip_summed = CHECKSUM_HW;
2265 skb_entail(sk, tp, skb);
2266 @@ -823,7 +818,7 @@
2268 tp->write_seq += copy;
2269 TCP_SKB_CB(skb)->end_seq += copy;
2270 - skb_shinfo(skb)->tso_segs = 0;
2271 + skb_shinfo(skb)->gso_segs = 0;
2273 from += copy;
2274 copied += copy;
2275 @@ -2026,6 +2021,71 @@
2279 +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2280 +{
2281 + struct sk_buff *segs = ERR_PTR(-EINVAL);
2282 + struct tcphdr *th;
2283 + unsigned thlen;
2284 + unsigned int seq;
2285 + unsigned int delta;
2286 + unsigned int oldlen;
2287 + unsigned int len;
2289 + if (!pskb_may_pull(skb, sizeof(*th)))
2290 + goto out;
2292 + th = skb->h.th;
2293 + thlen = th->doff * 4;
2294 + if (thlen < sizeof(*th))
2295 + goto out;
2297 + if (!pskb_may_pull(skb, thlen))
2298 + goto out;
2300 + segs = NULL;
2301 + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
2302 + goto out;
2304 + oldlen = (u16)~skb->len;
2305 + __skb_pull(skb, thlen);
2307 + segs = skb_segment(skb, features);
2308 + if (IS_ERR(segs))
2309 + goto out;
2311 + len = skb_shinfo(skb)->gso_size;
2312 + delta = htonl(oldlen + (thlen + len));
2314 + skb = segs;
2315 + th = skb->h.th;
2316 + seq = ntohl(th->seq);
2318 + do {
2319 + th->fin = th->psh = 0;
2321 + th->check = ~csum_fold(th->check + delta);
2322 + if (skb->ip_summed != CHECKSUM_HW)
2323 + th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2324 + skb->csum));
2326 + seq += len;
2327 + skb = skb->next;
2328 + th = skb->h.th;
2330 + th->seq = htonl(seq);
2331 + th->cwr = 0;
2332 + } while (skb->next);
2334 + delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
2335 + th->check = ~csum_fold(th->check + delta);
2336 + if (skb->ip_summed != CHECKSUM_HW)
2337 + th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2338 + skb->csum));
2340 +out:
2341 + return segs;
2342 +}
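The ones-complement arithmetic above is terse enough to deserve a gloss; an annotated restatement (commentary only, not part of the patch):

/*
 * The TCP checksum covers a pseudo-header that includes the TCP length,
 * so each segment's checksum can be patched instead of recomputed:
 *
 *   oldlen = (u16)~skb->len;                  adding ~x in ones-complement
 *                                             arithmetic subtracts x, so this
 *                                             removes the superpacket length
 *   delta  = htonl(oldlen + (thlen + len));   ...and adds the per-segment
 *                                             length (header plus one MSS)
 *   check  = ~csum_fold(check + delta);       fold back into 16 bits
 *
 * The second delta after the loop repeats this for the final segment,
 * whose payload is whatever remains: (skb->tail - skb->h.raw) + data_len.
 */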
2344 extern void __skb_cb_too_small_for_tcp(int, int);
2345 extern struct tcp_congestion_ops tcp_reno;
2347 Index: tmp-xxx/net/ipv4/tcp_input.c
2348 ===================================================================
2349 --- tmp-xxx.orig/net/ipv4/tcp_input.c 2006-11-15 10:38:39.000000000 +0000
2350 +++ tmp-xxx/net/ipv4/tcp_input.c 2006-11-27 10:52:42.000000000 +0000
2351 @@ -1072,7 +1072,7 @@
2352 else
2353 pkt_len = (end_seq -
2354 TCP_SKB_CB(skb)->seq);
2355 - if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
2356 + if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
2357 break;
2358 pcount = tcp_skb_pcount(skb);
2360 Index: tmp-xxx/net/ipv4/tcp_output.c
2361 ===================================================================
2362 --- tmp-xxx.orig/net/ipv4/tcp_output.c 2006-11-15 10:38:39.000000000 +0000
2363 +++ tmp-xxx/net/ipv4/tcp_output.c 2006-11-27 10:52:42.000000000 +0000
2364 @@ -497,15 +497,17 @@
2365 /* Avoid the costly divide in the normal
2366 * non-TSO case.
2367 */
2368 - skb_shinfo(skb)->tso_segs = 1;
2369 - skb_shinfo(skb)->tso_size = 0;
2370 + skb_shinfo(skb)->gso_segs = 1;
2371 + skb_shinfo(skb)->gso_size = 0;
2372 + skb_shinfo(skb)->gso_type = 0;
2373 } else {
2374 unsigned int factor;
2376 factor = skb->len + (mss_now - 1);
2377 factor /= mss_now;
2378 - skb_shinfo(skb)->tso_segs = factor;
2379 - skb_shinfo(skb)->tso_size = mss_now;
2380 + skb_shinfo(skb)->gso_segs = factor;
2381 + skb_shinfo(skb)->gso_size = mss_now;
2382 + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2386 @@ -850,7 +852,7 @@
2388 if (!tso_segs ||
2389 (tso_segs > 1 &&
2390 - skb_shinfo(skb)->tso_size != mss_now)) {
2391 + tcp_skb_mss(skb) != mss_now)) {
2392 tcp_set_skb_tso_segs(sk, skb, mss_now);
2393 tso_segs = tcp_skb_pcount(skb);
2395 @@ -1510,8 +1512,9 @@
2396 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2397 if (!pskb_trim(skb, 0)) {
2398 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
2399 - skb_shinfo(skb)->tso_segs = 1;
2400 - skb_shinfo(skb)->tso_size = 0;
2401 + skb_shinfo(skb)->gso_segs = 1;
2402 + skb_shinfo(skb)->gso_size = 0;
2403 + skb_shinfo(skb)->gso_type = 0;
2404 skb->ip_summed = CHECKSUM_NONE;
2405 skb->csum = 0;
2407 @@ -1716,8 +1719,9 @@
2408 skb->csum = 0;
2409 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2410 TCP_SKB_CB(skb)->sacked = 0;
2411 - skb_shinfo(skb)->tso_segs = 1;
2412 - skb_shinfo(skb)->tso_size = 0;
2413 + skb_shinfo(skb)->gso_segs = 1;
2414 + skb_shinfo(skb)->gso_size = 0;
2415 + skb_shinfo(skb)->gso_type = 0;
2417 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2418 TCP_SKB_CB(skb)->seq = tp->write_seq;
2419 @@ -1749,8 +1753,9 @@
2420 skb->csum = 0;
2421 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2422 TCP_SKB_CB(skb)->sacked = 0;
2423 - skb_shinfo(skb)->tso_segs = 1;
2424 - skb_shinfo(skb)->tso_size = 0;
2425 + skb_shinfo(skb)->gso_segs = 1;
2426 + skb_shinfo(skb)->gso_size = 0;
2427 + skb_shinfo(skb)->gso_type = 0;
2429 /* Send it off. */
2430 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
2431 @@ -1833,8 +1838,9 @@
2432 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2433 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2434 TCP_SKB_CB(skb)->sacked = 0;
2435 - skb_shinfo(skb)->tso_segs = 1;
2436 - skb_shinfo(skb)->tso_size = 0;
2437 + skb_shinfo(skb)->gso_segs = 1;
2438 + skb_shinfo(skb)->gso_size = 0;
2439 + skb_shinfo(skb)->gso_type = 0;
2440 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2441 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2442 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2443 @@ -1937,8 +1943,9 @@
2444 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2445 TCP_ECN_send_syn(sk, tp, buff);
2446 TCP_SKB_CB(buff)->sacked = 0;
2447 - skb_shinfo(buff)->tso_segs = 1;
2448 - skb_shinfo(buff)->tso_size = 0;
2449 + skb_shinfo(buff)->gso_segs = 1;
2450 + skb_shinfo(buff)->gso_size = 0;
2451 + skb_shinfo(buff)->gso_type = 0;
2452 buff->csum = 0;
2453 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2454 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2455 @@ -2042,8 +2049,9 @@
2456 buff->csum = 0;
2457 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2458 TCP_SKB_CB(buff)->sacked = 0;
2459 - skb_shinfo(buff)->tso_segs = 1;
2460 - skb_shinfo(buff)->tso_size = 0;
2461 + skb_shinfo(buff)->gso_segs = 1;
2462 + skb_shinfo(buff)->gso_size = 0;
2463 + skb_shinfo(buff)->gso_type = 0;
2465 /* Send it off, this clears delayed acks for us. */
2466 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2467 @@ -2078,8 +2086,9 @@
2468 skb->csum = 0;
2469 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2470 TCP_SKB_CB(skb)->sacked = urgent;
2471 - skb_shinfo(skb)->tso_segs = 1;
2472 - skb_shinfo(skb)->tso_size = 0;
2473 + skb_shinfo(skb)->gso_segs = 1;
2474 + skb_shinfo(skb)->gso_size = 0;
2475 + skb_shinfo(skb)->gso_type = 0;
2477 /* Use a previous sequence. This should cause the other
2478 * end to send an ack. Don't queue or clone SKB, just
2479 Index: tmp-xxx/net/ipv4/xfrm4_output.c
2480 ===================================================================
2481 --- tmp-xxx.orig/net/ipv4/xfrm4_output.c 2006-11-27 10:52:32.000000000 +0000
2482 +++ tmp-xxx/net/ipv4/xfrm4_output.c 2006-11-27 10:52:42.000000000 +0000
2483 @@ -9,6 +9,8 @@
2484 */
2486 #include <linux/compiler.h>
2487 +#include <linux/if_ether.h>
2488 +#include <linux/kernel.h>
2489 #include <linux/skbuff.h>
2490 #include <linux/spinlock.h>
2491 #include <linux/netfilter_ipv4.h>
2492 @@ -158,16 +160,10 @@
2493 goto out_exit;
2496 -static int xfrm4_output_finish(struct sk_buff *skb)
2497 +static int xfrm4_output_finish2(struct sk_buff *skb)
2499 int err;
2501 -#ifdef CONFIG_NETFILTER
2502 - if (!skb->dst->xfrm) {
2503 - IPCB(skb)->flags |= IPSKB_REROUTED;
2504 - return dst_output(skb);
2505 - }
2506 -#endif
2507 while (likely((err = xfrm4_output_one(skb)) == 0)) {
2508 nf_reset(skb);
2510 @@ -180,7 +176,7 @@
2511 return dst_output(skb);
2513 err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
2514 - skb->dst->dev, xfrm4_output_finish);
2515 + skb->dst->dev, xfrm4_output_finish2);
2516 if (unlikely(err != 1))
2517 break;
2519 @@ -188,6 +184,48 @@
2520 return err;
2523 +static int xfrm4_output_finish(struct sk_buff *skb)
2524 +{
2525 + struct sk_buff *segs;
2527 +#ifdef CONFIG_NETFILTER
2528 + if (!skb->dst->xfrm) {
2529 + IPCB(skb)->flags |= IPSKB_REROUTED;
2530 + return dst_output(skb);
2531 + }
2532 +#endif
2534 + if (!skb_shinfo(skb)->gso_size)
2535 + return xfrm4_output_finish2(skb);
2537 + skb->protocol = htons(ETH_P_IP);
2538 + segs = skb_gso_segment(skb, 0);
2539 + kfree_skb(skb);
2540 + if (unlikely(IS_ERR(segs)))
2541 + return PTR_ERR(segs);
2543 + do {
2544 + struct sk_buff *nskb = segs->next;
2545 + int err;
2547 + segs->next = NULL;
2548 + err = xfrm4_output_finish2(segs);
2550 + if (unlikely(err)) {
2551 + while ((segs = nskb)) {
2552 + nskb = segs->next;
2553 + segs->next = NULL;
2554 + kfree_skb(segs);
2555 + }
2556 + return err;
2557 + }
2559 + segs = nskb;
2560 + } while (segs);
2562 + return 0;
2563 +}
2565 int xfrm4_output(struct sk_buff *skb)
2567 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
2568 Index: tmp-xxx/net/ipv6/ip6_output.c
2569 ===================================================================
2570 --- tmp-xxx.orig/net/ipv6/ip6_output.c 2006-11-15 10:38:39.000000000 +0000
2571 +++ tmp-xxx/net/ipv6/ip6_output.c 2006-11-27 10:52:42.000000000 +0000
2572 @@ -147,7 +147,7 @@
2574 int ip6_output(struct sk_buff *skb)
2576 - if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
2577 + if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
2578 dst_allfrag(skb->dst))
2579 return ip6_fragment(skb, ip6_output2);
2580 else
2581 @@ -829,8 +829,9 @@
2582 struct frag_hdr fhdr;
2584 /* specify the length of each IP datagram fragment*/
2585 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
2586 - sizeof(struct frag_hdr);
2587 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
2588 + sizeof(struct frag_hdr);
2589 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2590 ipv6_select_ident(skb, &fhdr);
2591 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
2592 __skb_queue_tail(&sk->sk_write_queue, skb);
2593 Index: tmp-xxx/net/ipv6/ipcomp6.c
2594 ===================================================================
2595 --- tmp-xxx.orig/net/ipv6/ipcomp6.c 2006-11-15 10:38:39.000000000 +0000
2596 +++ tmp-xxx/net/ipv6/ipcomp6.c 2006-11-27 10:52:42.000000000 +0000
2597 @@ -64,7 +64,7 @@
2599 static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
2601 - int err = 0;
2602 + int err = -ENOMEM;
2603 u8 nexthdr = 0;
2604 int hdr_len = skb->h.raw - skb->nh.raw;
2605 unsigned char *tmp_hdr = NULL;
2606 @@ -75,11 +75,8 @@
2607 struct crypto_tfm *tfm;
2608 int cpu;
2610 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2611 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2612 - err = -ENOMEM;
2613 + if (skb_linearize_cow(skb))
2614 goto out;
2615 - }
2617 skb->ip_summed = CHECKSUM_NONE;
2619 @@ -158,10 +155,8 @@
2620 goto out_ok;
2623 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2624 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2625 + if (skb_linearize_cow(skb))
2626 goto out_ok;
2627 - }
2629 /* compression */
2630 plen = skb->len - hdr_len;
2631 Index: tmp-xxx/net/ipv6/xfrm6_output.c
2632 ===================================================================
2633 --- tmp-xxx.orig/net/ipv6/xfrm6_output.c 2006-11-15 10:38:39.000000000 +0000
2634 +++ tmp-xxx/net/ipv6/xfrm6_output.c 2006-11-27 10:52:42.000000000 +0000
2635 @@ -151,7 +151,7 @@
2636 goto out_exit;
2639 -static int xfrm6_output_finish(struct sk_buff *skb)
2640 +static int xfrm6_output_finish2(struct sk_buff *skb)
2642 int err;
2644 @@ -167,7 +167,7 @@
2645 return dst_output(skb);
2647 err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
2648 - skb->dst->dev, xfrm6_output_finish);
2649 + skb->dst->dev, xfrm6_output_finish2);
2650 if (unlikely(err != 1))
2651 break;
2653 @@ -175,6 +175,41 @@
2654 return err;
2657 +static int xfrm6_output_finish(struct sk_buff *skb)
2658 +{
2659 + struct sk_buff *segs;
2661 + if (!skb_shinfo(skb)->gso_size)
2662 + return xfrm6_output_finish2(skb);
2537 + skb->protocol = htons(ETH_P_IPV6);
2665 + segs = skb_gso_segment(skb, 0);
2666 + kfree_skb(skb);
2667 + if (unlikely(IS_ERR(segs)))
2668 + return PTR_ERR(segs);
2670 + do {
2671 + struct sk_buff *nskb = segs->next;
2672 + int err;
2674 + segs->next = NULL;
2675 + err = xfrm6_output_finish2(segs);
2677 + if (unlikely(err)) {
2678 + while ((segs = nskb)) {
2679 + nskb = segs->next;
2680 + segs->next = NULL;
2681 + kfree_skb(segs);
2682 + }
2683 + return err;
2684 + }
2686 + segs = nskb;
2687 + } while (segs);
2689 + return 0;
2690 +}
2692 int xfrm6_output(struct sk_buff *skb)
2694 return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
2695 Index: tmp-xxx/net/sched/sch_generic.c
2696 ===================================================================
2697 --- tmp-xxx.orig/net/sched/sch_generic.c 2006-11-15 10:38:39.000000000 +0000
2698 +++ tmp-xxx/net/sched/sch_generic.c 2006-11-27 10:52:42.000000000 +0000
2699 @@ -72,9 +72,9 @@
2700 dev->queue_lock serializes queue accesses for this device
2701 AND dev->qdisc pointer itself.
2703 - dev->xmit_lock serializes accesses to device driver.
2704 + netif_tx_lock serializes accesses to device driver.
2706 - dev->queue_lock and dev->xmit_lock are mutually exclusive,
2707 + dev->queue_lock and netif_tx_lock are mutually exclusive,
2708 if one is grabbed, another must be free.
2709 */
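A compressed sketch of how qdisc_restart() below honors that rule; the trylock is what prevents a lock-order deadlock between the two locks (requeue path elided):

	spin_lock(&dev->queue_lock);
	skb = q->dequeue(q);			/* queue state: queue_lock only */
	if (netif_tx_trylock(dev)) {		/* never spin on the tx lock    */
		spin_unlock(&dev->queue_lock);
		dev_hard_start_xmit(skb, dev);	/* driver entry: tx lock only   */
		netif_tx_unlock(dev);
		spin_lock(&dev->queue_lock);
	}
	spin_unlock(&dev->queue_lock);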
2711 @@ -90,14 +90,17 @@
2712 NOTE: Called under dev->queue_lock with locally disabled BH.
2713 */
2715 -int qdisc_restart(struct net_device *dev)
2716 +static inline int qdisc_restart(struct net_device *dev)
2718 struct Qdisc *q = dev->qdisc;
2719 struct sk_buff *skb;
2721 /* Dequeue packet */
2722 - if ((skb = q->dequeue(q)) != NULL) {
2723 + if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
2724 unsigned nolock = (dev->features & NETIF_F_LLTX);
2726 + dev->gso_skb = NULL;
2728 /*
2729 * When the driver has LLTX set it does its own locking
2730 * in start_xmit. No need to add additional overhead by
2731 @@ -108,7 +111,7 @@
2732 * will be requeued.
2733 */
2734 if (!nolock) {
2735 - if (!spin_trylock(&dev->xmit_lock)) {
2736 + if (!netif_tx_trylock(dev)) {
2737 collision:
2738 /* So, someone grabbed the driver. */
2740 @@ -126,8 +129,6 @@
2741 __get_cpu_var(netdev_rx_stat).cpu_collision++;
2742 goto requeue;
2744 - /* Remember that the driver is grabbed by us. */
2745 - dev->xmit_lock_owner = smp_processor_id();
2749 @@ -136,14 +137,11 @@
2751 if (!netif_queue_stopped(dev)) {
2752 int ret;
2753 - if (netdev_nit)
2754 - dev_queue_xmit_nit(skb, dev);
2756 - ret = dev->hard_start_xmit(skb, dev);
2757 + ret = dev_hard_start_xmit(skb, dev);
2758 if (ret == NETDEV_TX_OK) {
2759 if (!nolock) {
2760 - dev->xmit_lock_owner = -1;
2761 - spin_unlock(&dev->xmit_lock);
2762 + netif_tx_unlock(dev);
2764 spin_lock(&dev->queue_lock);
2765 return -1;
2766 @@ -157,8 +155,7 @@
2767 /* NETDEV_TX_BUSY - we need to requeue */
2768 /* Release the driver */
2769 if (!nolock) {
2770 - dev->xmit_lock_owner = -1;
2771 - spin_unlock(&dev->xmit_lock);
2772 + netif_tx_unlock(dev);
2774 spin_lock(&dev->queue_lock);
2775 q = dev->qdisc;
2776 @@ -175,7 +172,10 @@
2777 */
2779 requeue:
2780 - q->ops->requeue(skb, q);
2781 + if (skb->next)
2782 + dev->gso_skb = skb;
2783 + else
2784 + q->ops->requeue(skb, q);
2785 netif_schedule(dev);
2786 return 1;
2788 @@ -183,11 +183,23 @@
2789 return q->q.qlen;
2792 +void __qdisc_run(struct net_device *dev)
2793 +{
2794 + if (unlikely(dev->qdisc == &noop_qdisc))
2795 + goto out;
2797 + while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
2798 + /* NOTHING */;
2800 +out:
2801 + clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
2802 +}
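__qdisc_run() relies on a matching entry point that sets the __LINK_STATE_QDISC_RUNNING bit; it is added to include/net/pkt_sched.h by an earlier hunk of this patch and is assumed to look roughly like:

static inline void qdisc_run(struct net_device *dev)
{
	/* test_and_set_bit() elects one CPU as the transmitter;
	 * everyone else merely enqueues and leaves. */
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}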
2804 static void dev_watchdog(unsigned long arg)
2806 struct net_device *dev = (struct net_device *)arg;
2808 - spin_lock(&dev->xmit_lock);
2809 + netif_tx_lock(dev);
2810 if (dev->qdisc != &noop_qdisc) {
2811 if (netif_device_present(dev) &&
2812 netif_running(dev) &&
2813 @@ -201,7 +213,7 @@
2814 dev_hold(dev);
2817 - spin_unlock(&dev->xmit_lock);
2818 + netif_tx_unlock(dev);
2820 dev_put(dev);
2822 @@ -225,17 +237,17 @@
2824 static void dev_watchdog_up(struct net_device *dev)
2826 - spin_lock_bh(&dev->xmit_lock);
2827 + netif_tx_lock_bh(dev);
2828 __netdev_watchdog_up(dev);
2829 - spin_unlock_bh(&dev->xmit_lock);
2830 + netif_tx_unlock_bh(dev);
2833 static void dev_watchdog_down(struct net_device *dev)
2835 - spin_lock_bh(&dev->xmit_lock);
2836 + netif_tx_lock_bh(dev);
2837 if (del_timer(&dev->watchdog_timer))
2838 __dev_put(dev);
2839 - spin_unlock_bh(&dev->xmit_lock);
2840 + netif_tx_unlock_bh(dev);
2843 void netif_carrier_on(struct net_device *dev)
2844 @@ -577,10 +589,17 @@
2846 dev_watchdog_down(dev);
2848 - while (test_bit(__LINK_STATE_SCHED, &dev->state))
2849 + /* Wait for outstanding dev_queue_xmit calls. */
2850 + synchronize_rcu();
2852 + /* Wait for outstanding qdisc_run calls. */
2853 + while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
2854 yield();
2856 - spin_unlock_wait(&dev->xmit_lock);
2857 + if (dev->gso_skb) {
2858 + kfree_skb(dev->gso_skb);
2859 + dev->gso_skb = NULL;
2860 + }
2863 void dev_init_scheduler(struct net_device *dev)
2864 @@ -622,6 +641,5 @@
2865 EXPORT_SYMBOL(qdisc_alloc);
2866 EXPORT_SYMBOL(qdisc_destroy);
2867 EXPORT_SYMBOL(qdisc_reset);
2868 -EXPORT_SYMBOL(qdisc_restart);
2869 EXPORT_SYMBOL(qdisc_lock_tree);
2870 EXPORT_SYMBOL(qdisc_unlock_tree);
2871 Index: tmp-xxx/net/sched/sch_teql.c
2872 ===================================================================
2873 --- tmp-xxx.orig/net/sched/sch_teql.c 2006-11-15 10:38:39.000000000 +0000
2874 +++ tmp-xxx/net/sched/sch_teql.c 2006-11-27 10:52:42.000000000 +0000
2875 @@ -302,20 +302,17 @@
2877 switch (teql_resolve(skb, skb_res, slave)) {
2878 case 0:
2879 - if (spin_trylock(&slave->xmit_lock)) {
2880 - slave->xmit_lock_owner = smp_processor_id();
2881 + if (netif_tx_trylock(slave)) {
2882 if (!netif_queue_stopped(slave) &&
2883 slave->hard_start_xmit(skb, slave) == 0) {
2884 - slave->xmit_lock_owner = -1;
2885 - spin_unlock(&slave->xmit_lock);
2886 + netif_tx_unlock(slave);
2887 master->slaves = NEXT_SLAVE(q);
2888 netif_wake_queue(dev);
2889 master->stats.tx_packets++;
2890 master->stats.tx_bytes += len;
2891 return 0;
2893 - slave->xmit_lock_owner = -1;
2894 - spin_unlock(&slave->xmit_lock);
2895 + netif_tx_unlock(slave);
2897 if (netif_queue_stopped(dev))
2898 busy = 1;