ia64/xen-unstable

view patches/linux-2.6.16.29/net-gso-0-base.patch @ 12273:b223ba940ed1

PV-on-HVM: PV-on-HVM drivers need maddr.h included for the definition of maddr_t.
Signed-off-by: Takanori Kasai <Kasai.Takanori@jp.fujitsu.com>
Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
author kfraser@localhost.localdomain
date Tue Nov 07 09:29:53 2006 +0000 (2006-11-07)
parents 041be3f6b38e
line source
1 diff -pruN ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt ./Documentation/networking/netdevices.txt
2 --- ../orig-linux-2.6.16.29/Documentation/networking/netdevices.txt 2006-09-12 19:02:10.000000000 +0100
3 +++ ./Documentation/networking/netdevices.txt 2006-09-19 13:59:20.000000000 +0100
4 @@ -42,9 +42,9 @@ dev->get_stats:
5 Context: nominally process, but don't sleep inside an rwlock
7 dev->hard_start_xmit:
8 - Synchronization: dev->xmit_lock spinlock.
9 + Synchronization: netif_tx_lock spinlock.
10 When the driver sets NETIF_F_LLTX in dev->features this will be
11 - called without holding xmit_lock. In this case the driver
12 + called without holding netif_tx_lock. In this case the driver
13 has to lock by itself when needed. It is recommended to use a try lock
14 for this and return -1 when the spin lock fails.
15 The locking there should also properly protect against
16 @@ -62,12 +62,12 @@ dev->hard_start_xmit:
17 Only valid when NETIF_F_LLTX is set.
19 dev->tx_timeout:
20 - Synchronization: dev->xmit_lock spinlock.
21 + Synchronization: netif_tx_lock spinlock.
22 Context: BHs disabled
23 Notes: netif_queue_stopped() is guaranteed true
25 dev->set_multicast_list:
26 - Synchronization: dev->xmit_lock spinlock.
27 + Synchronization: netif_tx_lock spinlock.
28 Context: BHs disabled
30 dev->poll:
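
Every "Synchronization:" entry above changes because this patch stops drivers from touching dev->xmit_lock directly: the spinlock becomes dev->_xmit_lock and is only taken through the netif_tx_lock()/netif_tx_trylock()/netif_tx_unlock() wrappers defined in the netdevice.h hunk further down, which also record the owning CPU. A minimal user-space sketch of that wrapper pattern, using pthreads and a thread handle in place of the CPU id; all names here are illustrative, not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    /* user-space stand-in for struct net_device's tx lock + owner field */
    struct fake_dev {
        pthread_mutex_t xmit_lock;
        pthread_t       owner;
        int             owner_valid;
    };

    static void fake_tx_lock(struct fake_dev *d)
    {
        pthread_mutex_lock(&d->xmit_lock);
        d->owner = pthread_self();  /* record owner, as netif_tx_lock records the CPU */
        d->owner_valid = 1;
    }

    static int fake_tx_trylock(struct fake_dev *d)
    {
        if (pthread_mutex_trylock(&d->xmit_lock))
            return 0;               /* contended, like netif_tx_trylock failing */
        d->owner = pthread_self();
        d->owner_valid = 1;
        return 1;
    }

    static void fake_tx_unlock(struct fake_dev *d)
    {
        d->owner_valid = 0;         /* clear the owner before dropping the lock */
        pthread_mutex_unlock(&d->xmit_lock);
    }

    int main(void)
    {
        struct fake_dev d = { PTHREAD_MUTEX_INITIALIZER };

        fake_tx_lock(&d);
        printf("trylock while held: %d\n", fake_tx_trylock(&d));  /* 0 */
        fake_tx_unlock(&d);
        printf("trylock when free: %d\n", fake_tx_trylock(&d));   /* 1 */
        fake_tx_unlock(&d);
        return 0;
    }

Recording the owner is what lets dev_queue_xmit() detect a recursive transmit on the same CPU and drop the packet instead of deadlocking.
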
31 diff -pruN ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c ./drivers/block/aoe/aoenet.c
32 --- ../orig-linux-2.6.16.29/drivers/block/aoe/aoenet.c 2006-09-12 19:02:10.000000000 +0100
33 +++ ./drivers/block/aoe/aoenet.c 2006-09-19 13:59:20.000000000 +0100
34 @@ -95,9 +95,8 @@ mac_addr(char addr[6])
35 static struct sk_buff *
36 skb_check(struct sk_buff *skb)
37 {
38 - if (skb_is_nonlinear(skb))
39 if ((skb = skb_share_check(skb, GFP_ATOMIC)))
40 - if (skb_linearize(skb, GFP_ATOMIC) < 0) {
41 + if (skb_linearize(skb)) {
42 dev_kfree_skb(skb);
43 return NULL;
44 }
45 diff -pruN ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c
46 --- ../orig-linux-2.6.16.29/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-09-12 19:02:10.000000000 +0100
47 +++ ./drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-09-19 13:59:20.000000000 +0100
48 @@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_
50 ipoib_mcast_stop_thread(dev, 0);
52 - spin_lock_irqsave(&dev->xmit_lock, flags);
53 + local_irq_save(flags);
54 + netif_tx_lock(dev);
55 spin_lock(&priv->lock);
57 /*
58 @@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_
59 }
61 spin_unlock(&priv->lock);
62 - spin_unlock_irqrestore(&dev->xmit_lock, flags);
63 + netif_tx_unlock(dev);
64 + local_irq_restore(flags);
66 /* We have to cancel outside of the spinlock */
67 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
68 diff -pruN ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c ./drivers/media/dvb/dvb-core/dvb_net.c
69 --- ../orig-linux-2.6.16.29/drivers/media/dvb/dvb-core/dvb_net.c 2006-09-12 19:02:10.000000000 +0100
70 +++ ./drivers/media/dvb/dvb-core/dvb_net.c 2006-09-19 13:59:20.000000000 +0100
71 @@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void
73 dvb_net_feed_stop(dev);
74 priv->rx_mode = RX_MODE_UNI;
75 - spin_lock_bh(&dev->xmit_lock);
76 + netif_tx_lock_bh(dev);
78 if (dev->flags & IFF_PROMISC) {
79 dprintk("%s: promiscuous mode\n", dev->name);
80 @@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void
81 }
82 }
84 - spin_unlock_bh(&dev->xmit_lock);
85 + netif_tx_unlock_bh(dev);
86 dvb_net_feed_start(dev);
87 }
89 diff -pruN ../orig-linux-2.6.16.29/drivers/net/8139cp.c ./drivers/net/8139cp.c
90 --- ../orig-linux-2.6.16.29/drivers/net/8139cp.c 2006-09-12 19:02:10.000000000 +0100
91 +++ ./drivers/net/8139cp.c 2006-09-19 13:59:20.000000000 +0100
92 @@ -794,7 +794,7 @@ static int cp_start_xmit (struct sk_buff
93 entry = cp->tx_head;
94 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
95 if (dev->features & NETIF_F_TSO)
96 - mss = skb_shinfo(skb)->tso_size;
97 + mss = skb_shinfo(skb)->gso_size;
99 if (skb_shinfo(skb)->nr_frags == 0) {
100 struct cp_desc *txd = &cp->tx_ring[entry];
101 diff -pruN ../orig-linux-2.6.16.29/drivers/net/bnx2.c ./drivers/net/bnx2.c
102 --- ../orig-linux-2.6.16.29/drivers/net/bnx2.c 2006-09-12 19:02:10.000000000 +0100
103 +++ ./drivers/net/bnx2.c 2006-09-19 13:59:20.000000000 +0100
104 @@ -1593,7 +1593,7 @@ bnx2_tx_int(struct bnx2 *bp)
105 skb = tx_buf->skb;
106 #ifdef BCM_TSO
107 /* partial BD completions possible with TSO packets */
108 - if (skb_shinfo(skb)->tso_size) {
109 + if (skb_shinfo(skb)->gso_size) {
110 u16 last_idx, last_ring_idx;
112 last_idx = sw_cons +
113 @@ -1948,7 +1948,7 @@ bnx2_poll(struct net_device *dev, int *b
114 return 1;
115 }
117 -/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
118 +/* Called with rtnl_lock from vlan functions and also netif_tx_lock
119 * from set_multicast.
120 */
121 static void
122 @@ -4403,7 +4403,7 @@ bnx2_vlan_rx_kill_vid(struct net_device
123 }
124 #endif
126 -/* Called with dev->xmit_lock.
127 +/* Called with netif_tx_lock.
128 * hard_start_xmit is pseudo-lockless - a lock is only required when
129 * the tx queue is full. This way, we get the benefit of lockless
130 * operations most of the time without the complexities to handle
131 @@ -4441,7 +4441,7 @@ bnx2_start_xmit(struct sk_buff *skb, str
132 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
133 }
134 #ifdef BCM_TSO
135 - if ((mss = skb_shinfo(skb)->tso_size) &&
136 + if ((mss = skb_shinfo(skb)->gso_size) &&
137 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
138 u32 tcp_opt_len, ip_tcp_len;
140 diff -pruN ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c ./drivers/net/bonding/bond_main.c
141 --- ../orig-linux-2.6.16.29/drivers/net/bonding/bond_main.c 2006-09-12 19:02:10.000000000 +0100
142 +++ ./drivers/net/bonding/bond_main.c 2006-09-19 13:59:20.000000000 +0100
143 @@ -1145,8 +1145,7 @@ int bond_sethwaddr(struct net_device *bo
144 }
146 #define BOND_INTERSECT_FEATURES \
147 - (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM|\
148 - NETIF_F_TSO|NETIF_F_UFO)
149 + (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
151 /*
152 * Compute the common dev->feature set available to all slaves. Some
153 @@ -1164,9 +1163,7 @@ static int bond_compute_features(struct
154 features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
156 if ((features & NETIF_F_SG) &&
157 - !(features & (NETIF_F_IP_CSUM |
158 - NETIF_F_NO_CSUM |
159 - NETIF_F_HW_CSUM)))
160 + !(features & NETIF_F_ALL_CSUM))
161 features &= ~NETIF_F_SG;
163 /*
164 @@ -4147,7 +4144,7 @@ static int bond_init(struct net_device *
165 */
166 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
168 - /* don't acquire bond device's xmit_lock when
169 + /* don't acquire bond device's netif_tx_lock when
170 * transmitting */
171 bond_dev->features |= NETIF_F_LLTX;
173 diff -pruN ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c ./drivers/net/chelsio/sge.c
174 --- ../orig-linux-2.6.16.29/drivers/net/chelsio/sge.c 2006-09-12 19:02:10.000000000 +0100
175 +++ ./drivers/net/chelsio/sge.c 2006-09-19 13:59:20.000000000 +0100
176 @@ -1419,7 +1419,7 @@ int t1_start_xmit(struct sk_buff *skb, s
177 struct cpl_tx_pkt *cpl;
179 #ifdef NETIF_F_TSO
180 - if (skb_shinfo(skb)->tso_size) {
181 + if (skb_shinfo(skb)->gso_size) {
182 int eth_type;
183 struct cpl_tx_pkt_lso *hdr;
185 @@ -1434,7 +1434,7 @@ int t1_start_xmit(struct sk_buff *skb, s
186 hdr->ip_hdr_words = skb->nh.iph->ihl;
187 hdr->tcp_hdr_words = skb->h.th->doff;
188 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
189 - skb_shinfo(skb)->tso_size));
190 + skb_shinfo(skb)->gso_size));
191 hdr->len = htonl(skb->len - sizeof(*hdr));
192 cpl = (struct cpl_tx_pkt *)hdr;
193 sge->stats.tx_lso_pkts++;
194 diff -pruN ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c ./drivers/net/e1000/e1000_main.c
195 --- ../orig-linux-2.6.16.29/drivers/net/e1000/e1000_main.c 2006-09-12 19:02:10.000000000 +0100
196 +++ ./drivers/net/e1000/e1000_main.c 2006-09-19 13:59:20.000000000 +0100
197 @@ -2526,7 +2526,7 @@ e1000_tso(struct e1000_adapter *adapter,
198 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
199 int err;
201 - if (skb_shinfo(skb)->tso_size) {
202 + if (skb_shinfo(skb)->gso_size) {
203 if (skb_header_cloned(skb)) {
204 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
205 if (err)
206 @@ -2534,7 +2534,7 @@ e1000_tso(struct e1000_adapter *adapter,
207 }
209 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
210 - mss = skb_shinfo(skb)->tso_size;
211 + mss = skb_shinfo(skb)->gso_size;
212 if (skb->protocol == ntohs(ETH_P_IP)) {
213 skb->nh.iph->tot_len = 0;
214 skb->nh.iph->check = 0;
215 @@ -2651,7 +2651,7 @@ e1000_tx_map(struct e1000_adapter *adapt
216 * tso gets written back prematurely before the data is fully
217 * DMAd to the controller */
218 if (!skb->data_len && tx_ring->last_tx_tso &&
219 - !skb_shinfo(skb)->tso_size) {
220 + !skb_shinfo(skb)->gso_size) {
221 tx_ring->last_tx_tso = 0;
222 size -= 4;
223 }
224 @@ -2893,7 +2893,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
225 }
227 #ifdef NETIF_F_TSO
228 - mss = skb_shinfo(skb)->tso_size;
229 + mss = skb_shinfo(skb)->gso_size;
230 /* The controller does a simple calculation to
231 * make sure there is enough room in the FIFO before
232 * initiating the DMA for each buffer. The calc is:
233 @@ -2935,7 +2935,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
234 #ifdef NETIF_F_TSO
235 /* Controller Erratum workaround */
236 if (!skb->data_len && tx_ring->last_tx_tso &&
237 - !skb_shinfo(skb)->tso_size)
238 + !skb_shinfo(skb)->gso_size)
239 count++;
240 #endif
242 diff -pruN ../orig-linux-2.6.16.29/drivers/net/forcedeth.c ./drivers/net/forcedeth.c
243 --- ../orig-linux-2.6.16.29/drivers/net/forcedeth.c 2006-09-12 19:02:10.000000000 +0100
244 +++ ./drivers/net/forcedeth.c 2006-09-19 13:59:20.000000000 +0100
245 @@ -482,9 +482,9 @@ typedef union _ring_type {
246 * critical parts:
247 * - rx is (pseudo-) lockless: it relies on the single-threading provided
248 * by the arch code for interrupts.
249 - * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
250 + * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
251 * needs dev->priv->lock :-(
252 - * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
253 + * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
254 */
256 /* in dev: base, irq */
257 @@ -1016,7 +1016,7 @@ static void drain_ring(struct net_device
259 /*
260 * nv_start_xmit: dev->hard_start_xmit function
261 - * Called with dev->xmit_lock held.
262 + * Called with netif_tx_lock held.
263 */
264 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
265 {
266 @@ -1105,8 +1105,8 @@ static int nv_start_xmit(struct sk_buff
267 np->tx_skbuff[nr] = skb;
269 #ifdef NETIF_F_TSO
270 - if (skb_shinfo(skb)->tso_size)
271 - tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
272 + if (skb_shinfo(skb)->gso_size)
273 + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
274 else
275 #endif
276 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
277 @@ -1203,7 +1203,7 @@ static void nv_tx_done(struct net_device
279 /*
280 * nv_tx_timeout: dev->tx_timeout function
281 - * Called with dev->xmit_lock held.
282 + * Called with netif_tx_lock held.
283 */
284 static void nv_tx_timeout(struct net_device *dev)
285 {
286 @@ -1524,7 +1524,7 @@ static int nv_change_mtu(struct net_devi
287 * Changing the MTU is a rare event, it shouldn't matter.
288 */
289 disable_irq(dev->irq);
290 - spin_lock_bh(&dev->xmit_lock);
291 + netif_tx_lock_bh(dev);
292 spin_lock(&np->lock);
293 /* stop engines */
294 nv_stop_rx(dev);
295 @@ -1559,7 +1559,7 @@ static int nv_change_mtu(struct net_devi
296 nv_start_rx(dev);
297 nv_start_tx(dev);
298 spin_unlock(&np->lock);
299 - spin_unlock_bh(&dev->xmit_lock);
300 + netif_tx_unlock_bh(dev);
301 enable_irq(dev->irq);
302 }
303 return 0;
304 @@ -1594,7 +1594,7 @@ static int nv_set_mac_address(struct net
305 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
307 if (netif_running(dev)) {
308 - spin_lock_bh(&dev->xmit_lock);
309 + netif_tx_lock_bh(dev);
310 spin_lock_irq(&np->lock);
312 /* stop rx engine */
313 @@ -1606,7 +1606,7 @@ static int nv_set_mac_address(struct net
314 /* restart rx engine */
315 nv_start_rx(dev);
316 spin_unlock_irq(&np->lock);
317 - spin_unlock_bh(&dev->xmit_lock);
318 + netif_tx_unlock_bh(dev);
319 } else {
320 nv_copy_mac_to_hw(dev);
321 }
322 @@ -1615,7 +1615,7 @@ static int nv_set_mac_address(struct net
324 /*
325 * nv_set_multicast: dev->set_multicast function
326 - * Called with dev->xmit_lock held.
327 + * Called with netif_tx_lock held.
328 */
329 static void nv_set_multicast(struct net_device *dev)
330 {
331 diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c ./drivers/net/hamradio/6pack.c
332 --- ../orig-linux-2.6.16.29/drivers/net/hamradio/6pack.c 2006-09-12 19:02:10.000000000 +0100
333 +++ ./drivers/net/hamradio/6pack.c 2006-09-19 13:59:20.000000000 +0100
334 @@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net
335 {
336 struct sockaddr_ax25 *sa = addr;
338 - spin_lock_irq(&dev->xmit_lock);
339 + netif_tx_lock_bh(dev);
340 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
341 - spin_unlock_irq(&dev->xmit_lock);
342 + netif_tx_unlock_bh(dev);
344 return 0;
345 }
346 @@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_stru
347 break;
348 }
350 - spin_lock_irq(&dev->xmit_lock);
351 + netif_tx_lock_bh(dev);
352 memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
353 - spin_unlock_irq(&dev->xmit_lock);
354 + netif_tx_unlock_bh(dev);
356 err = 0;
357 break;
358 diff -pruN ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c ./drivers/net/hamradio/mkiss.c
359 --- ../orig-linux-2.6.16.29/drivers/net/hamradio/mkiss.c 2006-09-12 19:02:10.000000000 +0100
360 +++ ./drivers/net/hamradio/mkiss.c 2006-09-19 13:59:20.000000000 +0100
361 @@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net
362 {
363 struct sockaddr_ax25 *sa = addr;
365 - spin_lock_irq(&dev->xmit_lock);
366 + netif_tx_lock_bh(dev);
367 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
368 - spin_unlock_irq(&dev->xmit_lock);
369 + netif_tx_unlock_bh(dev);
371 return 0;
372 }
373 @@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct
374 break;
375 }
377 - spin_lock_irq(&dev->xmit_lock);
378 + netif_tx_lock_bh(dev);
379 memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
380 - spin_unlock_irq(&dev->xmit_lock);
381 + netif_tx_unlock_bh(dev);
383 err = 0;
384 break;
385 diff -pruN ../orig-linux-2.6.16.29/drivers/net/ifb.c ./drivers/net/ifb.c
386 --- ../orig-linux-2.6.16.29/drivers/net/ifb.c 2006-09-12 19:02:10.000000000 +0100
387 +++ ./drivers/net/ifb.c 2006-09-19 13:59:20.000000000 +0100
388 @@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev
389 dp->st_task_enter++;
390 if ((skb = skb_peek(&dp->tq)) == NULL) {
391 dp->st_txq_refl_try++;
392 - if (spin_trylock(&_dev->xmit_lock)) {
393 + if (netif_tx_trylock(_dev)) {
394 dp->st_rxq_enter++;
395 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
396 skb_queue_tail(&dp->tq, skb);
397 dp->st_rx2tx_tran++;
398 }
399 - spin_unlock(&_dev->xmit_lock);
400 + netif_tx_unlock(_dev);
401 } else {
402 /* reschedule */
403 dp->st_rxq_notenter++;
404 @@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev
405 }
406 }
408 - if (spin_trylock(&_dev->xmit_lock)) {
409 + if (netif_tx_trylock(_dev)) {
410 dp->st_rxq_check++;
411 if ((skb = skb_peek(&dp->rq)) == NULL) {
412 dp->tasklet_pending = 0;
413 @@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev
414 netif_wake_queue(_dev);
415 } else {
416 dp->st_rxq_rsch++;
417 - spin_unlock(&_dev->xmit_lock);
418 + netif_tx_unlock(_dev);
419 goto resched;
420 }
421 - spin_unlock(&_dev->xmit_lock);
422 + netif_tx_unlock(_dev);
423 } else {
424 resched:
425 dp->tasklet_pending = 1;
426 diff -pruN ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c ./drivers/net/irda/vlsi_ir.c
427 --- ../orig-linux-2.6.16.29/drivers/net/irda/vlsi_ir.c 2006-09-12 19:02:10.000000000 +0100
428 +++ ./drivers/net/irda/vlsi_ir.c 2006-09-19 13:59:20.000000000 +0100
429 @@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct s
430 || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
431 break;
432 udelay(100);
433 - /* must not sleep here - we are called under xmit_lock! */
434 + /* must not sleep here - called under netif_tx_lock! */
435 }
436 }
438 diff -pruN ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c ./drivers/net/ixgb/ixgb_main.c
439 --- ../orig-linux-2.6.16.29/drivers/net/ixgb/ixgb_main.c 2006-09-12 19:02:10.000000000 +0100
440 +++ ./drivers/net/ixgb/ixgb_main.c 2006-09-19 13:59:20.000000000 +0100
441 @@ -1163,7 +1163,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
442 uint16_t ipcse, tucse, mss;
443 int err;
445 - if(likely(skb_shinfo(skb)->tso_size)) {
446 + if(likely(skb_shinfo(skb)->gso_size)) {
447 if (skb_header_cloned(skb)) {
448 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
449 if (err)
450 @@ -1171,7 +1171,7 @@ ixgb_tso(struct ixgb_adapter *adapter, s
451 }
453 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
454 - mss = skb_shinfo(skb)->tso_size;
455 + mss = skb_shinfo(skb)->gso_size;
456 skb->nh.iph->tot_len = 0;
457 skb->nh.iph->check = 0;
458 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
459 diff -pruN ../orig-linux-2.6.16.29/drivers/net/loopback.c ./drivers/net/loopback.c
460 --- ../orig-linux-2.6.16.29/drivers/net/loopback.c 2006-09-12 19:02:10.000000000 +0100
461 +++ ./drivers/net/loopback.c 2006-09-19 13:59:20.000000000 +0100
462 @@ -74,7 +74,7 @@ static void emulate_large_send_offload(s
463 struct iphdr *iph = skb->nh.iph;
464 struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
465 unsigned int doffset = (iph->ihl + th->doff) * 4;
466 - unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
467 + unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
468 unsigned int offset = 0;
469 u32 seq = ntohl(th->seq);
470 u16 id = ntohs(iph->id);
471 @@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff
472 #endif
474 #ifdef LOOPBACK_TSO
475 - if (skb_shinfo(skb)->tso_size) {
476 + if (skb_shinfo(skb)->gso_size) {
477 BUG_ON(skb->protocol != htons(ETH_P_IP));
478 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
480 diff -pruN ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c ./drivers/net/mv643xx_eth.c
481 --- ../orig-linux-2.6.16.29/drivers/net/mv643xx_eth.c 2006-09-12 19:02:10.000000000 +0100
482 +++ ./drivers/net/mv643xx_eth.c 2006-09-19 13:59:20.000000000 +0100
483 @@ -1107,7 +1107,7 @@ static int mv643xx_eth_start_xmit(struct
485 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
486 if (has_tiny_unaligned_frags(skb)) {
487 - if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
488 + if (__skb_linearize(skb)) {
489 stats->tx_dropped++;
490 printk(KERN_DEBUG "%s: failed to linearize tiny "
491 "unaligned fragment\n", dev->name);
492 diff -pruN ../orig-linux-2.6.16.29/drivers/net/natsemi.c ./drivers/net/natsemi.c
493 --- ../orig-linux-2.6.16.29/drivers/net/natsemi.c 2006-09-12 19:02:10.000000000 +0100
494 +++ ./drivers/net/natsemi.c 2006-09-19 13:59:20.000000000 +0100
495 @@ -323,12 +323,12 @@ performance critical codepaths:
496 The rx process only runs in the interrupt handler. Access from outside
497 the interrupt handler is only permitted after disable_irq().
499 -The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
500 +The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
501 is set, then access is permitted under spin_lock_irq(&np->lock).
503 Thus configuration functions that want to access everything must call
504 disable_irq(dev->irq);
505 - spin_lock_bh(dev->xmit_lock);
506 + netif_tx_lock_bh(dev);
507 spin_lock_irq(&np->lock);
509 IV. Notes
510 diff -pruN ../orig-linux-2.6.16.29/drivers/net/r8169.c ./drivers/net/r8169.c
511 --- ../orig-linux-2.6.16.29/drivers/net/r8169.c 2006-09-12 19:02:10.000000000 +0100
512 +++ ./drivers/net/r8169.c 2006-09-19 13:59:20.000000000 +0100
513 @@ -2171,7 +2171,7 @@ static int rtl8169_xmit_frags(struct rtl
514 static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
515 {
516 if (dev->features & NETIF_F_TSO) {
517 - u32 mss = skb_shinfo(skb)->tso_size;
518 + u32 mss = skb_shinfo(skb)->gso_size;
520 if (mss)
521 return LargeSend | ((mss & MSSMask) << MSSShift);
522 diff -pruN ../orig-linux-2.6.16.29/drivers/net/s2io.c ./drivers/net/s2io.c
523 --- ../orig-linux-2.6.16.29/drivers/net/s2io.c 2006-09-12 19:02:10.000000000 +0100
524 +++ ./drivers/net/s2io.c 2006-09-19 13:59:20.000000000 +0100
525 @@ -3522,8 +3522,8 @@ static int s2io_xmit(struct sk_buff *skb
526 txdp->Control_1 = 0;
527 txdp->Control_2 = 0;
528 #ifdef NETIF_F_TSO
529 - mss = skb_shinfo(skb)->tso_size;
530 - if (mss) {
531 + mss = skb_shinfo(skb)->gso_size;
532 + if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
533 txdp->Control_1 |= TXD_TCP_LSO_EN;
534 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
535 }
536 @@ -3543,10 +3543,10 @@ static int s2io_xmit(struct sk_buff *skb
537 }
539 frg_len = skb->len - skb->data_len;
540 - if (skb_shinfo(skb)->ufo_size) {
541 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
542 int ufo_size;
544 - ufo_size = skb_shinfo(skb)->ufo_size;
545 + ufo_size = skb_shinfo(skb)->gso_size;
546 ufo_size &= ~7;
547 txdp->Control_1 |= TXD_UFO_EN;
548 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
549 @@ -3572,7 +3572,7 @@ static int s2io_xmit(struct sk_buff *skb
550 txdp->Host_Control = (unsigned long) skb;
551 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
553 - if (skb_shinfo(skb)->ufo_size)
554 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
555 txdp->Control_1 |= TXD_UFO_EN;
557 frg_cnt = skb_shinfo(skb)->nr_frags;
558 @@ -3587,12 +3587,12 @@ static int s2io_xmit(struct sk_buff *skb
559 (sp->pdev, frag->page, frag->page_offset,
560 frag->size, PCI_DMA_TODEVICE);
561 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
562 - if (skb_shinfo(skb)->ufo_size)
563 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
564 txdp->Control_1 |= TXD_UFO_EN;
565 }
566 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
568 - if (skb_shinfo(skb)->ufo_size)
569 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
570 frg_cnt++; /* as Txd0 was used for inband header */
572 tx_fifo = mac_control->tx_FIFO_start[queue];
573 @@ -3606,7 +3606,7 @@ static int s2io_xmit(struct sk_buff *skb
574 if (mss)
575 val64 |= TX_FIFO_SPECIAL_FUNC;
576 #endif
577 - if (skb_shinfo(skb)->ufo_size)
578 + if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
579 val64 |= TX_FIFO_SPECIAL_FUNC;
580 writeq(val64, &tx_fifo->List_Control);
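
s2io is the one driver above that used both tso_size and ufo_size, so the rename also has to disambiguate: with a single gso_size field, TCP segmentation and UDP fragmentation offload are now told apart by gso_type (SKB_GSO_TCPV4 vs SKB_GSO_UDPV4, from the skbuff.h hunk below). A toy dispatch on the same idea; the shinfo struct and descriptor strings are stand-ins, not the driver's TXD_* macros:

    #include <stdio.h>

    enum { SKB_GSO_TCPV4 = 1 << 0, SKB_GSO_UDPV4 = 1 << 1 };

    struct fake_shinfo { unsigned short gso_size, gso_type; };

    static const char *offload_kind(const struct fake_shinfo *si)
    {
        if (si->gso_type == SKB_GSO_TCPV4)
            return "TCP LSO descriptor (LSO enable + MSS)";
        if (si->gso_type == SKB_GSO_UDPV4)
            return "UDP UFO descriptor (UFO enable + MSS)";
        return "plain descriptor";
    }

    int main(void)
    {
        struct fake_shinfo tso = { 1448, SKB_GSO_TCPV4 };
        struct fake_shinfo ufo = { 1472, SKB_GSO_UDPV4 };

        printf("%s\n%s\n", offload_kind(&tso), offload_kind(&ufo));
        return 0;
    }
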
582 diff -pruN ../orig-linux-2.6.16.29/drivers/net/sky2.c ./drivers/net/sky2.c
583 --- ../orig-linux-2.6.16.29/drivers/net/sky2.c 2006-09-12 19:02:10.000000000 +0100
584 +++ ./drivers/net/sky2.c 2006-09-19 13:59:20.000000000 +0100
585 @@ -1125,7 +1125,7 @@ static unsigned tx_le_req(const struct s
586 count = sizeof(dma_addr_t) / sizeof(u32);
587 count += skb_shinfo(skb)->nr_frags * count;
589 - if (skb_shinfo(skb)->tso_size)
590 + if (skb_shinfo(skb)->gso_size)
591 ++count;
593 if (skb->ip_summed == CHECKSUM_HW)
594 @@ -1197,7 +1197,7 @@ static int sky2_xmit_frame(struct sk_buf
595 }
597 /* Check for TCP Segmentation Offload */
598 - mss = skb_shinfo(skb)->tso_size;
599 + mss = skb_shinfo(skb)->gso_size;
600 if (mss != 0) {
601 /* just drop the packet if non-linear expansion fails */
602 if (skb_header_cloned(skb) &&
603 diff -pruN ../orig-linux-2.6.16.29/drivers/net/tg3.c ./drivers/net/tg3.c
604 --- ../orig-linux-2.6.16.29/drivers/net/tg3.c 2006-09-12 19:02:10.000000000 +0100
605 +++ ./drivers/net/tg3.c 2006-09-19 13:59:20.000000000 +0100
606 @@ -3664,7 +3664,7 @@ static int tg3_start_xmit(struct sk_buff
607 #if TG3_TSO_SUPPORT != 0
608 mss = 0;
609 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
610 - (mss = skb_shinfo(skb)->tso_size) != 0) {
611 + (mss = skb_shinfo(skb)->gso_size) != 0) {
612 int tcp_opt_len, ip_tcp_len;
614 if (skb_header_cloned(skb) &&
615 diff -pruN ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c ./drivers/net/tulip/winbond-840.c
616 --- ../orig-linux-2.6.16.29/drivers/net/tulip/winbond-840.c 2006-09-12 19:02:10.000000000 +0100
617 +++ ./drivers/net/tulip/winbond-840.c 2006-09-19 13:59:20.000000000 +0100
618 @@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (stru
619 * - get_stats:
620 * spin_lock_irq(np->lock), doesn't touch hw if not present
621 * - hard_start_xmit:
622 - * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
623 + * synchronize_irq + netif_tx_disable;
624 * - tx_timeout:
625 - * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
626 + * netif_device_detach + netif_tx_disable;
627 * - set_multicast_list
628 - * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
629 + * netif_device_detach + netif_tx_disable;
630 * - interrupt handler
631 * doesn't touch hw if not present, synchronize_irq waits for
632 * running instances of the interrupt handler.
633 @@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev
634 netif_device_detach(dev);
635 update_csr6(dev, 0);
636 iowrite32(0, ioaddr + IntrEnable);
637 - netif_stop_queue(dev);
638 spin_unlock_irq(&np->lock);
640 - spin_unlock_wait(&dev->xmit_lock);
641 synchronize_irq(dev->irq);
642 + netif_tx_disable(dev);
644 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
646 diff -pruN ../orig-linux-2.6.16.29/drivers/net/typhoon.c ./drivers/net/typhoon.c
647 --- ../orig-linux-2.6.16.29/drivers/net/typhoon.c 2006-09-12 19:02:10.000000000 +0100
648 +++ ./drivers/net/typhoon.c 2006-09-19 13:59:20.000000000 +0100
649 @@ -340,7 +340,7 @@ enum state_values {
650 #endif
652 #if defined(NETIF_F_TSO)
653 -#define skb_tso_size(x) (skb_shinfo(x)->tso_size)
654 +#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
655 #define TSO_NUM_DESCRIPTORS 2
656 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
657 #else
658 diff -pruN ../orig-linux-2.6.16.29/drivers/net/via-velocity.c ./drivers/net/via-velocity.c
659 --- ../orig-linux-2.6.16.29/drivers/net/via-velocity.c 2006-09-12 19:02:10.000000000 +0100
660 +++ ./drivers/net/via-velocity.c 2006-09-19 13:59:20.000000000 +0100
661 @@ -1899,6 +1899,13 @@ static int velocity_xmit(struct sk_buff
663 int pktlen = skb->len;
665 +#ifdef VELOCITY_ZERO_COPY_SUPPORT
666 + if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
667 + kfree_skb(skb);
668 + return 0;
669 + }
670 +#endif
671 +
672 spin_lock_irqsave(&vptr->lock, flags);
674 index = vptr->td_curr[qnum];
675 @@ -1914,8 +1921,6 @@ static int velocity_xmit(struct sk_buff
676 */
677 if (pktlen < ETH_ZLEN) {
678 /* Cannot occur until ZC support */
679 - if(skb_linearize(skb, GFP_ATOMIC))
680 - return 0;
681 pktlen = ETH_ZLEN;
682 memcpy(tdinfo->buf, skb->data, skb->len);
683 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
684 @@ -1933,7 +1938,6 @@ static int velocity_xmit(struct sk_buff
685 int nfrags = skb_shinfo(skb)->nr_frags;
686 tdinfo->skb = skb;
687 if (nfrags > 6) {
688 - skb_linearize(skb, GFP_ATOMIC);
689 memcpy(tdinfo->buf, skb->data, skb->len);
690 tdinfo->skb_dma[0] = tdinfo->buf_dma;
691 td_ptr->tdesc0.pktsize =
692 diff -pruN ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c ./drivers/net/wireless/orinoco.c
693 --- ../orig-linux-2.6.16.29/drivers/net/wireless/orinoco.c 2006-09-12 19:02:10.000000000 +0100
694 +++ ./drivers/net/wireless/orinoco.c 2006-09-19 13:59:20.000000000 +0100
695 @@ -1835,7 +1835,9 @@ static int __orinoco_program_rids(struct
696 /* Set promiscuity / multicast*/
697 priv->promiscuous = 0;
698 priv->mc_count = 0;
699 - __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
700 +
701 + /* FIXME: what about netif_tx_lock */
702 + __orinoco_set_multicast_list(dev);
704 return 0;
705 }
706 diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c ./drivers/s390/net/qeth_eddp.c
707 --- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_eddp.c 2006-09-12 19:02:10.000000000 +0100
708 +++ ./drivers/s390/net/qeth_eddp.c 2006-09-19 13:59:20.000000000 +0100
709 @@ -421,7 +421,7 @@ __qeth_eddp_fill_context_tcp(struct qeth
710 }
711 tcph = eddp->skb->h.th;
712 while (eddp->skb_offset < eddp->skb->len) {
713 - data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
714 + data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
715 (int)(eddp->skb->len - eddp->skb_offset));
716 /* prepare qdio hdr */
717 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
718 @@ -516,20 +516,20 @@ qeth_eddp_calc_num_pages(struct qeth_edd
720 QETH_DBF_TEXT(trace, 5, "eddpcanp");
721 /* can we put multiple skbs in one page? */
722 - skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
723 + skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
724 if (skbs_per_page > 1){
725 - ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
726 + ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
727 skbs_per_page + 1;
728 ctx->elements_per_skb = 1;
729 } else {
730 /* no -> how many elements per skb? */
731 - ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
732 + ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
733 PAGE_SIZE) >> PAGE_SHIFT;
734 ctx->num_pages = ctx->elements_per_skb *
735 - (skb_shinfo(skb)->tso_segs + 1);
736 + (skb_shinfo(skb)->gso_segs + 1);
737 }
738 ctx->num_elements = ctx->elements_per_skb *
739 - (skb_shinfo(skb)->tso_segs + 1);
740 + (skb_shinfo(skb)->gso_segs + 1);
741 }
743 static inline struct qeth_eddp_context *
744 diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c ./drivers/s390/net/qeth_main.c
745 --- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_main.c 2006-09-12 19:02:10.000000000 +0100
746 +++ ./drivers/s390/net/qeth_main.c 2006-09-19 13:59:20.000000000 +0100
747 @@ -4454,7 +4454,7 @@ qeth_send_packet(struct qeth_card *card,
748 queue = card->qdio.out_qs
749 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
751 - if (skb_shinfo(skb)->tso_size)
752 + if (skb_shinfo(skb)->gso_size)
753 large_send = card->options.large_send;
755 /*are we able to do TSO ? If so ,prepare and send it from here */
756 @@ -4501,7 +4501,7 @@ qeth_send_packet(struct qeth_card *card,
757 card->stats.tx_packets++;
758 card->stats.tx_bytes += skb->len;
759 #ifdef CONFIG_QETH_PERF_STATS
760 - if (skb_shinfo(skb)->tso_size &&
761 + if (skb_shinfo(skb)->gso_size &&
762 !(large_send == QETH_LARGE_SEND_NO)) {
763 card->perf_stats.large_send_bytes += skb->len;
764 card->perf_stats.large_send_cnt++;
765 diff -pruN ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h ./drivers/s390/net/qeth_tso.h
766 --- ../orig-linux-2.6.16.29/drivers/s390/net/qeth_tso.h 2006-09-12 19:02:10.000000000 +0100
767 +++ ./drivers/s390/net/qeth_tso.h 2006-09-19 13:59:20.000000000 +0100
768 @@ -51,7 +51,7 @@ qeth_tso_fill_header(struct qeth_card *c
769 hdr->ext.hdr_version = 1;
770 hdr->ext.hdr_len = 28;
771 /*insert non-fix values */
772 - hdr->ext.mss = skb_shinfo(skb)->tso_size;
773 + hdr->ext.mss = skb_shinfo(skb)->gso_size;
774 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
775 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
776 sizeof(struct qeth_hdr_tso));
777 diff -pruN ../orig-linux-2.6.16.29/include/linux/ethtool.h ./include/linux/ethtool.h
778 --- ../orig-linux-2.6.16.29/include/linux/ethtool.h 2006-09-12 19:02:10.000000000 +0100
779 +++ ./include/linux/ethtool.h 2006-09-19 13:59:20.000000000 +0100
780 @@ -408,6 +408,8 @@ struct ethtool_ops {
781 #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */
782 #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */
783 #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */
784 +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */
785 +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
787 /* compatibility with older code */
788 #define SPARC_ETH_GSET ETHTOOL_GSET
789 diff -pruN ../orig-linux-2.6.16.29/include/linux/netdevice.h ./include/linux/netdevice.h
790 --- ../orig-linux-2.6.16.29/include/linux/netdevice.h 2006-09-12 19:02:10.000000000 +0100
791 +++ ./include/linux/netdevice.h 2006-09-19 13:59:20.000000000 +0100
792 @@ -230,7 +230,8 @@ enum netdev_state_t
793 __LINK_STATE_SCHED,
794 __LINK_STATE_NOCARRIER,
795 __LINK_STATE_RX_SCHED,
796 - __LINK_STATE_LINKWATCH_PENDING
797 + __LINK_STATE_LINKWATCH_PENDING,
798 + __LINK_STATE_QDISC_RUNNING,
799 };
802 @@ -306,9 +307,17 @@ struct net_device
803 #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
804 #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
805 #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
806 -#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
807 +#define NETIF_F_GSO 2048 /* Enable software GSO. */
808 #define NETIF_F_LLTX 4096 /* LockLess TX */
809 -#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
810 +
811 + /* Segmentation offload features */
812 +#define NETIF_F_GSO_SHIFT 16
813 +#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
814 +#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
815 +#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
816 +
817 +#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
818 +#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
820 struct net_device *next_sched;
822 @@ -394,6 +403,9 @@ struct net_device
823 struct list_head qdisc_list;
824 unsigned long tx_queue_len; /* Max frames per queue allowed */
826 + /* Partially transmitted GSO packet. */
827 + struct sk_buff *gso_skb;
828 +
829 /* ingress path synchronizer */
830 spinlock_t ingress_lock;
831 struct Qdisc *qdisc_ingress;
832 @@ -402,7 +414,7 @@ struct net_device
833 * One part is mostly used on xmit path (device)
834 */
835 /* hard_start_xmit synchronizer */
836 - spinlock_t xmit_lock ____cacheline_aligned_in_smp;
837 + spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
838 /* cpu id of processor entered to hard_start_xmit or -1,
839 if nobody entered there.
840 */
841 @@ -527,6 +539,8 @@ struct packet_type {
842 struct net_device *,
843 struct packet_type *,
844 struct net_device *);
845 + struct sk_buff *(*gso_segment)(struct sk_buff *skb,
846 + int features);
847 void *af_packet_priv;
848 struct list_head list;
849 };
850 @@ -693,7 +707,8 @@ extern int dev_change_name(struct net_d
851 extern int dev_set_mtu(struct net_device *, int);
852 extern int dev_set_mac_address(struct net_device *,
853 struct sockaddr *);
854 -extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
855 +extern int dev_hard_start_xmit(struct sk_buff *skb,
856 + struct net_device *dev);
858 extern void dev_init(void);
860 @@ -900,11 +915,43 @@ static inline void __netif_rx_complete(s
861 clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
862 }
864 +static inline void netif_tx_lock(struct net_device *dev)
865 +{
866 + spin_lock(&dev->_xmit_lock);
867 + dev->xmit_lock_owner = smp_processor_id();
868 +}
869 +
870 +static inline void netif_tx_lock_bh(struct net_device *dev)
871 +{
872 + spin_lock_bh(&dev->_xmit_lock);
873 + dev->xmit_lock_owner = smp_processor_id();
874 +}
875 +
876 +static inline int netif_tx_trylock(struct net_device *dev)
877 +{
878 + int err = spin_trylock(&dev->_xmit_lock);
879 + if (err)
880 + dev->xmit_lock_owner = smp_processor_id();
881 + return err;
882 +}
883 +
884 +static inline void netif_tx_unlock(struct net_device *dev)
885 +{
886 + dev->xmit_lock_owner = -1;
887 + spin_unlock(&dev->_xmit_lock);
888 +}
889 +
890 +static inline void netif_tx_unlock_bh(struct net_device *dev)
891 +{
892 + dev->xmit_lock_owner = -1;
893 + spin_unlock_bh(&dev->_xmit_lock);
894 +}
895 +
896 static inline void netif_tx_disable(struct net_device *dev)
897 {
898 - spin_lock_bh(&dev->xmit_lock);
899 + netif_tx_lock_bh(dev);
900 netif_stop_queue(dev);
901 - spin_unlock_bh(&dev->xmit_lock);
902 + netif_tx_unlock_bh(dev);
903 }
905 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
906 @@ -932,6 +979,7 @@ extern int netdev_max_backlog;
907 extern int weight_p;
908 extern int netdev_set_master(struct net_device *dev, struct net_device *master);
909 extern int skb_checksum_help(struct sk_buff *skb, int inward);
910 +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
911 #ifdef CONFIG_BUG
912 extern void netdev_rx_csum_fault(struct net_device *dev);
913 #else
914 @@ -951,6 +999,18 @@ extern void dev_seq_stop(struct seq_file
916 extern void linkwatch_run_queue(void);
918 +static inline int skb_gso_ok(struct sk_buff *skb, int features)
919 +{
920 + int feature = skb_shinfo(skb)->gso_size ?
921 + skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
922 + return (features & feature) == feature;
923 +}
924 +
925 +static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
926 +{
927 + return !skb_gso_ok(skb, dev->features);
928 +}
929 +
930 #endif /* __KERNEL__ */
932 #endif /* _LINUX_DEV_H */
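
The heart of this hunk: the per-packet SKB_GSO_* type bits and the per-device NETIF_F_* offload bits are deliberately the same values shifted by NETIF_F_GSO_SHIFT, so skb_gso_ok() reduces to one mask comparison and netif_needs_gso() to its negation. A runnable user-space restatement, with the constants copied from this patch and a plain struct standing in for skb_shared_info:

    #include <stdio.h>

    enum { SKB_GSO_TCPV4 = 1 << 0, SKB_GSO_UDPV4 = 1 << 1, SKB_GSO_DODGY = 1 << 2 };

    #define NETIF_F_GSO_SHIFT 16
    #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
    #define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)

    struct pkt { int gso_size, gso_type; };  /* stand-in for skb_shared_info */

    static int skb_gso_ok(const struct pkt *p, int features)
    {
        int feature = p->gso_size ? p->gso_type << NETIF_F_GSO_SHIFT : 0;
        return (features & feature) == feature;
    }

    static int netif_needs_gso(const struct pkt *p, int dev_features)
    {
        return !skb_gso_ok(p, dev_features);
    }

    int main(void)
    {
        struct pkt tcp = { 1448, SKB_GSO_TCPV4 };
        int tso_dev = NETIF_F_TSO;

        printf("hardware can take it: %d\n", skb_gso_ok(&tcp, tso_dev));      /* 1 */
        tcp.gso_type |= SKB_GSO_DODGY;  /* untrusted source, no NETIF_F_GSO_ROBUST */
        printf("needs software GSO:  %d\n", netif_needs_gso(&tcp, tso_dev));  /* 1 */
        return 0;
    }

Note how SKB_GSO_DODGY works: a packet from an untrusted source only passes skb_gso_ok() if the device also advertises NETIF_F_GSO_ROBUST; otherwise it is routed through software GSO, which reverifies the headers.
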
933 diff -pruN ../orig-linux-2.6.16.29/include/linux/skbuff.h ./include/linux/skbuff.h
934 --- ../orig-linux-2.6.16.29/include/linux/skbuff.h 2006-09-12 19:02:10.000000000 +0100
935 +++ ./include/linux/skbuff.h 2006-09-19 13:59:20.000000000 +0100
936 @@ -134,9 +134,10 @@ struct skb_frag_struct {
937 struct skb_shared_info {
938 atomic_t dataref;
939 unsigned short nr_frags;
940 - unsigned short tso_size;
941 - unsigned short tso_segs;
942 - unsigned short ufo_size;
943 + unsigned short gso_size;
944 + /* Warning: this field is not always filled in (UFO)! */
945 + unsigned short gso_segs;
946 + unsigned short gso_type;
947 unsigned int ip6_frag_id;
948 struct sk_buff *frag_list;
949 skb_frag_t frags[MAX_SKB_FRAGS];
950 @@ -168,6 +169,14 @@ enum {
951 SKB_FCLONE_CLONE,
952 };
954 +enum {
955 + SKB_GSO_TCPV4 = 1 << 0,
956 + SKB_GSO_UDPV4 = 1 << 1,
957 +
958 + /* This indicates the skb is from an untrusted source. */
959 + SKB_GSO_DODGY = 1 << 2,
960 +};
961 +
962 /**
963 * struct sk_buff - socket buffer
964 * @next: Next buffer in list
965 @@ -1148,18 +1157,34 @@ static inline int skb_can_coalesce(struc
966 return 0;
967 }
969 +static inline int __skb_linearize(struct sk_buff *skb)
970 +{
971 + return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
972 +}
973 +
974 /**
975 * skb_linearize - convert paged skb to linear one
976 * @skb: buffer to linearize
977 - * @gfp: allocation mode
978 *
979 * If there is no free memory -ENOMEM is returned, otherwise zero
980 * is returned and the old skb data released.
981 */
982 -extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
983 -static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
984 +static inline int skb_linearize(struct sk_buff *skb)
985 +{
986 + return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
987 +}
988 +
989 +/**
990 + * skb_linearize_cow - make sure skb is linear and writable
991 + * @skb: buffer to process
992 + *
993 + * If there is no free memory -ENOMEM is returned, otherwise zero
994 + * is returned and the old skb data released.
995 + */
996 +static inline int skb_linearize_cow(struct sk_buff *skb)
997 {
998 - return __skb_linearize(skb, gfp);
999 + return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1000 + __skb_linearize(skb) : 0;
1003 /**
1004 @@ -1254,6 +1279,7 @@ extern void skb_split(struct sk_b
1005 struct sk_buff *skb1, const u32 len);
1007 extern void skb_release_data(struct sk_buff *skb);
1008 +extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1010 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1011 int len, void *buffer)
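
This is why every skb_linearize(skb, GFP_ATOMIC) call earlier in the patch loses its gfp argument: linearization now pulls paged data into the linear area with __pskb_pull_tail() instead of reallocating the whole buffer, and skb_linearize_cow() additionally forces a copy when the skb is cloned. A stubbed sketch of just that decision logic; the fake_* predicates stand in for the real skb tests:

    #include <stdio.h>

    struct fake_skb { int data_len, cloned; };

    static int fake_is_nonlinear(const struct fake_skb *s) { return s->data_len != 0; }

    static int fake_pull_tail(struct fake_skb *s)
    {
        s->data_len = 0;   /* paged data pulled into the linear area */
        return 0;          /* the real helper returns -ENOMEM on failure */
    }

    static int fake_linearize(struct fake_skb *s)
    {
        return fake_is_nonlinear(s) ? fake_pull_tail(s) : 0;
    }

    static int fake_linearize_cow(struct fake_skb *s)
    {
        return (fake_is_nonlinear(s) || s->cloned) ? fake_pull_tail(s) : 0;
    }

    int main(void)
    {
        struct fake_skb paged = { 512, 0 }, clone = { 0, 1 };

        printf("%d %d\n", fake_linearize(&paged), fake_linearize_cow(&clone));
        return 0;
    }
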
1012 diff -pruN ../orig-linux-2.6.16.29/include/net/pkt_sched.h ./include/net/pkt_sched.h
1013 --- ../orig-linux-2.6.16.29/include/net/pkt_sched.h 2006-09-12 19:02:10.000000000 +0100
1014 +++ ./include/net/pkt_sched.h 2006-09-19 13:59:20.000000000 +0100
1015 @@ -218,12 +218,13 @@ extern struct qdisc_rate_table *qdisc_ge
1016 struct rtattr *tab);
1017 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
1019 -extern int qdisc_restart(struct net_device *dev);
1020 +extern void __qdisc_run(struct net_device *dev);
1022 static inline void qdisc_run(struct net_device *dev)
1024 - while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
1025 - /* NOTHING */;
1026 + if (!netif_queue_stopped(dev) &&
1027 + !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
1028 + __qdisc_run(dev);
1031 extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
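
Previously every caller of qdisc_run() looped on qdisc_restart() itself; now __LINK_STATE_QDISC_RUNNING acts as a test_and_set_bit guard so that a single CPU drains the device queue while concurrent callers return immediately, which also keeps GSO segment lists from interleaving. The same pattern with C11 atomics; all names are invented for the sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag qdisc_running = ATOMIC_FLAG_INIT;
    static int queue_stopped;   /* stand-in for netif_queue_stopped() */

    static void fake_qdisc_drain(void) { printf("draining queue\n"); }

    static void fake_qdisc_run(void)
    {
        if (!queue_stopped && !atomic_flag_test_and_set(&qdisc_running)) {
            fake_qdisc_drain();                /* we won the bit: we do the work */
            atomic_flag_clear(&qdisc_running); /* done; the next caller may run */
        }
        /* losers fall through: the current runner will see their packets */
    }

    int main(void)
    {
        fake_qdisc_run();   /* drains */
        fake_qdisc_run();   /* drains again, since the bit was cleared */
        return 0;
    }

In the kernel the bit is cleared by __qdisc_run() once the queue empties or the device stops; the sketch clears it inline for brevity.
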
1032 diff -pruN ../orig-linux-2.6.16.29/include/net/protocol.h ./include/net/protocol.h
1033 --- ../orig-linux-2.6.16.29/include/net/protocol.h 2006-09-12 19:02:10.000000000 +0100
1034 +++ ./include/net/protocol.h 2006-09-19 13:59:20.000000000 +0100
1035 @@ -37,6 +37,8 @@
1036 struct net_protocol {
1037 int (*handler)(struct sk_buff *skb);
1038 void (*err_handler)(struct sk_buff *skb, u32 info);
1039 + struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1040 + int features);
1041 int no_policy;
1042 };
1044 diff -pruN ../orig-linux-2.6.16.29/include/net/sock.h ./include/net/sock.h
1045 --- ../orig-linux-2.6.16.29/include/net/sock.h 2006-09-12 19:02:10.000000000 +0100
1046 +++ ./include/net/sock.h 2006-09-19 13:59:20.000000000 +0100
1047 @@ -1064,9 +1064,13 @@ static inline void sk_setup_caps(struct
1049 __sk_dst_set(sk, dst);
1050 sk->sk_route_caps = dst->dev->features;
1051 + if (sk->sk_route_caps & NETIF_F_GSO)
1052 + sk->sk_route_caps |= NETIF_F_TSO;
1053 if (sk->sk_route_caps & NETIF_F_TSO) {
1054 if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
1055 sk->sk_route_caps &= ~NETIF_F_TSO;
1056 + else
1057 + sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
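
sk_setup_caps() gains the GSO implication chain: a route with NETIF_F_GSO can always offer TSO, because the stack can segment in software, and once TSO is usable the socket may also assume scatter-gather and hardware checksumming, since checksums are fixed up on the segments after GSO. The flag arithmetic in isolation, with made-up bit values:

    #include <stdio.h>

    enum {
        F_SG = 1, F_HW_CSUM = 2, F_TSO = 4, F_GSO = 8,
        SOCK_NO_LARGESEND = 1,
    };

    static int setup_caps(int dev_features, int sock_flags, int header_len)
    {
        int caps = dev_features;

        if (caps & F_GSO)
            caps |= F_TSO;                /* software GSO can always do TSO */
        if (caps & F_TSO) {
            if ((sock_flags & SOCK_NO_LARGESEND) || header_len)
                caps &= ~F_TSO;
            else
                caps |= F_SG | F_HW_CSUM; /* GSO handles SG/csum on the segments */
        }
        return caps;
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)setup_caps(F_GSO, 0, 0));                 /* 0xf */
        printf("%#x\n", (unsigned)setup_caps(F_GSO, SOCK_NO_LARGESEND, 0)); /* 0x8 */
        return 0;
    }
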
1061 diff -pruN ../orig-linux-2.6.16.29/include/net/tcp.h ./include/net/tcp.h
1062 --- ../orig-linux-2.6.16.29/include/net/tcp.h 2006-09-12 19:02:10.000000000 +0100
1063 +++ ./include/net/tcp.h 2006-09-19 13:59:20.000000000 +0100
1064 @@ -552,13 +552,13 @@ struct tcp_skb_cb {
1065 */
1066 static inline int tcp_skb_pcount(const struct sk_buff *skb)
1068 - return skb_shinfo(skb)->tso_segs;
1069 + return skb_shinfo(skb)->gso_segs;
1072 /* This is valid iff tcp_skb_pcount() > 1. */
1073 static inline int tcp_skb_mss(const struct sk_buff *skb)
1075 - return skb_shinfo(skb)->tso_size;
1076 + return skb_shinfo(skb)->gso_size;
1079 static inline void tcp_dec_pcount_approx(__u32 *count,
1080 @@ -1063,6 +1063,8 @@ extern struct request_sock_ops tcp_reque
1082 extern int tcp_v4_destroy_sock(struct sock *sk);
1084 +extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
1086 #ifdef CONFIG_PROC_FS
1087 extern int tcp4_proc_init(void);
1088 extern void tcp4_proc_exit(void);
1089 diff -pruN ../orig-linux-2.6.16.29/net/atm/clip.c ./net/atm/clip.c
1090 --- ../orig-linux-2.6.16.29/net/atm/clip.c 2006-09-12 19:02:10.000000000 +0100
1091 +++ ./net/atm/clip.c 2006-09-19 13:59:20.000000000 +0100
1092 @@ -101,7 +101,7 @@ static void unlink_clip_vcc(struct clip_
1093 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
1094 return;
1096 - spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
1097 + netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
1098 entry->neigh->used = jiffies;
1099 for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
1100 if (*walk == clip_vcc) {
1101 @@ -125,7 +125,7 @@ static void unlink_clip_vcc(struct clip_
1102 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
1103 "0x%p)\n",entry,clip_vcc);
1104 out:
1105 - spin_unlock_bh(&entry->neigh->dev->xmit_lock);
1106 + netif_tx_unlock_bh(entry->neigh->dev);
1109 /* The neighbour entry n->lock is held. */
1110 diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_device.c ./net/bridge/br_device.c
1111 --- ../orig-linux-2.6.16.29/net/bridge/br_device.c 2006-09-12 19:02:10.000000000 +0100
1112 +++ ./net/bridge/br_device.c 2006-09-19 13:59:20.000000000 +0100
1113 @@ -146,9 +146,9 @@ static int br_set_tx_csum(struct net_dev
1114 struct net_bridge *br = netdev_priv(dev);
1116 if (data)
1117 - br->feature_mask |= NETIF_F_IP_CSUM;
1118 + br->feature_mask |= NETIF_F_NO_CSUM;
1119 else
1120 - br->feature_mask &= ~NETIF_F_IP_CSUM;
1121 + br->feature_mask &= ~NETIF_F_ALL_CSUM;
1123 br_features_recompute(br);
1124 return 0;
1125 @@ -185,6 +185,6 @@ void br_dev_setup(struct net_device *dev
1126 dev->set_mac_address = br_set_mac_address;
1127 dev->priv_flags = IFF_EBRIDGE;
1129 - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
1130 - | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_IP_CSUM;
1131 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
1132 + NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
1134 diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_forward.c ./net/bridge/br_forward.c
1135 --- ../orig-linux-2.6.16.29/net/bridge/br_forward.c 2006-09-12 19:02:10.000000000 +0100
1136 +++ ./net/bridge/br_forward.c 2006-09-19 13:59:20.000000000 +0100
1137 @@ -32,7 +32,7 @@ static inline int should_deliver(const s
1138 int br_dev_queue_push_xmit(struct sk_buff *skb)
1140 /* drop mtu oversized packets except tso */
1141 - if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
1142 + if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
1143 kfree_skb(skb);
1144 else {
1145 #ifdef CONFIG_BRIDGE_NETFILTER
1146 diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_if.c ./net/bridge/br_if.c
1147 --- ../orig-linux-2.6.16.29/net/bridge/br_if.c 2006-09-12 19:02:10.000000000 +0100
1148 +++ ./net/bridge/br_if.c 2006-09-19 13:59:20.000000000 +0100
1149 @@ -385,17 +385,28 @@ void br_features_recompute(struct net_br
1150 struct net_bridge_port *p;
1151 unsigned long features, checksum;
1153 - features = br->feature_mask &~ NETIF_F_IP_CSUM;
1154 - checksum = br->feature_mask & NETIF_F_IP_CSUM;
1155 + checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
1156 + features = br->feature_mask & ~NETIF_F_ALL_CSUM;
1158 list_for_each_entry(p, &br->port_list, list) {
1159 - if (!(p->dev->features
1160 - & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
1161 + unsigned long feature = p->dev->features;
1163 + if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
1164 + checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
1165 + if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
1166 + checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
1167 + if (!(feature & NETIF_F_IP_CSUM))
1168 checksum = 0;
1169 - features &= p->dev->features;
1171 + if (feature & NETIF_F_GSO)
1172 + feature |= NETIF_F_TSO;
1173 + feature |= NETIF_F_GSO;
1175 + features &= feature;
1178 - br->dev->features = features | checksum | NETIF_F_LLTX;
1179 + br->dev->features = features | checksum | NETIF_F_LLTX |
1180 + NETIF_F_GSO_ROBUST;
1183 /* called with RTNL */
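
The recompute loop above folds checksum capability downward through a small lattice: start at NETIF_F_NO_CSUM, degrade to NETIF_F_HW_CSUM and then NETIF_F_IP_CSUM whenever some port lacks the stronger bit, and give up entirely if even IP checksumming is absent. The same walk as a runnable sketch with stand-in flag values:

    #include <stdio.h>

    enum { NO_CSUM = 4, HW_CSUM = 2, IP_CSUM = 1 };

    static int fold_checksum(const int *port_features, int nports)
    {
        int checksum = NO_CSUM;   /* assume the best until a port disagrees */
        int i;

        for (i = 0; i < nports; i++) {
            int f = port_features[i];

            if ((checksum & NO_CSUM) && !(f & NO_CSUM))
                checksum ^= NO_CSUM | HW_CSUM;   /* degrade NO_CSUM -> HW_CSUM */
            if ((checksum & HW_CSUM) && !(f & HW_CSUM))
                checksum ^= HW_CSUM | IP_CSUM;   /* degrade HW_CSUM -> IP_CSUM */
            if (!(f & IP_CSUM))
                checksum = 0;                    /* no common ground at all */
        }
        return checksum;
    }

    int main(void)
    {
        int ports[] = { NO_CSUM | HW_CSUM | IP_CSUM, HW_CSUM | IP_CSUM, IP_CSUM };

        printf("bridge checksum: %d\n", fold_checksum(ports, 3));  /* 1 = IP_CSUM */
        return 0;
    }
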
1184 diff -pruN ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c ./net/bridge/br_netfilter.c
1185 --- ../orig-linux-2.6.16.29/net/bridge/br_netfilter.c 2006-09-12 19:02:10.000000000 +0100
1186 +++ ./net/bridge/br_netfilter.c 2006-09-19 13:59:20.000000000 +0100
1187 @@ -743,7 +743,7 @@ static int br_nf_dev_queue_xmit(struct s
1189 if (skb->protocol == htons(ETH_P_IP) &&
1190 skb->len > skb->dev->mtu &&
1191 - !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
1192 + !skb_shinfo(skb)->gso_size)
1193 return ip_fragment(skb, br_dev_queue_push_xmit);
1194 else
1195 return br_dev_queue_push_xmit(skb);
1196 diff -pruN ../orig-linux-2.6.16.29/net/core/dev.c ./net/core/dev.c
1197 --- ../orig-linux-2.6.16.29/net/core/dev.c 2006-09-12 19:02:10.000000000 +0100
1198 +++ ./net/core/dev.c 2006-09-19 13:59:20.000000000 +0100
1199 @@ -115,6 +115,7 @@
1200 #include <net/iw_handler.h>
1201 #endif /* CONFIG_NET_RADIO */
1202 #include <asm/current.h>
1203 +#include <linux/err.h>
1205 /*
1206 * The list of packet types we will receive (as opposed to discard)
1207 @@ -1032,7 +1033,7 @@ static inline void net_timestamp(struct
1208 * taps currently in use.
1209 */
1211 -void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1212 +static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1214 struct packet_type *ptype;
1216 @@ -1106,6 +1107,45 @@ out:
1217 return ret;
1220 +/**
1221 + * skb_gso_segment - Perform segmentation on skb.
1222 + * @skb: buffer to segment
1223 + * @features: features for the output path (see dev->features)
1224 + *
1225 + * This function segments the given skb and returns a list of segments.
1226 + *
1227 + * It may return NULL if the skb requires no segmentation. This is
1228 + * only possible when GSO is used for verifying header integrity.
1229 + */
1230 +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1231 +{
1232 + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1233 + struct packet_type *ptype;
1234 + int type = skb->protocol;
1236 + BUG_ON(skb_shinfo(skb)->frag_list);
1237 + BUG_ON(skb->ip_summed != CHECKSUM_HW);
1239 + skb->mac.raw = skb->data;
1240 + skb->mac_len = skb->nh.raw - skb->data;
1241 + __skb_pull(skb, skb->mac_len);
1243 + rcu_read_lock();
1244 + list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1245 + if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1246 + segs = ptype->gso_segment(skb, features);
1247 + break;
1248 + }
1249 + }
1250 + rcu_read_unlock();
1252 + __skb_push(skb, skb->data - skb->mac.raw);
1254 + return segs;
1255 +}
1257 +EXPORT_SYMBOL(skb_gso_segment);
1259 /* Take action when hardware reception checksum errors are detected. */
1260 #ifdef CONFIG_BUG
1261 void netdev_rx_csum_fault(struct net_device *dev)
1262 @@ -1142,75 +1182,108 @@ static inline int illegal_highdma(struct
1263 #define illegal_highdma(dev, skb) (0)
1264 #endif
1266 -/* Keep head the same: replace data */
1267 -int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
1268 +struct dev_gso_cb {
1269 + void (*destructor)(struct sk_buff *skb);
1270 +};
1272 +#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1274 +static void dev_gso_skb_destructor(struct sk_buff *skb)
1275 +{
1276 + struct dev_gso_cb *cb;
1278 + do {
1279 + struct sk_buff *nskb = skb->next;
1281 + skb->next = nskb->next;
1282 + nskb->next = NULL;
1283 + kfree_skb(nskb);
1284 + } while (skb->next);
1286 + cb = DEV_GSO_CB(skb);
1287 + if (cb->destructor)
1288 + cb->destructor(skb);
1289 +}
1291 +/**
1292 + * dev_gso_segment - Perform emulated hardware segmentation on skb.
1293 + * @skb: buffer to segment
1294 + *
1295 + * This function segments the given skb and stores the list of segments
1296 + * in skb->next.
1297 + */
1298 +static int dev_gso_segment(struct sk_buff *skb)
1300 - unsigned int size;
1301 - u8 *data;
1302 - long offset;
1303 - struct skb_shared_info *ninfo;
1304 - int headerlen = skb->data - skb->head;
1305 - int expand = (skb->tail + skb->data_len) - skb->end;
1307 - if (skb_shared(skb))
1308 - BUG();
1310 - if (expand <= 0)
1311 - expand = 0;
1313 - size = skb->end - skb->head + expand;
1314 - size = SKB_DATA_ALIGN(size);
1315 - data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
1316 - if (!data)
1317 - return -ENOMEM;
1319 - /* Copy entire thing */
1320 - if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
1321 - BUG();
1323 - /* Set up shinfo */
1324 - ninfo = (struct skb_shared_info*)(data + size);
1325 - atomic_set(&ninfo->dataref, 1);
1326 - ninfo->tso_size = skb_shinfo(skb)->tso_size;
1327 - ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
1328 - ninfo->nr_frags = 0;
1329 - ninfo->frag_list = NULL;
1331 - /* Offset between the two in bytes */
1332 - offset = data - skb->head;
1334 - /* Free old data. */
1335 - skb_release_data(skb);
1337 - skb->head = data;
1338 - skb->end = data + size;
1340 - /* Set up new pointers */
1341 - skb->h.raw += offset;
1342 - skb->nh.raw += offset;
1343 - skb->mac.raw += offset;
1344 - skb->tail += offset;
1345 - skb->data += offset;
1346 + struct net_device *dev = skb->dev;
1347 + struct sk_buff *segs;
1348 + int features = dev->features & ~(illegal_highdma(dev, skb) ?
1349 + NETIF_F_SG : 0);
1351 + segs = skb_gso_segment(skb, features);
1353 + /* Verifying header integrity only. */
1354 + if (!segs)
1355 + return 0;
1357 - /* We are no longer a clone, even if we were. */
1358 - skb->cloned = 0;
1359 + if (unlikely(IS_ERR(segs)))
1360 + return PTR_ERR(segs);
1362 + skb->next = segs;
1363 + DEV_GSO_CB(skb)->destructor = skb->destructor;
1364 + skb->destructor = dev_gso_skb_destructor;
1366 - skb->tail += skb->data_len;
1367 - skb->data_len = 0;
1368 + return 0;
1369 +}
1371 +int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1372 +{
1373 + if (likely(!skb->next)) {
1374 + if (netdev_nit)
1375 + dev_queue_xmit_nit(skb, dev);
1377 + if (netif_needs_gso(dev, skb)) {
1378 + if (unlikely(dev_gso_segment(skb)))
1379 + goto out_kfree_skb;
1380 + if (skb->next)
1381 + goto gso;
1382 + }
1384 + return dev->hard_start_xmit(skb, dev);
1385 + }
1387 +gso:
1388 + do {
1389 + struct sk_buff *nskb = skb->next;
1390 + int rc;
1392 + skb->next = nskb->next;
1393 + nskb->next = NULL;
1394 + rc = dev->hard_start_xmit(nskb, dev);
1395 + if (unlikely(rc)) {
1396 + nskb->next = skb->next;
1397 + skb->next = nskb;
1398 + return rc;
1399 + }
1400 + if (unlikely(netif_queue_stopped(dev) && skb->next))
1401 + return NETDEV_TX_BUSY;
1402 + } while (skb->next);
1404 + skb->destructor = DEV_GSO_CB(skb)->destructor;
1406 +out_kfree_skb:
1407 + kfree_skb(skb);
1408 return 0;
1411 #define HARD_TX_LOCK(dev, cpu) { \
1412 if ((dev->features & NETIF_F_LLTX) == 0) { \
1413 - spin_lock(&dev->xmit_lock); \
1414 - dev->xmit_lock_owner = cpu; \
1415 + netif_tx_lock(dev); \
1416 } \
1419 #define HARD_TX_UNLOCK(dev) { \
1420 if ((dev->features & NETIF_F_LLTX) == 0) { \
1421 - dev->xmit_lock_owner = -1; \
1422 - spin_unlock(&dev->xmit_lock); \
1423 + netif_tx_unlock(dev); \
1424 } \
1427 @@ -1246,9 +1319,13 @@ int dev_queue_xmit(struct sk_buff *skb)
1428 struct Qdisc *q;
1429 int rc = -ENOMEM;
1431 + /* GSO will handle the following emulations directly. */
1432 + if (netif_needs_gso(dev, skb))
1433 + goto gso;
1435 if (skb_shinfo(skb)->frag_list &&
1436 !(dev->features & NETIF_F_FRAGLIST) &&
1437 - __skb_linearize(skb, GFP_ATOMIC))
1438 + __skb_linearize(skb))
1439 goto out_kfree_skb;
1441 /* Fragmented skb is linearized if device does not support SG,
1442 @@ -1257,25 +1334,26 @@ int dev_queue_xmit(struct sk_buff *skb)
1443 */
1444 if (skb_shinfo(skb)->nr_frags &&
1445 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1446 - __skb_linearize(skb, GFP_ATOMIC))
1447 + __skb_linearize(skb))
1448 goto out_kfree_skb;
1450 /* If packet is not checksummed and device does not support
1451 * checksumming for this protocol, complete checksumming here.
1452 */
1453 if (skb->ip_summed == CHECKSUM_HW &&
1454 - (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
1455 + (!(dev->features & NETIF_F_GEN_CSUM) &&
1456 (!(dev->features & NETIF_F_IP_CSUM) ||
1457 skb->protocol != htons(ETH_P_IP))))
1458 if (skb_checksum_help(skb, 0))
1459 goto out_kfree_skb;
1461 +gso:
1462 spin_lock_prefetch(&dev->queue_lock);
1464 /* Disable soft irqs for various locks below. Also
1465 * stops preemption for RCU.
1466 */
1467 - local_bh_disable();
1468 + rcu_read_lock_bh();
1470 /* Updates of qdisc are serialized by queue_lock.
1471 * The struct Qdisc which is pointed to by qdisc is now a
1472 @@ -1309,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
1473 /* The device has no queue. Common case for software devices:
1474 loopback, all the sorts of tunnels...
1476 - Really, it is unlikely that xmit_lock protection is necessary here.
1477 - (f.e. loopback and IP tunnels are clean ignoring statistics
1478 + Really, it is unlikely that netif_tx_lock protection is necessary
1479 + here. (f.e. loopback and IP tunnels are clean ignoring statistics
1480 counters.)
1481 However, it is possible, that they rely on protection
1482 made by us here.
1483 @@ -1326,11 +1404,8 @@ int dev_queue_xmit(struct sk_buff *skb)
1484 HARD_TX_LOCK(dev, cpu);
1486 if (!netif_queue_stopped(dev)) {
1487 - if (netdev_nit)
1488 - dev_queue_xmit_nit(skb, dev);
1490 rc = 0;
1491 - if (!dev->hard_start_xmit(skb, dev)) {
1492 + if (!dev_hard_start_xmit(skb, dev)) {
1493 HARD_TX_UNLOCK(dev);
1494 goto out;
1496 @@ -1349,13 +1424,13 @@ int dev_queue_xmit(struct sk_buff *skb)
1499 rc = -ENETDOWN;
1500 - local_bh_enable();
1501 + rcu_read_unlock_bh();
1503 out_kfree_skb:
1504 kfree_skb(skb);
1505 return rc;
1506 out:
1507 - local_bh_enable();
1508 + rcu_read_unlock_bh();
1509 return rc;
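[The new "gso:" short circuit relies on a gating helper to decide when software segmentation must run. A sketch, assuming the netdevice.h definitions that accompany this patch: a packet needs software GSO when it carries a gso_size that the device cannot handle natively (unsupported gso_type, or hardware that cannot checksum the resulting segments):

	static inline int skb_gso_ok(struct sk_buff *skb, int features)
	{
		int feature = skb_shinfo(skb)->gso_size ?
			      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
		return (features & feature) == feature;
	}

	static inline int netif_needs_gso(struct net_device *dev,
					  struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size &&
		       (!skb_gso_ok(skb, dev->features) ||
			unlikely(skb->ip_summed != CHECKSUM_HW));
	}

The switch from local_bh_disable() to rcu_read_lock_bh() in the same hunk is what lets dev_deactivate() (see the sch_generic.c hunks below) use synchronize_rcu() to wait out in-flight dev_queue_xmit() calls.]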
1512 @@ -2670,7 +2745,7 @@ int register_netdevice(struct net_device
1513 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
1515 spin_lock_init(&dev->queue_lock);
1516 - spin_lock_init(&dev->xmit_lock);
1517 + spin_lock_init(&dev->_xmit_lock);
1518 dev->xmit_lock_owner = -1;
1519 #ifdef CONFIG_NET_CLS_ACT
1520 spin_lock_init(&dev->ingress_lock);
1521 @@ -2714,9 +2789,7 @@ int register_netdevice(struct net_device
1523 /* Fix illegal SG+CSUM combinations. */
1524 if ((dev->features & NETIF_F_SG) &&
1525 - !(dev->features & (NETIF_F_IP_CSUM |
1526 - NETIF_F_NO_CSUM |
1527 - NETIF_F_HW_CSUM))) {
1528 + !(dev->features & NETIF_F_ALL_CSUM)) {
1529 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
1530 dev->name);
1531 dev->features &= ~NETIF_F_SG;
1532 @@ -3268,7 +3341,6 @@ subsys_initcall(net_dev_init);
1533 EXPORT_SYMBOL(__dev_get_by_index);
1534 EXPORT_SYMBOL(__dev_get_by_name);
1535 EXPORT_SYMBOL(__dev_remove_pack);
1536 -EXPORT_SYMBOL(__skb_linearize);
1537 EXPORT_SYMBOL(dev_valid_name);
1538 EXPORT_SYMBOL(dev_add_pack);
1539 EXPORT_SYMBOL(dev_alloc_name);
1540 diff -pruN ../orig-linux-2.6.16.29/net/core/dev_mcast.c ./net/core/dev_mcast.c
1541 --- ../orig-linux-2.6.16.29/net/core/dev_mcast.c 2006-09-12 19:02:10.000000000 +0100
1542 +++ ./net/core/dev_mcast.c 2006-09-19 13:59:20.000000000 +0100
1543 @@ -62,7 +62,7 @@
1544 * Device mc lists are changed by bh at least if IPv6 is enabled,
1545 * so that it must be bh protected.
1547 - * We block accesses to device mc filters with dev->xmit_lock.
1548 + * We block accesses to device mc filters with netif_tx_lock.
1549 */
1551 /*
1552 @@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_d
1554 void dev_mc_upload(struct net_device *dev)
1556 - spin_lock_bh(&dev->xmit_lock);
1557 + netif_tx_lock_bh(dev);
1558 __dev_mc_upload(dev);
1559 - spin_unlock_bh(&dev->xmit_lock);
1560 + netif_tx_unlock_bh(dev);
1563 /*
1564 @@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev
1565 int err = 0;
1566 struct dev_mc_list *dmi, **dmip;
1568 - spin_lock_bh(&dev->xmit_lock);
1569 + netif_tx_lock_bh(dev);
1571 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
1572 /*
1573 @@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev
1574 */
1575 __dev_mc_upload(dev);
1577 - spin_unlock_bh(&dev->xmit_lock);
1578 + netif_tx_unlock_bh(dev);
1579 return 0;
1582 err = -ENOENT;
1583 done:
1584 - spin_unlock_bh(&dev->xmit_lock);
1585 + netif_tx_unlock_bh(dev);
1586 return err;
1589 @@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, v
1591 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
1593 - spin_lock_bh(&dev->xmit_lock);
1594 + netif_tx_lock_bh(dev);
1595 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
1596 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
1597 dmi->dmi_addrlen == alen) {
1598 @@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, v
1601 if ((dmi = dmi1) == NULL) {
1602 - spin_unlock_bh(&dev->xmit_lock);
1603 + netif_tx_unlock_bh(dev);
1604 return -ENOMEM;
1606 memcpy(dmi->dmi_addr, addr, alen);
1607 @@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, v
1609 __dev_mc_upload(dev);
1611 - spin_unlock_bh(&dev->xmit_lock);
1612 + netif_tx_unlock_bh(dev);
1613 return 0;
1615 done:
1616 - spin_unlock_bh(&dev->xmit_lock);
1617 + netif_tx_unlock_bh(dev);
1618 kfree(dmi1);
1619 return err;
1621 @@ -204,7 +204,7 @@ done:
1623 void dev_mc_discard(struct net_device *dev)
1625 - spin_lock_bh(&dev->xmit_lock);
1626 + netif_tx_lock_bh(dev);
1628 while (dev->mc_list != NULL) {
1629 struct dev_mc_list *tmp = dev->mc_list;
1630 @@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *d
1632 dev->mc_count = 0;
1634 - spin_unlock_bh(&dev->xmit_lock);
1635 + netif_tx_unlock_bh(dev);
1638 #ifdef CONFIG_PROC_FS
1639 @@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_fi
1640 struct dev_mc_list *m;
1641 struct net_device *dev = v;
1643 - spin_lock_bh(&dev->xmit_lock);
1644 + netif_tx_lock_bh(dev);
1645 for (m = dev->mc_list; m; m = m->next) {
1646 int i;
1648 @@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_fi
1650 seq_putc(seq, '\n');
1652 - spin_unlock_bh(&dev->xmit_lock);
1653 + netif_tx_unlock_bh(dev);
1654 return 0;
1657 diff -pruN ../orig-linux-2.6.16.29/net/core/ethtool.c ./net/core/ethtool.c
1658 --- ../orig-linux-2.6.16.29/net/core/ethtool.c 2006-09-12 19:02:10.000000000 +0100
1659 +++ ./net/core/ethtool.c 2006-09-19 13:59:20.000000000 +0100
1660 @@ -30,7 +30,7 @@ u32 ethtool_op_get_link(struct net_devic
1662 u32 ethtool_op_get_tx_csum(struct net_device *dev)
1664 - return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
1665 + return (dev->features & NETIF_F_ALL_CSUM) != 0;
1668 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1669 @@ -551,9 +551,7 @@ static int ethtool_set_sg(struct net_dev
1670 return -EFAULT;
1672 if (edata.data &&
1673 - !(dev->features & (NETIF_F_IP_CSUM |
1674 - NETIF_F_NO_CSUM |
1675 - NETIF_F_HW_CSUM)))
1676 + !(dev->features & NETIF_F_ALL_CSUM))
1677 return -EINVAL;
1679 return __ethtool_set_sg(dev, edata.data);
1680 @@ -591,7 +589,7 @@ static int ethtool_set_tso(struct net_de
1682 static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr)
1684 - struct ethtool_value edata = { ETHTOOL_GTSO };
1685 + struct ethtool_value edata = { ETHTOOL_GUFO };
1687 if (!dev->ethtool_ops->get_ufo)
1688 return -EOPNOTSUPP;
1689 @@ -616,6 +614,29 @@ static int ethtool_set_ufo(struct net_de
1690 return dev->ethtool_ops->set_ufo(dev, edata.data);
1693 +static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
1694 +{
1695 + struct ethtool_value edata = { ETHTOOL_GGSO };
1697 + edata.data = dev->features & NETIF_F_GSO;
1698 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
1699 + return -EFAULT;
1700 + return 0;
1701 +}
1703 +static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
1704 +{
1705 + struct ethtool_value edata;
1707 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
1708 + return -EFAULT;
1709 + if (edata.data)
1710 + dev->features |= NETIF_F_GSO;
1711 + else
1712 + dev->features &= ~NETIF_F_GSO;
1713 + return 0;
1714 +}
1716 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
1718 struct ethtool_test test;
1719 @@ -907,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr)
1720 case ETHTOOL_SUFO:
1721 rc = ethtool_set_ufo(dev, useraddr);
1722 break;
1723 + case ETHTOOL_GGSO:
1724 + rc = ethtool_get_gso(dev, useraddr);
1725 + break;
1726 + case ETHTOOL_SGSO:
1727 + rc = ethtool_set_gso(dev, useraddr);
1728 + break;
1729 default:
1730 rc = -EOPNOTSUPP;
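[The new GGSO/SGSO commands are plain ethtool_value operations, so user space can drive them exactly like ETHTOOL_GTSO/STSO. A hypothetical minimal example (interface name and error handling are illustrative only):

	/* Hypothetical user-space sketch: toggle GSO via ETHTOOL_SGSO. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int set_gso(int fd, const char *ifname, __u32 on)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_SGSO, .data = on };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;
		return ioctl(fd, SIOCETHTOOL, &ifr); /* fd: any AF_INET socket */
	}
]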
1732 diff -pruN ../orig-linux-2.6.16.29/net/core/netpoll.c ./net/core/netpoll.c
1733 --- ../orig-linux-2.6.16.29/net/core/netpoll.c 2006-09-12 19:02:10.000000000 +0100
1734 +++ ./net/core/netpoll.c 2006-09-19 13:59:20.000000000 +0100
1735 @@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netp
1737 do {
1738 npinfo->tries--;
1739 - spin_lock(&np->dev->xmit_lock);
1740 - np->dev->xmit_lock_owner = smp_processor_id();
1741 + netif_tx_lock(np->dev);
1743 /*
1744 * network drivers do not expect to be called if the queue is
1745 * stopped.
1746 */
1747 if (netif_queue_stopped(np->dev)) {
1748 - np->dev->xmit_lock_owner = -1;
1749 - spin_unlock(&np->dev->xmit_lock);
1750 + netif_tx_unlock(np->dev);
1751 netpoll_poll(np);
1752 udelay(50);
1753 continue;
1756 status = np->dev->hard_start_xmit(skb, np->dev);
1757 - np->dev->xmit_lock_owner = -1;
1758 - spin_unlock(&np->dev->xmit_lock);
1759 + netif_tx_unlock(np->dev);
1761 /* success */
1762 if(!status) {
1763 diff -pruN ../orig-linux-2.6.16.29/net/core/pktgen.c ./net/core/pktgen.c
1764 --- ../orig-linux-2.6.16.29/net/core/pktgen.c 2006-09-12 19:02:10.000000000 +0100
1765 +++ ./net/core/pktgen.c 2006-09-19 13:59:20.000000000 +0100
1766 @@ -2586,7 +2586,7 @@ static __inline__ void pktgen_xmit(struc
1770 - spin_lock_bh(&odev->xmit_lock);
1771 + netif_tx_lock_bh(odev);
1772 if (!netif_queue_stopped(odev)) {
1774 atomic_inc(&(pkt_dev->skb->users));
1775 @@ -2631,7 +2631,7 @@ retry_now:
1776 pkt_dev->next_tx_ns = 0;
1779 - spin_unlock_bh(&odev->xmit_lock);
1780 + netif_tx_unlock_bh(odev);
1782 /* If pkt_dev->count is zero, then run forever */
1783 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
1784 diff -pruN ../orig-linux-2.6.16.29/net/core/skbuff.c ./net/core/skbuff.c
1785 --- ../orig-linux-2.6.16.29/net/core/skbuff.c 2006-09-12 19:02:10.000000000 +0100
1786 +++ ./net/core/skbuff.c 2006-09-19 13:59:20.000000000 +0100
1787 @@ -164,9 +164,9 @@ struct sk_buff *__alloc_skb(unsigned int
1788 shinfo = skb_shinfo(skb);
1789 atomic_set(&shinfo->dataref, 1);
1790 shinfo->nr_frags = 0;
1791 - shinfo->tso_size = 0;
1792 - shinfo->tso_segs = 0;
1793 - shinfo->ufo_size = 0;
1794 + shinfo->gso_size = 0;
1795 + shinfo->gso_segs = 0;
1796 + shinfo->gso_type = 0;
1797 shinfo->ip6_frag_id = 0;
1798 shinfo->frag_list = NULL;
1800 @@ -230,8 +230,9 @@ struct sk_buff *alloc_skb_from_cache(kme
1802 atomic_set(&(skb_shinfo(skb)->dataref), 1);
1803 skb_shinfo(skb)->nr_frags = 0;
1804 - skb_shinfo(skb)->tso_size = 0;
1805 - skb_shinfo(skb)->tso_segs = 0;
1806 + skb_shinfo(skb)->gso_size = 0;
1807 + skb_shinfo(skb)->gso_segs = 0;
1808 + skb_shinfo(skb)->gso_type = 0;
1809 skb_shinfo(skb)->frag_list = NULL;
1810 out:
1811 return skb;
1812 @@ -501,8 +502,9 @@ static void copy_skb_header(struct sk_bu
1813 new->tc_index = old->tc_index;
1814 #endif
1815 atomic_set(&new->users, 1);
1816 - skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
1817 - skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
1818 + skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1819 + skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1820 + skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1823 /**
1824 @@ -1777,6 +1779,133 @@ int skb_append_datato_frags(struct sock
1825 return 0;
1828 +/**
1829 + * skb_segment - Perform protocol segmentation on skb.
1830 + * @skb: buffer to segment
1831 + * @features: features for the output path (see dev->features)
1832 + *
1833 + * This function performs segmentation on the given skb. It returns
1834 + * a pointer to the first of the resulting segments, linked through
1835 + * skb->next, or ERR_PTR(err) if an error is encountered.
1836 + */
1837 +struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1838 +{
1839 + struct sk_buff *segs = NULL;
1840 + struct sk_buff *tail = NULL;
1841 + unsigned int mss = skb_shinfo(skb)->gso_size;
1842 + unsigned int doffset = skb->data - skb->mac.raw;
1843 + unsigned int offset = doffset;
1844 + unsigned int headroom;
1845 + unsigned int len;
1846 + int sg = features & NETIF_F_SG;
1847 + int nfrags = skb_shinfo(skb)->nr_frags;
1848 + int err = -ENOMEM;
1849 + int i = 0;
1850 + int pos;
1852 + __skb_push(skb, doffset);
1853 + headroom = skb_headroom(skb);
1854 + pos = skb_headlen(skb);
1856 + do {
1857 + struct sk_buff *nskb;
1858 + skb_frag_t *frag;
1859 + int hsize, nsize;
1860 + int k;
1861 + int size;
1863 + len = skb->len - offset;
1864 + if (len > mss)
1865 + len = mss;
1867 + hsize = skb_headlen(skb) - offset;
1868 + if (hsize < 0)
1869 + hsize = 0;
1870 + nsize = hsize + doffset;
1871 + if (nsize > len + doffset || !sg)
1872 + nsize = len + doffset;
1874 + nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
1875 + if (unlikely(!nskb))
1876 + goto err;
1878 + if (segs)
1879 + tail->next = nskb;
1880 + else
1881 + segs = nskb;
1882 + tail = nskb;
1884 + nskb->dev = skb->dev;
1885 + nskb->priority = skb->priority;
1886 + nskb->protocol = skb->protocol;
1887 + nskb->dst = dst_clone(skb->dst);
1888 + memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
1889 + nskb->pkt_type = skb->pkt_type;
1890 + nskb->mac_len = skb->mac_len;
1892 + skb_reserve(nskb, headroom);
1893 + nskb->mac.raw = nskb->data;
1894 + nskb->nh.raw = nskb->data + skb->mac_len;
1895 + nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
1896 + memcpy(skb_put(nskb, doffset), skb->data, doffset);
1898 + if (!sg) {
1899 + nskb->csum = skb_copy_and_csum_bits(skb, offset,
1900 + skb_put(nskb, len),
1901 + len, 0);
1902 + continue;
1903 + }
1905 + frag = skb_shinfo(nskb)->frags;
1906 + k = 0;
1908 + nskb->ip_summed = CHECKSUM_HW;
1909 + nskb->csum = skb->csum;
1910 + memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
1912 + while (pos < offset + len) {
1913 + BUG_ON(i >= nfrags);
1915 + *frag = skb_shinfo(skb)->frags[i];
1916 + get_page(frag->page);
1917 + size = frag->size;
1919 + if (pos < offset) {
1920 + frag->page_offset += offset - pos;
1921 + frag->size -= offset - pos;
1922 + }
1924 + k++;
1926 + if (pos + size <= offset + len) {
1927 + i++;
1928 + pos += size;
1929 + } else {
1930 + frag->size -= pos + size - (offset + len);
1931 + break;
1932 + }
1934 + frag++;
1935 + }
1937 + skb_shinfo(nskb)->nr_frags = k;
1938 + nskb->data_len = len - hsize;
1939 + nskb->len += nskb->data_len;
1940 + nskb->truesize += nskb->data_len;
1941 + } while ((offset += len) < skb->len);
1943 + return segs;
1945 +err:
1946 + while ((skb = segs)) {
1947 + segs = skb->next;
1948 + kfree_skb(skb);
1949 + }
1950 + return ERR_PTR(err);
1951 +}
1953 +EXPORT_SYMBOL_GPL(skb_segment);
1955 void __init skb_init(void)
1957 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
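[skb_segment() hands back a NULL-terminated list of segments linked through skb->next, or an ERR_PTR() on failure. A caller sketch mirroring how the xfrm output paths below consume the list (transmit_one() is a hypothetical per-segment handler):

	static int transmit_all(struct sk_buff *skb, int features)
	{
		struct sk_buff *segs = skb_segment(skb, features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);

		while (segs) {
			struct sk_buff *next = segs->next;

			segs->next = NULL;
			transmit_one(segs);	/* hypothetical consumer */
			segs = next;
		}
		return 0;
	}
]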
1958 diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c ./net/decnet/dn_nsp_in.c
1959 --- ../orig-linux-2.6.16.29/net/decnet/dn_nsp_in.c 2006-09-12 19:02:10.000000000 +0100
1960 +++ ./net/decnet/dn_nsp_in.c 2006-09-19 13:59:20.000000000 +0100
1961 @@ -801,8 +801,7 @@ got_it:
1962 * We linearize everything except data segments here.
1963 */
1964 if (cb->nsp_flags & ~0x60) {
1965 - if (unlikely(skb_is_nonlinear(skb)) &&
1966 - skb_linearize(skb, GFP_ATOMIC) != 0)
1967 + if (unlikely(skb_linearize(skb)))
1968 goto free_out;
1971 diff -pruN ../orig-linux-2.6.16.29/net/decnet/dn_route.c ./net/decnet/dn_route.c
1972 --- ../orig-linux-2.6.16.29/net/decnet/dn_route.c 2006-09-12 19:02:10.000000000 +0100
1973 +++ ./net/decnet/dn_route.c 2006-09-19 13:59:20.000000000 +0100
1974 @@ -629,8 +629,7 @@ int dn_route_rcv(struct sk_buff *skb, st
1975 padlen);
1977 if (flags & DN_RT_PKT_CNTL) {
1978 - if (unlikely(skb_is_nonlinear(skb)) &&
1979 - skb_linearize(skb, GFP_ATOMIC) != 0)
1980 + if (unlikely(skb_linearize(skb)))
1981 goto dump_it;
1983 switch(flags & DN_RT_CNTL_MSK) {
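[These call sites move from the old skb_linearize(skb, gfp_mask) to a no-argument skb_linearize() plus a copy-on-write variant, folding the skb_is_nonlinear()/skb_cloned() tests into the helpers. A sketch of the replacements assumed from the skbuff.h portion of this patch (consistent with __skb_linearize losing its EXPORT_SYMBOL above):

	static inline int __skb_linearize(struct sk_buff *skb)
	{
		return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
	}

	/* linearize only if the data is actually fragmented */
	static inline int skb_linearize(struct sk_buff *skb)
	{
		return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
	}

	/* also take a private copy if the skb is shared (for writers) */
	static inline int skb_linearize_cow(struct sk_buff *skb)
	{
		return skb_is_nonlinear(skb) || skb_cloned(skb) ?
		       __skb_linearize(skb) : 0;
	}
]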
1984 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/af_inet.c ./net/ipv4/af_inet.c
1985 --- ../orig-linux-2.6.16.29/net/ipv4/af_inet.c 2006-09-12 19:02:10.000000000 +0100
1986 +++ ./net/ipv4/af_inet.c 2006-09-19 13:59:20.000000000 +0100
1987 @@ -68,6 +68,7 @@
1988 */
1990 #include <linux/config.h>
1991 +#include <linux/err.h>
1992 #include <linux/errno.h>
1993 #include <linux/types.h>
1994 #include <linux/socket.h>
1995 @@ -1084,6 +1085,54 @@ int inet_sk_rebuild_header(struct sock *
1997 EXPORT_SYMBOL(inet_sk_rebuild_header);
1999 +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
2000 +{
2001 + struct sk_buff *segs = ERR_PTR(-EINVAL);
2002 + struct iphdr *iph;
2003 + struct net_protocol *ops;
2004 + int proto;
2005 + int ihl;
2006 + int id;
2008 + if (!pskb_may_pull(skb, sizeof(*iph)))
2009 + goto out;
2011 + iph = skb->nh.iph;
2012 + ihl = iph->ihl * 4;
2013 + if (ihl < sizeof(*iph))
2014 + goto out;
2016 + if (!pskb_may_pull(skb, ihl))
2017 + goto out;
2019 + skb->h.raw = __skb_pull(skb, ihl);
2020 + iph = skb->nh.iph;
2021 + id = ntohs(iph->id);
2022 + proto = iph->protocol & (MAX_INET_PROTOS - 1);
2023 + segs = ERR_PTR(-EPROTONOSUPPORT);
2025 + rcu_read_lock();
2026 + ops = rcu_dereference(inet_protos[proto]);
2027 + if (ops && ops->gso_segment)
2028 + segs = ops->gso_segment(skb, features);
2029 + rcu_read_unlock();
2031 + if (!segs || unlikely(IS_ERR(segs)))
2032 + goto out;
2034 + skb = segs;
2035 + do {
2036 + iph = skb->nh.iph;
2037 + iph->id = htons(id++);
2038 + iph->tot_len = htons(skb->len - skb->mac_len);
2039 + iph->check = 0;
2040 + iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
2041 + } while ((skb = skb->next));
2043 +out:
2044 + return segs;
2045 +}
2047 #ifdef CONFIG_IP_MULTICAST
2048 static struct net_protocol igmp_protocol = {
2049 .handler = igmp_rcv,
2050 @@ -1093,6 +1142,7 @@ static struct net_protocol igmp_protocol
2051 static struct net_protocol tcp_protocol = {
2052 .handler = tcp_v4_rcv,
2053 .err_handler = tcp_v4_err,
2054 + .gso_segment = tcp_tso_segment,
2055 .no_policy = 1,
2056 };
2058 @@ -1138,6 +1188,7 @@ static int ipv4_proc_init(void);
2059 static struct packet_type ip_packet_type = {
2060 .type = __constant_htons(ETH_P_IP),
2061 .func = ip_rcv,
2062 + .gso_segment = inet_gso_segment,
2063 };
2065 static int __init inet_init(void)
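[inet_gso_segment() is reached through the new gso_segment hook on struct packet_type. A simplified sketch of the dispatcher assumed in the net/core/dev.c part of this patch: skb_gso_segment() strips the link-layer header, finds the L3 handler for skb->protocol under RCU, and lets it segment the packet:

	struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
	{
		struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
		struct packet_type *ptype;
		int type = skb->protocol;

		skb->mac.raw = skb->data;
		skb->mac_len = skb->nh.raw - skb->data;
		__skb_pull(skb, skb->mac_len);

		rcu_read_lock();
		list_for_each_entry_rcu(ptype,
				&ptype_base[ntohs(type) & 15], list) {
			if (ptype->type == type && !ptype->dev &&
			    ptype->gso_segment) {
				segs = ptype->gso_segment(skb, features);
				break;
			}
		}
		rcu_read_unlock();

		__skb_push(skb, skb->data - skb->mac.raw);
		return segs;
	}

inet_gso_segment() then repeats the pattern one layer down through net_protocol->gso_segment, which is how tcp_tso_segment below gets called.]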
2066 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ip_output.c ./net/ipv4/ip_output.c
2067 --- ../orig-linux-2.6.16.29/net/ipv4/ip_output.c 2006-09-12 19:02:10.000000000 +0100
2068 +++ ./net/ipv4/ip_output.c 2006-09-19 13:59:20.000000000 +0100
2069 @@ -210,8 +210,7 @@ static inline int ip_finish_output(struc
2070 return dst_output(skb);
2072 #endif
2073 - if (skb->len > dst_mtu(skb->dst) &&
2074 - !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
2075 + if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
2076 return ip_fragment(skb, ip_finish_output2);
2077 else
2078 return ip_finish_output2(skb);
2079 @@ -362,7 +361,7 @@ packet_routed:
2082 ip_select_ident_more(iph, &rt->u.dst, sk,
2083 - (skb_shinfo(skb)->tso_segs ?: 1) - 1);
2084 + (skb_shinfo(skb)->gso_segs ?: 1) - 1);
2086 /* Add an IP checksum. */
2087 ip_send_check(iph);
2088 @@ -743,7 +742,8 @@ static inline int ip_ufo_append_data(str
2089 (length - transhdrlen));
2090 if (!err) {
2091 /* specify the length of each IP datagram fragment*/
2092 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
2093 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
2094 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2095 __skb_queue_tail(&sk->sk_write_queue, skb);
2097 return 0;
2098 @@ -839,7 +839,7 @@ int ip_append_data(struct sock *sk,
2099 */
2100 if (transhdrlen &&
2101 length + fragheaderlen <= mtu &&
2102 - rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
2103 + rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
2104 !exthdrlen)
2105 csummode = CHECKSUM_HW;
2107 @@ -1086,14 +1086,16 @@ ssize_t ip_append_page(struct sock *sk,
2109 inet->cork.length += size;
2110 if ((sk->sk_protocol == IPPROTO_UDP) &&
2111 - (rt->u.dst.dev->features & NETIF_F_UFO))
2112 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
2113 + (rt->u.dst.dev->features & NETIF_F_UFO)) {
2114 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
2115 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2116 + }
2119 while (size > 0) {
2120 int i;
2122 - if (skb_shinfo(skb)->ufo_size)
2123 + if (skb_shinfo(skb)->gso_size)
2124 len = size;
2125 else {
2127 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c ./net/ipv4/ipcomp.c
2128 --- ../orig-linux-2.6.16.29/net/ipv4/ipcomp.c 2006-09-12 19:02:10.000000000 +0100
2129 +++ ./net/ipv4/ipcomp.c 2006-09-19 13:59:20.000000000 +0100
2130 @@ -84,7 +84,7 @@ static int ipcomp_input(struct xfrm_stat
2131 struct xfrm_decap_state *decap, struct sk_buff *skb)
2133 u8 nexthdr;
2134 - int err = 0;
2135 + int err = -ENOMEM;
2136 struct iphdr *iph;
2137 union {
2138 struct iphdr iph;
2139 @@ -92,11 +92,8 @@ static int ipcomp_input(struct xfrm_stat
2140 } tmp_iph;
2143 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2144 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2145 - err = -ENOMEM;
2146 + if (skb_linearize_cow(skb))
2147 goto out;
2148 - }
2150 skb->ip_summed = CHECKSUM_NONE;
2152 @@ -171,10 +168,8 @@ static int ipcomp_output(struct xfrm_sta
2153 goto out_ok;
2156 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2157 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2158 + if (skb_linearize_cow(skb))
2159 goto out_ok;
2160 - }
2162 err = ipcomp_compress(x, skb);
2163 iph = skb->nh.iph;
2164 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp.c ./net/ipv4/tcp.c
2165 --- ../orig-linux-2.6.16.29/net/ipv4/tcp.c 2006-09-12 19:02:10.000000000 +0100
2166 +++ ./net/ipv4/tcp.c 2006-09-19 13:59:20.000000000 +0100
2167 @@ -257,6 +257,7 @@
2168 #include <linux/fs.h>
2169 #include <linux/random.h>
2170 #include <linux/bootmem.h>
2171 +#include <linux/err.h>
2173 #include <net/icmp.h>
2174 #include <net/tcp.h>
2175 @@ -570,7 +571,7 @@ new_segment:
2176 skb->ip_summed = CHECKSUM_HW;
2177 tp->write_seq += copy;
2178 TCP_SKB_CB(skb)->end_seq += copy;
2179 - skb_shinfo(skb)->tso_segs = 0;
2180 + skb_shinfo(skb)->gso_segs = 0;
2182 if (!copied)
2183 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
2184 @@ -621,14 +622,10 @@ ssize_t tcp_sendpage(struct socket *sock
2185 ssize_t res;
2186 struct sock *sk = sock->sk;
2188 -#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
2190 if (!(sk->sk_route_caps & NETIF_F_SG) ||
2191 - !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
2192 + !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
2193 return sock_no_sendpage(sock, page, offset, size, flags);
2195 -#undef TCP_ZC_CSUM_FLAGS
2197 lock_sock(sk);
2198 TCP_CHECK_TIMER(sk);
2199 res = do_tcp_sendpages(sk, &page, offset, size, flags);
2200 @@ -725,9 +722,7 @@ new_segment:
2201 /*
2202 * Check whether we can use HW checksum.
2203 */
2204 - if (sk->sk_route_caps &
2205 - (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
2206 - NETIF_F_HW_CSUM))
2207 + if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
2208 skb->ip_summed = CHECKSUM_HW;
2210 skb_entail(sk, tp, skb);
2211 @@ -823,7 +818,7 @@ new_segment:
2213 tp->write_seq += copy;
2214 TCP_SKB_CB(skb)->end_seq += copy;
2215 - skb_shinfo(skb)->tso_segs = 0;
2216 + skb_shinfo(skb)->gso_segs = 0;
2218 from += copy;
2219 copied += copy;
2220 @@ -2026,6 +2021,71 @@ int tcp_getsockopt(struct sock *sk, int
2224 +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2225 +{
2226 + struct sk_buff *segs = ERR_PTR(-EINVAL);
2227 + struct tcphdr *th;
2228 + unsigned thlen;
2229 + unsigned int seq;
2230 + unsigned int delta;
2231 + unsigned int oldlen;
2232 + unsigned int len;
2234 + if (!pskb_may_pull(skb, sizeof(*th)))
2235 + goto out;
2237 + th = skb->h.th;
2238 + thlen = th->doff * 4;
2239 + if (thlen < sizeof(*th))
2240 + goto out;
2242 + if (!pskb_may_pull(skb, thlen))
2243 + goto out;
2245 + segs = NULL;
2246 + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
2247 + goto out;
2249 + oldlen = (u16)~skb->len;
2250 + __skb_pull(skb, thlen);
2252 + segs = skb_segment(skb, features);
2253 + if (IS_ERR(segs))
2254 + goto out;
2256 + len = skb_shinfo(skb)->gso_size;
2257 + delta = htonl(oldlen + (thlen + len));
2259 + skb = segs;
2260 + th = skb->h.th;
2261 + seq = ntohl(th->seq);
2263 + do {
2264 + th->fin = th->psh = 0;
2266 + th->check = ~csum_fold(th->check + delta);
2267 + if (skb->ip_summed != CHECKSUM_HW)
2268 + th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2269 + skb->csum));
2271 + seq += len;
2272 + skb = skb->next;
2273 + th = skb->h.th;
2275 + th->seq = htonl(seq);
2276 + th->cwr = 0;
2277 + } while (skb->next);
2279 + delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
2280 + th->check = ~csum_fold(th->check + delta);
2281 + if (skb->ip_summed != CHECKSUM_HW)
2282 + th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2283 + skb->csum));
2285 +out:
2286 + return segs;
2287 +}
2289 extern void __skb_cb_too_small_for_tcp(int, int);
2290 extern struct tcp_congestion_ops tcp_reno;
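[The checksum arithmetic in tcp_tso_segment avoids re-summing any payload: oldlen holds the ones'-complement of the original TCP-length contribution to the pseudo-header, delta folds in each segment's new length (thlen + len), and th->check is patched in place; only segments not using hardware checksum offload get a full csum_partial() over the header. The rule being applied is standard incremental update (RFC 1624). A self-contained illustration:

	/* Illustration: RFC 1624 eqn. 3, HC' = ~(~HC + ~m + m').
	 * When a 16-bit field covered by a ones'-complement checksum
	 * changes from 'old' to 'new', fix the checksum in place.
	 */
	static unsigned short csum_fixup16(unsigned short check,
					   unsigned short old,
					   unsigned short new)
	{
		unsigned long sum = (unsigned short)~check;

		sum += (unsigned short)~old;
		sum += new;
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
		return ~sum;
	}
]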
2292 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c ./net/ipv4/tcp_input.c
2293 --- ../orig-linux-2.6.16.29/net/ipv4/tcp_input.c 2006-09-12 19:02:10.000000000 +0100
2294 +++ ./net/ipv4/tcp_input.c 2006-09-19 13:59:20.000000000 +0100
2295 @@ -1072,7 +1072,7 @@ tcp_sacktag_write_queue(struct sock *sk,
2296 else
2297 pkt_len = (end_seq -
2298 TCP_SKB_CB(skb)->seq);
2299 - if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
2300 + if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
2301 break;
2302 pcount = tcp_skb_pcount(skb);
2304 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c ./net/ipv4/tcp_output.c
2305 --- ../orig-linux-2.6.16.29/net/ipv4/tcp_output.c 2006-09-12 19:02:10.000000000 +0100
2306 +++ ./net/ipv4/tcp_output.c 2006-09-19 13:59:20.000000000 +0100
2307 @@ -497,15 +497,17 @@ static void tcp_set_skb_tso_segs(struct
2308 /* Avoid the costly divide in the normal
2309 * non-TSO case.
2310 */
2311 - skb_shinfo(skb)->tso_segs = 1;
2312 - skb_shinfo(skb)->tso_size = 0;
2313 + skb_shinfo(skb)->gso_segs = 1;
2314 + skb_shinfo(skb)->gso_size = 0;
2315 + skb_shinfo(skb)->gso_type = 0;
2316 } else {
2317 unsigned int factor;
2319 factor = skb->len + (mss_now - 1);
2320 factor /= mss_now;
2321 - skb_shinfo(skb)->tso_segs = factor;
2322 - skb_shinfo(skb)->tso_size = mss_now;
2323 + skb_shinfo(skb)->gso_segs = factor;
2324 + skb_shinfo(skb)->gso_size = mss_now;
2325 + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2329 @@ -850,7 +852,7 @@ static int tcp_init_tso_segs(struct sock
2331 if (!tso_segs ||
2332 (tso_segs > 1 &&
2333 - skb_shinfo(skb)->tso_size != mss_now)) {
2334 + tcp_skb_mss(skb) != mss_now)) {
2335 tcp_set_skb_tso_segs(sk, skb, mss_now);
2336 tso_segs = tcp_skb_pcount(skb);
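[tcp_init_tso_segs now goes through accessors instead of touching the shared info directly. A sketch of the include/net/tcp.h definitions this patch is assumed to carry:

	/* One skb can stand for several real packets under TSO/GSO. */
	static inline int tcp_skb_pcount(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_segs;
	}

	/* Per-segment size; only meaningful when tcp_skb_pcount() > 1. */
	static inline int tcp_skb_mss(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}
]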
2338 @@ -1510,8 +1512,9 @@ int tcp_retransmit_skb(struct sock *sk,
2339 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2340 if (!pskb_trim(skb, 0)) {
2341 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
2342 - skb_shinfo(skb)->tso_segs = 1;
2343 - skb_shinfo(skb)->tso_size = 0;
2344 + skb_shinfo(skb)->gso_segs = 1;
2345 + skb_shinfo(skb)->gso_size = 0;
2346 + skb_shinfo(skb)->gso_type = 0;
2347 skb->ip_summed = CHECKSUM_NONE;
2348 skb->csum = 0;
2350 @@ -1716,8 +1719,9 @@ void tcp_send_fin(struct sock *sk)
2351 skb->csum = 0;
2352 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2353 TCP_SKB_CB(skb)->sacked = 0;
2354 - skb_shinfo(skb)->tso_segs = 1;
2355 - skb_shinfo(skb)->tso_size = 0;
2356 + skb_shinfo(skb)->gso_segs = 1;
2357 + skb_shinfo(skb)->gso_size = 0;
2358 + skb_shinfo(skb)->gso_type = 0;
2360 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2361 TCP_SKB_CB(skb)->seq = tp->write_seq;
2362 @@ -1749,8 +1753,9 @@ void tcp_send_active_reset(struct sock *
2363 skb->csum = 0;
2364 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2365 TCP_SKB_CB(skb)->sacked = 0;
2366 - skb_shinfo(skb)->tso_segs = 1;
2367 - skb_shinfo(skb)->tso_size = 0;
2368 + skb_shinfo(skb)->gso_segs = 1;
2369 + skb_shinfo(skb)->gso_size = 0;
2370 + skb_shinfo(skb)->gso_type = 0;
2372 /* Send it off. */
2373 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
2374 @@ -1833,8 +1838,9 @@ struct sk_buff * tcp_make_synack(struct
2375 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2376 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2377 TCP_SKB_CB(skb)->sacked = 0;
2378 - skb_shinfo(skb)->tso_segs = 1;
2379 - skb_shinfo(skb)->tso_size = 0;
2380 + skb_shinfo(skb)->gso_segs = 1;
2381 + skb_shinfo(skb)->gso_size = 0;
2382 + skb_shinfo(skb)->gso_type = 0;
2383 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2384 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2385 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2386 @@ -1937,8 +1943,9 @@ int tcp_connect(struct sock *sk)
2387 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2388 TCP_ECN_send_syn(sk, tp, buff);
2389 TCP_SKB_CB(buff)->sacked = 0;
2390 - skb_shinfo(buff)->tso_segs = 1;
2391 - skb_shinfo(buff)->tso_size = 0;
2392 + skb_shinfo(buff)->gso_segs = 1;
2393 + skb_shinfo(buff)->gso_size = 0;
2394 + skb_shinfo(buff)->gso_type = 0;
2395 buff->csum = 0;
2396 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2397 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2398 @@ -2042,8 +2049,9 @@ void tcp_send_ack(struct sock *sk)
2399 buff->csum = 0;
2400 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2401 TCP_SKB_CB(buff)->sacked = 0;
2402 - skb_shinfo(buff)->tso_segs = 1;
2403 - skb_shinfo(buff)->tso_size = 0;
2404 + skb_shinfo(buff)->gso_segs = 1;
2405 + skb_shinfo(buff)->gso_size = 0;
2406 + skb_shinfo(buff)->gso_type = 0;
2408 /* Send it off, this clears delayed acks for us. */
2409 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
2410 @@ -2078,8 +2086,9 @@ static int tcp_xmit_probe_skb(struct soc
2411 skb->csum = 0;
2412 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2413 TCP_SKB_CB(skb)->sacked = urgent;
2414 - skb_shinfo(skb)->tso_segs = 1;
2415 - skb_shinfo(skb)->tso_size = 0;
2416 + skb_shinfo(skb)->gso_segs = 1;
2417 + skb_shinfo(skb)->gso_size = 0;
2418 + skb_shinfo(skb)->gso_type = 0;
2420 /* Use a previous sequence. This should cause the other
2421 * end to send an ack. Don't queue or clone SKB, just
2422 diff -pruN ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c ./net/ipv4/xfrm4_output.c
2423 --- ../orig-linux-2.6.16.29/net/ipv4/xfrm4_output.c 2006-09-19 13:59:15.000000000 +0100
2424 +++ ./net/ipv4/xfrm4_output.c 2006-09-19 13:59:20.000000000 +0100
2425 @@ -9,6 +9,8 @@
2426 */
2428 #include <linux/compiler.h>
2429 +#include <linux/if_ether.h>
2430 +#include <linux/kernel.h>
2431 #include <linux/skbuff.h>
2432 #include <linux/spinlock.h>
2433 #include <linux/netfilter_ipv4.h>
2434 @@ -158,16 +160,10 @@ error_nolock:
2435 goto out_exit;
2438 -static int xfrm4_output_finish(struct sk_buff *skb)
2439 +static int xfrm4_output_finish2(struct sk_buff *skb)
2441 int err;
2443 -#ifdef CONFIG_NETFILTER
2444 - if (!skb->dst->xfrm) {
2445 - IPCB(skb)->flags |= IPSKB_REROUTED;
2446 - return dst_output(skb);
2447 - }
2448 -#endif
2449 while (likely((err = xfrm4_output_one(skb)) == 0)) {
2450 nf_reset(skb);
2452 @@ -180,7 +176,7 @@ static int xfrm4_output_finish(struct sk
2453 return dst_output(skb);
2455 err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
2456 - skb->dst->dev, xfrm4_output_finish);
2457 + skb->dst->dev, xfrm4_output_finish2);
2458 if (unlikely(err != 1))
2459 break;
2461 @@ -188,6 +184,48 @@ static int xfrm4_output_finish(struct sk
2462 return err;
2465 +static int xfrm4_output_finish(struct sk_buff *skb)
2466 +{
2467 + struct sk_buff *segs;
2469 +#ifdef CONFIG_NETFILTER
2470 + if (!skb->dst->xfrm) {
2471 + IPCB(skb)->flags |= IPSKB_REROUTED;
2472 + return dst_output(skb);
2473 + }
2474 +#endif
2476 + if (!skb_shinfo(skb)->gso_size)
2477 + return xfrm4_output_finish2(skb);
2479 + skb->protocol = htons(ETH_P_IP);
2480 + segs = skb_gso_segment(skb, 0);
2481 + kfree_skb(skb);
2482 + if (unlikely(IS_ERR(segs)))
2483 + return PTR_ERR(segs);
2485 + do {
2486 + struct sk_buff *nskb = segs->next;
2487 + int err;
2489 + segs->next = NULL;
2490 + err = xfrm4_output_finish2(segs);
2492 + if (unlikely(err)) {
2493 + while ((segs = nskb)) {
2494 + nskb = segs->next;
2495 + segs->next = NULL;
2496 + kfree_skb(segs);
2497 + }
2498 + return err;
2499 + }
2501 + segs = nskb;
2502 + } while (segs);
2504 + return 0;
2505 +}
2507 int xfrm4_output(struct sk_buff *skb)
2509 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
2510 diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c ./net/ipv6/ip6_output.c
2511 --- ../orig-linux-2.6.16.29/net/ipv6/ip6_output.c 2006-09-12 19:02:10.000000000 +0100
2512 +++ ./net/ipv6/ip6_output.c 2006-09-19 13:59:20.000000000 +0100
2513 @@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *s
2515 int ip6_output(struct sk_buff *skb)
2517 - if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
2518 + if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
2519 dst_allfrag(skb->dst))
2520 return ip6_fragment(skb, ip6_output2);
2521 else
2522 @@ -829,8 +829,9 @@ static inline int ip6_ufo_append_data(st
2523 struct frag_hdr fhdr;
2525 /* specify the length of each IP datagram fragment*/
2526 - skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
2527 - sizeof(struct frag_hdr);
2528 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
2529 + sizeof(struct frag_hdr);
2530 + skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
2531 ipv6_select_ident(skb, &fhdr);
2532 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
2533 __skb_queue_tail(&sk->sk_write_queue, skb);
2534 diff -pruN ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c ./net/ipv6/ipcomp6.c
2535 --- ../orig-linux-2.6.16.29/net/ipv6/ipcomp6.c 2006-09-12 19:02:10.000000000 +0100
2536 +++ ./net/ipv6/ipcomp6.c 2006-09-19 13:59:20.000000000 +0100
2537 @@ -64,7 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
2539 static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
2541 - int err = 0;
2542 + int err = -ENOMEM;
2543 u8 nexthdr = 0;
2544 int hdr_len = skb->h.raw - skb->nh.raw;
2545 unsigned char *tmp_hdr = NULL;
2546 @@ -75,11 +75,8 @@ static int ipcomp6_input(struct xfrm_sta
2547 struct crypto_tfm *tfm;
2548 int cpu;
2550 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2551 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2552 - err = -ENOMEM;
2553 + if (skb_linearize_cow(skb))
2554 goto out;
2555 - }
2557 skb->ip_summed = CHECKSUM_NONE;
2559 @@ -158,10 +155,8 @@ static int ipcomp6_output(struct xfrm_st
2560 goto out_ok;
2563 - if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
2564 - skb_linearize(skb, GFP_ATOMIC) != 0) {
2565 + if (skb_linearize_cow(skb))
2566 goto out_ok;
2567 - }
2569 /* compression */
2570 plen = skb->len - hdr_len;
2571 diff -pruN ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c ./net/ipv6/xfrm6_output.c
2572 --- ../orig-linux-2.6.16.29/net/ipv6/xfrm6_output.c 2006-09-12 19:02:10.000000000 +0100
2573 +++ ./net/ipv6/xfrm6_output.c 2006-09-19 13:59:20.000000000 +0100
2574 @@ -151,7 +151,7 @@ error_nolock:
2575 goto out_exit;
2578 -static int xfrm6_output_finish(struct sk_buff *skb)
2579 +static int xfrm6_output_finish2(struct sk_buff *skb)
2581 int err;
2583 @@ -167,7 +167,7 @@ static int xfrm6_output_finish(struct sk
2584 return dst_output(skb);
2586 err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
2587 - skb->dst->dev, xfrm6_output_finish);
2588 + skb->dst->dev, xfrm6_output_finish2);
2589 if (unlikely(err != 1))
2590 break;
2592 @@ -175,6 +175,41 @@ static int xfrm6_output_finish(struct sk
2593 return err;
2596 +static int xfrm6_output_finish(struct sk_buff *skb)
2597 +{
2598 + struct sk_buff *segs;
2600 + if (!skb_shinfo(skb)->gso_size)
2601 + return xfrm6_output_finish2(skb);
2603 + skb->protocol = htons(ETH_P_IP);
2604 + segs = skb_gso_segment(skb, 0);
2605 + kfree_skb(skb);
2606 + if (unlikely(IS_ERR(segs)))
2607 + return PTR_ERR(segs);
2609 + do {
2610 + struct sk_buff *nskb = segs->next;
2611 + int err;
2613 + segs->next = NULL;
2614 + err = xfrm6_output_finish2(segs);
2616 + if (unlikely(err)) {
2617 + while ((segs = nskb)) {
2618 + nskb = segs->next;
2619 + segs->next = NULL;
2620 + kfree_skb(segs);
2621 + }
2622 + return err;
2623 + }
2625 + segs = nskb;
2626 + } while (segs);
2628 + return 0;
2629 +}
2631 int xfrm6_output(struct sk_buff *skb)
2633 return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
2634 diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_generic.c ./net/sched/sch_generic.c
2635 --- ../orig-linux-2.6.16.29/net/sched/sch_generic.c 2006-09-12 19:02:10.000000000 +0100
2636 +++ ./net/sched/sch_generic.c 2006-09-19 13:59:20.000000000 +0100
2637 @@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device
2638 dev->queue_lock serializes queue accesses for this device
2639 AND dev->qdisc pointer itself.
2641 - dev->xmit_lock serializes accesses to device driver.
2642 + netif_tx_lock serializes accesses to device driver.
2644 - dev->queue_lock and dev->xmit_lock are mutually exclusive,
2645 + dev->queue_lock and netif_tx_lock are mutually exclusive,
2646 if one is grabbed, another must be free.
2647 */
2649 @@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device
2650 NOTE: Called under dev->queue_lock with locally disabled BH.
2651 */
2653 -int qdisc_restart(struct net_device *dev)
2654 +static inline int qdisc_restart(struct net_device *dev)
2656 struct Qdisc *q = dev->qdisc;
2657 struct sk_buff *skb;
2659 /* Dequeue packet */
2660 - if ((skb = q->dequeue(q)) != NULL) {
2661 + if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
2662 unsigned nolock = (dev->features & NETIF_F_LLTX);
2664 + dev->gso_skb = NULL;
2666 /*
2667 * When the driver has LLTX set it does its own locking
2668 * in start_xmit. No need to add additional overhead by
2669 @@ -108,7 +111,7 @@ int qdisc_restart(struct net_device *dev
2670 * will be requeued.
2671 */
2672 if (!nolock) {
2673 - if (!spin_trylock(&dev->xmit_lock)) {
2674 + if (!netif_tx_trylock(dev)) {
2675 collision:
2676 /* So, someone grabbed the driver. */
2678 @@ -126,8 +129,6 @@ int qdisc_restart(struct net_device *dev
2679 __get_cpu_var(netdev_rx_stat).cpu_collision++;
2680 goto requeue;
2682 - /* Remember that the driver is grabbed by us. */
2683 - dev->xmit_lock_owner = smp_processor_id();
2687 @@ -136,14 +137,11 @@ int qdisc_restart(struct net_device *dev
2689 if (!netif_queue_stopped(dev)) {
2690 int ret;
2691 - if (netdev_nit)
2692 - dev_queue_xmit_nit(skb, dev);
2694 - ret = dev->hard_start_xmit(skb, dev);
2695 + ret = dev_hard_start_xmit(skb, dev);
2696 if (ret == NETDEV_TX_OK) {
2697 if (!nolock) {
2698 - dev->xmit_lock_owner = -1;
2699 - spin_unlock(&dev->xmit_lock);
2700 + netif_tx_unlock(dev);
2702 spin_lock(&dev->queue_lock);
2703 return -1;
2704 @@ -157,8 +155,7 @@ int qdisc_restart(struct net_device *dev
2705 /* NETDEV_TX_BUSY - we need to requeue */
2706 /* Release the driver */
2707 if (!nolock) {
2708 - dev->xmit_lock_owner = -1;
2709 - spin_unlock(&dev->xmit_lock);
2710 + netif_tx_unlock(dev);
2712 spin_lock(&dev->queue_lock);
2713 q = dev->qdisc;
2714 @@ -175,7 +172,10 @@ int qdisc_restart(struct net_device *dev
2715 */
2717 requeue:
2718 - q->ops->requeue(skb, q);
2719 + if (skb->next)
2720 + dev->gso_skb = skb;
2721 + else
2722 + q->ops->requeue(skb, q);
2723 netif_schedule(dev);
2724 return 1;
2726 @@ -183,11 +183,23 @@ requeue:
2727 return q->q.qlen;
2730 +void __qdisc_run(struct net_device *dev)
2731 +{
2732 + if (unlikely(dev->qdisc == &noop_qdisc))
2733 + goto out;
2735 + while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
2736 + /* NOTHING */;
2738 +out:
2739 + clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
2740 +}
2742 static void dev_watchdog(unsigned long arg)
2744 struct net_device *dev = (struct net_device *)arg;
2746 - spin_lock(&dev->xmit_lock);
2747 + netif_tx_lock(dev);
2748 if (dev->qdisc != &noop_qdisc) {
2749 if (netif_device_present(dev) &&
2750 netif_running(dev) &&
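[qdisc_restart() becomes static inline and is driven by the new __qdisc_run() loop; the __LINK_STATE_QDISC_RUNNING bit serialises queue processing on a single CPU, and dev_deactivate() below spins on that same bit. A sketch of the qdisc_run() entry point assumed in include/net/pkt_sched.h:

	static inline void qdisc_run(struct net_device *dev)
	{
		/* only one CPU may run this device's queue at a time */
		if (!test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			__qdisc_run(dev);
	}
]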
2751 @@ -201,7 +213,7 @@ static void dev_watchdog(unsigned long a
2752 dev_hold(dev);
2755 - spin_unlock(&dev->xmit_lock);
2756 + netif_tx_unlock(dev);
2758 dev_put(dev);
2760 @@ -225,17 +237,17 @@ void __netdev_watchdog_up(struct net_dev
2762 static void dev_watchdog_up(struct net_device *dev)
2764 - spin_lock_bh(&dev->xmit_lock);
2765 + netif_tx_lock_bh(dev);
2766 __netdev_watchdog_up(dev);
2767 - spin_unlock_bh(&dev->xmit_lock);
2768 + netif_tx_unlock_bh(dev);
2771 static void dev_watchdog_down(struct net_device *dev)
2773 - spin_lock_bh(&dev->xmit_lock);
2774 + netif_tx_lock_bh(dev);
2775 if (del_timer(&dev->watchdog_timer))
2776 __dev_put(dev);
2777 - spin_unlock_bh(&dev->xmit_lock);
2778 + netif_tx_unlock_bh(dev);
2781 void netif_carrier_on(struct net_device *dev)
2782 @@ -577,10 +589,17 @@ void dev_deactivate(struct net_device *d
2784 dev_watchdog_down(dev);
2786 - while (test_bit(__LINK_STATE_SCHED, &dev->state))
2787 + /* Wait for outstanding dev_queue_xmit calls. */
2788 + synchronize_rcu();
2790 + /* Wait for outstanding qdisc_run calls. */
2791 + while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
2792 yield();
2794 - spin_unlock_wait(&dev->xmit_lock);
2795 + if (dev->gso_skb) {
2796 + kfree_skb(dev->gso_skb);
2797 + dev->gso_skb = NULL;
2798 + }
2801 void dev_init_scheduler(struct net_device *dev)
2802 @@ -622,6 +641,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
2803 EXPORT_SYMBOL(qdisc_alloc);
2804 EXPORT_SYMBOL(qdisc_destroy);
2805 EXPORT_SYMBOL(qdisc_reset);
2806 -EXPORT_SYMBOL(qdisc_restart);
2807 EXPORT_SYMBOL(qdisc_lock_tree);
2808 EXPORT_SYMBOL(qdisc_unlock_tree);
2809 diff -pruN ../orig-linux-2.6.16.29/net/sched/sch_teql.c ./net/sched/sch_teql.c
2810 --- ../orig-linux-2.6.16.29/net/sched/sch_teql.c 2006-09-12 19:02:10.000000000 +0100
2811 +++ ./net/sched/sch_teql.c 2006-09-19 13:59:20.000000000 +0100
2812 @@ -302,20 +302,17 @@ restart:
2814 switch (teql_resolve(skb, skb_res, slave)) {
2815 case 0:
2816 - if (spin_trylock(&slave->xmit_lock)) {
2817 - slave->xmit_lock_owner = smp_processor_id();
2818 + if (netif_tx_trylock(slave)) {
2819 if (!netif_queue_stopped(slave) &&
2820 slave->hard_start_xmit(skb, slave) == 0) {
2821 - slave->xmit_lock_owner = -1;
2822 - spin_unlock(&slave->xmit_lock);
2823 + netif_tx_unlock(slave);
2824 master->slaves = NEXT_SLAVE(q);
2825 netif_wake_queue(dev);
2826 master->stats.tx_packets++;
2827 master->stats.tx_bytes += len;
2828 return 0;
2830 - slave->xmit_lock_owner = -1;
2831 - spin_unlock(&slave->xmit_lock);
2832 + netif_tx_unlock(slave);
2834 if (netif_queue_stopped(dev))
2835 busy = 1;