ia64/xen-unstable

diff linux-2.6-xen-sparse/net/core/dev.c @ 10714:a4041ac6f152

[NET] net-gso.patch: Fix up GSO packets with broken checksums

Here is the original changelog:

[NET] gso: Fix up GSO packets with broken checksums

Certain subsystems in the stack (e.g., netfilter) can break the partial
checksum on GSO packets. Until they're fixed, this patch allows this to
work by recomputing the partial checksums through the GSO mechanism.

Once they've all been converted to update the partial checksum instead
of clearing it, this workaround can be removed.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kfraser@localhost.localdomain
date Mon Jul 10 15:36:04 2006 +0100 (2006-07-10)
parents 6e7027a2abca
children 9519445d9e9d
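
The fix turns on the ordinary 16-bit one's-complement Internet checksum that
skb_checksum_help() writes back via csum_fold(): with this patch, a packet
carrying a gso_size skips that software fix-up and the GSO path recomputes the
partial checksum per segment instead. Below is a minimal user-space sketch of
the sum-and-fold step, an illustration only (it is not the kernel's csum
helpers, and the payload bytes are hypothetical):

/*
 * Simplified user-space sketch (not the kernel's csum helpers): sum 16-bit
 * words in one's-complement arithmetic and fold the result, the kind of
 * checksum skb_checksum_help() stores via csum_fold() and that the GSO
 * path now recomputes per segment for gso_size packets.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t internet_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Add up the buffer as big-endian 16-bit words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)data[len - 1] << 8;

	/* Fold the carries back in, then take the one's complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Hypothetical payload bytes; a real caller sums header + data. */
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd };

	printf("checksum: 0x%04x\n", internet_checksum(payload, sizeof(payload)));
	return 0;
}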
line diff
     1.1 --- a/linux-2.6-xen-sparse/net/core/dev.c	Mon Jul 10 15:23:15 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/net/core/dev.c	Mon Jul 10 15:36:04 2006 +0100
     1.3 @@ -1089,9 +1089,17 @@ int skb_checksum_help(struct sk_buff *sk
     1.4  	unsigned int csum;
     1.5  	int ret = 0, offset = skb->h.raw - skb->data;
     1.6  
     1.7 -	if (inward) {
     1.8 -		skb->ip_summed = CHECKSUM_NONE;
     1.9 -		goto out;
    1.10 +	if (inward)
    1.11 +		goto out_set_summed;
    1.12 +
    1.13 +	if (unlikely(skb_shinfo(skb)->gso_size)) {
    1.14 +		static int warned;
    1.15 +
    1.16 +		WARN_ON(!warned);
    1.17 +		warned = 1;
    1.18 +
    1.19 +		/* Let GSO fix up the checksum. */
    1.20 +		goto out_set_summed;
    1.21  	}
    1.22  
    1.23  	if (skb_cloned(skb)) {
    1.24 @@ -1108,6 +1116,8 @@ int skb_checksum_help(struct sk_buff *sk
    1.25  	BUG_ON(skb->csum + 2 > offset);
    1.26  
    1.27  	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
    1.28 +
    1.29 +out_set_summed:
    1.30  	skb->ip_summed = CHECKSUM_NONE;
    1.31  out:	
    1.32  	return ret;
    1.33 @@ -1128,17 +1138,35 @@ struct sk_buff *skb_gso_segment(struct s
    1.34  	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
    1.35  	struct packet_type *ptype;
    1.36  	int type = skb->protocol;
    1.37 +	int err;
    1.38  
    1.39  	BUG_ON(skb_shinfo(skb)->frag_list);
    1.40 -	BUG_ON(skb->ip_summed != CHECKSUM_HW);
    1.41  
    1.42  	skb->mac.raw = skb->data;
    1.43  	skb->mac_len = skb->nh.raw - skb->data;
    1.44  	__skb_pull(skb, skb->mac_len);
    1.45  
    1.46 +	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
    1.47 +		static int warned;
    1.48 +
    1.49 +		WARN_ON(!warned);
    1.50 +		warned = 1;
    1.51 +
    1.52 +		if (skb_header_cloned(skb) &&
    1.53 +		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
    1.54 +			return ERR_PTR(err);
    1.55 +	}
    1.56 +
    1.57  	rcu_read_lock();
    1.58  	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
    1.59  		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
    1.60 +			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
    1.61 +				err = ptype->gso_send_check(skb);
    1.62 +				segs = ERR_PTR(err);
    1.63 +				if (err || skb_gso_ok(skb, features))
    1.64 +					break;
    1.65 +				__skb_push(skb, skb->data - skb->nh.raw);
    1.66 +			}
    1.67  			segs = ptype->gso_segment(skb, features);
    1.68  			break;
    1.69  		}
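
In the hunk above, skb_gso_segment() now warns once and, after un-cloning the
header if necessary, lets the protocol's gso_send_check() hook regenerate the
checksum before segmentation when a GSO skb arrives without CHECKSUM_HW. The
changelog's longer-term fix is for the offending subsystems to adjust the
partial checksum instead of clearing it; below is a minimal user-space sketch
of that kind of incremental update (RFC 1624, HC' = ~(~HC + ~m + m')), using
hypothetical field values and no kernel helpers:

/*
 * Sketch of an incremental checksum adjustment (RFC 1624, Eqn. 3):
 * HC' = ~(~HC + ~m + m'). A subsystem that rewrites a 16-bit field could
 * update the stored checksum this way instead of clearing skb->ip_summed.
 * User-space illustration only; the field values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_update16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;	/* ~HC */

	sum += (uint16_t)~old_val;		/* + ~m  */
	sum += new_val;				/* + m'  */

	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* HC' */
}

int main(void)
{
	uint16_t check = 0xb861;		/* checksum before the rewrite (made up) */
	uint16_t old_port = 80, new_port = 8080;

	printf("updated checksum: 0x%04x\n",
	       csum_update16(check, old_port, new_port));
	return 0;
}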