ia64/xen-unstable
changeset 1026:50fc09c0f9d7
bitkeeper revision 1.666 (3ff980d7gcf3e_3nqJ9SkvI1vvfOIw)
dev.c, skbuff.h, netdevice.h:
Fix network packet receive ordering.
author      kaf24@scramble.cl.cam.ac.uk
date        Mon Jan 05 15:20:55 2004 +0000 (2004-01-05)
parents     7e0ecccb1426
children    175dadca8022
files       xen/include/xeno/netdevice.h xen/include/xeno/skbuff.h xen/net/dev.c
line diff
--- a/xen/include/xeno/netdevice.h    Fri Jan 02 22:41:26 2004 +0000
+++ b/xen/include/xeno/netdevice.h    Mon Jan 05 15:20:55 2004 +0000
@@ -30,7 +30,7 @@
 #include <xeno/if_packet.h>
 #include <xeno/sched.h>
 #include <xeno/interrupt.h>
-
+#include <xeno/skbuff.h>
 #include <asm/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -41,9 +41,8 @@
 struct vlan_group;
 
 extern struct skb_completion_queues {
-    struct sk_buff *rx;      /* Packets received in interrupt context. */
-    unsigned int rx_qlen;
-    struct sk_buff *tx;      /* Tx buffers defunct in interrupt context. */
+    struct sk_buff_head rx;  /* Packets received in interrupt context. */
+    struct sk_buff *tx;      /* Tx buffers defunct in interrupt context. */
 } skb_queue[NR_CPUS] __cacheline_aligned;
 
 /* Backlog congestion levels */
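For context on the ordering bug: the old per-CPU completion queue was a bare singly-linked `rx` pointer that netif_rx() pushed onto at the head, so the softirq handler drained packets last-in-first-out, i.e. in reverse arrival order. Replacing it with a `struct sk_buff_head` lets the interrupt side append with `__skb_queue_tail()` and keeps FIFO order. The following is a minimal userspace sketch of that difference; the `pkt` type and `push_head()` helper are illustrative stand-ins, not code from the Xen tree.

```c
#include <stdio.h>

/* Illustrative stand-in for an sk_buff: just an id and a next pointer. */
struct pkt { int id; struct pkt *next; };

/* Old scheme: netif_rx() prepended at the head of a singly-linked list,
 * so the most recently received packet was processed first (LIFO). */
static void push_head(struct pkt **head, struct pkt *p)
{
    p->next = *head;
    *head = p;
}

int main(void)
{
    struct pkt pkts[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
    struct pkt *head = NULL;

    /* Packets arrive in the order 1, 2, 3 ... */
    for (int i = 0; i < 3; i++)
        push_head(&head, &pkts[i]);

    /* ... but the consumer walks from the head and sees 3, 2, 1.
     * A tail-insert queue (sk_buff_head + __skb_queue_tail in the patch)
     * delivers them in arrival order instead. */
    for (struct pkt *p = head; p != NULL; p = p->next)
        printf("processing packet %d\n", p->id);
    return 0;
}
```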
--- a/xen/include/xeno/skbuff.h    Fri Jan 02 22:41:26 2004 +0000
+++ b/xen/include/xeno/skbuff.h    Mon Jan 05 15:20:55 2004 +0000
@@ -23,7 +23,6 @@
 #include <asm/system.h>
 #include <asm/atomic.h>
 #include <asm/types.h>
-#include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <xeno/vif.h>
 
@@ -88,9 +87,7 @@ struct sk_buff_head {
     /* These two members must be first. */
     struct sk_buff * next;
     struct sk_buff * prev;
-
     __u32 qlen;
-    spinlock_t lock;
 };
 
 #define MAX_SKB_FRAGS 1 /* KAF: was 6 */
@@ -204,7 +201,6 @@ static inline __u32 skb_queue_len(struct
 
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
-    spin_lock_init(&list->lock);
     list->prev = (struct sk_buff *)list;
     list->next = (struct sk_buff *)list;
     list->qlen = 0;
@@ -215,9 +211,6 @@ static inline void skb_queue_head_init(s
  * @list: list to use
  * @newsk: buffer to queue
  *
- * Queue a buffer at the start of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
  * A buffer cannot be placed on two lists at the same time.
  */
 
@@ -240,9 +233,6 @@ static inline void __skb_queue_head(stru
  * @list: list to use
  * @newsk: buffer to queue
  *
- * Queue a buffer at the end of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
  * A buffer cannot be placed on two lists at the same time.
  */
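The skbuff.h side of the change strips the per-list spinlock from `struct sk_buff_head`: the completion queues are per-CPU and only manipulated with local interrupts disabled, so the lock-free `__skb_*` helpers need no lock of their own. Below is a simplified sketch of the circular doubly-linked list these helpers maintain; the `node`/`nodeq` types are hypothetical stand-ins for `sk_buff`/`sk_buff_head`, not the real definitions.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for sk_buff / sk_buff_head. As in the real header,
 * next and prev must come first so the list head can masquerade as a node. */
struct node  { struct node *next, *prev; };
struct nodeq { struct node *next, *prev; unsigned int qlen; };

/* Analogue of skb_queue_head_init(): an empty circular list points at
 * itself. No spinlock is initialised; callers exclude each other by
 * running per-CPU with local interrupts disabled. */
static void nodeq_init(struct nodeq *q)
{
    q->prev = (struct node *)q;
    q->next = (struct node *)q;
    q->qlen = 0;
}

/* Rough analogue of __skb_queue_tail(): link n just before the head
 * sentinel, i.e. at the tail, preserving FIFO order. */
static void nodeq_tail(struct nodeq *q, struct node *n)
{
    n->next = (struct node *)q;
    n->prev = q->prev;
    q->prev->next = n;
    q->prev = n;
    q->qlen++;
}

/* Rough analogue of __skb_dequeue(): unlink and return the node right
 * after the sentinel, or NULL if the list is empty. */
static struct node *nodeq_dequeue(struct nodeq *q)
{
    struct node *n = q->next;
    if (n == (struct node *)q)
        return NULL;
    q->next = n->next;
    n->next->prev = (struct node *)q;
    q->qlen--;
    return n;
}

int main(void)
{
    struct nodeq q;
    struct node a, b;

    nodeq_init(&q);
    nodeq_tail(&q, &a);
    nodeq_tail(&q, &b);
    printf("dequeued a first: %d, then b: %d, then empty: %d\n",
           nodeq_dequeue(&q) == &a, nodeq_dequeue(&q) == &b,
           nodeq_dequeue(&q) == NULL);
    return 0;
}
```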
--- a/xen/net/dev.c    Fri Jan 02 22:41:26 2004 +0000
+++ b/xen/net/dev.c    Mon Jan 05 15:20:55 2004 +0000
@@ -582,18 +582,13 @@ int netif_rx(struct sk_buff *skb)
     unsigned long flags;
 
     local_irq_save(flags);
-
-    if ( unlikely(skb_queue[cpu].rx_qlen > 100) )
+    if ( unlikely(skb_queue_len(&skb_queue[cpu].rx) > 100) )
     {
         local_irq_restore(flags);
         perfc_incr(net_rx_congestion_drop);
         return NET_RX_DROP;
     }
-
-    skb->next = skb_queue[cpu].rx;
-    skb_queue[cpu].rx = skb;
-    skb_queue[cpu].rx_qlen++;
-
+    __skb_queue_tail(&skb_queue[cpu].rx, skb);
     local_irq_restore(flags);
 
     __cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
@@ -604,15 +599,25 @@ int netif_rx(struct sk_buff *skb)
 static void net_rx_action(struct softirq_action *h)
 {
     int offset, cpu = smp_processor_id();
-    struct sk_buff *skb, *nskb;
+    struct sk_buff_head list, *q = &skb_queue[cpu].rx;
+    struct sk_buff *skb;
 
     local_irq_disable();
-    skb = skb_queue[cpu].rx;
-    skb_queue[cpu].rx = NULL;
-    skb_queue[cpu].rx_qlen = 0;
+    /* Code to patch to the private list header is invalid if list is empty! */
+    if ( unlikely(skb_queue_len(q) == 0) )
+    {
+        local_irq_enable();
+        return;
+    }
+    /* Patch the head and tail skbuffs to point at the private list header. */
+    q->next->prev = (struct sk_buff *)&list;
+    q->prev->next = (struct sk_buff *)&list;
+    /* Move the list to our private header. The public header is reinit'ed. */
+    list = *q;
+    skb_queue_head_init(q);
     local_irq_enable();
 
-    while ( skb != NULL )
+    while ( (skb = __skb_dequeue(&list)) != NULL )
     {
         ASSERT(skb->skb_type == SKB_ZERO_COPY);
 
@@ -646,9 +651,7 @@ static void net_rx_action(struct softirq
 
         unmap_domain_mem(skb->head);
 
-        nskb = skb->next;
         kfree_skb(skb);
-        skb = nskb;
     }
 }
 
@@ -2336,10 +2339,12 @@ static void make_rx_response(net_vif_t 
 
 int setup_network_devices(void)
 {
-    int ret;
+    int i, ret;
     extern char opt_ifname[];
 
     memset(skb_queue, 0, sizeof(skb_queue));
+    for ( i = 0; i < smp_num_cpus; i++ )
+        skb_queue_head_init(&skb_queue[i].rx);
 
     /* Actual receive processing happens in softirq context. */
     open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
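The interesting part of the new net_rx_action() is how it empties the shared per-CPU queue without holding a lock while processing: with local interrupts disabled it patches the first and last buffers to point at a stack-local list header, copies the header by value, and re-initialises the public one, after which the private list can be drained with interrupts enabled. The sketch below reproduces that splice on the same simplified stand-in types as the previous sketch (hypothetical `node`/`nodeq`, not the real sk_buff structures); it illustrates the pointer patching only, not the hypervisor code itself.

```c
#include <stdio.h>

/* Simplified stand-ins: next/prev come first so the list header can be
 * cast to a node when patching the first and last elements. */
struct node  { struct node *next, *prev; int id; };
struct nodeq { struct node *next, *prev; unsigned int qlen; };

static void nodeq_init(struct nodeq *q)
{
    q->prev = (struct node *)q;
    q->next = (struct node *)q;
    q->qlen = 0;
}

static void nodeq_tail(struct nodeq *q, struct node *n)
{
    n->next = (struct node *)q;
    n->prev = q->prev;
    q->prev->next = n;
    q->prev = n;
    q->qlen++;
}

int main(void)
{
    struct nodeq shared, priv;
    struct node pkts[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

    nodeq_init(&shared);
    for (int i = 0; i < 3; i++)
        nodeq_tail(&shared, &pkts[i]);

    /* --- the splice, mirroring the new net_rx_action() ---
     * In the hypervisor this region runs with local IRQs disabled, so the
     * shared per-CPU queue cannot change underneath us. */
    if (shared.qlen == 0)
        return 0;  /* patching below would be invalid on an empty list */

    shared.next->prev = (struct node *)&priv;  /* patch first node          */
    shared.prev->next = (struct node *)&priv;  /* patch last node           */
    priv = shared;                             /* move the list by value    */
    nodeq_init(&shared);                       /* shared queue empty again  */
    /* --- IRQs could be re-enabled here; priv belongs to us alone --- */

    /* Drain the private list in arrival order: prints 1, 2, 3. */
    for (struct node *n = priv.next; n != (struct node *)&priv; n = n->next)
        printf("processing packet %d\n", n->id);
    return 0;
}
```

The empty-queue check that the patch adds matters because, with nothing queued, `q->next` is the shared header itself, so patching `q->next->prev` would scribble on that header rather than on a real buffer; bailing out early avoids corrupting the queue.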