From: Keir Fraser
Date: Wed, 3 Nov 2010 08:20:42 +0000 (+0000)
Subject: netback: take net_schedule_list_lock when removing entry from net_schedule_list
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=12a5cbb6a5fa1dc2172809a7a8204cc5f27b847e;p=legacy%2Flinux-2.6.18-xen.git

netback: take net_schedule_list_lock when removing entry from net_schedule_list

There is a race in net_tx_action between checking whether
net_schedule_list is empty and actually dequeuing the first entry on
the list. If another thread dequeues the only entry on the list during
this window, we crash because list_first_entry expects a non-empty
list. Therefore, after the initial lock-free check for an empty list,
check again with the lock held before dequeuing the entry.

Based on a patch by Tomasz Wroblewski.

Signed-off-by: Ian Campbell
Signed-off-by: Jan Beulich
---

diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
index 1c960aa0..34d8d1df 100644
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -784,15 +784,28 @@ static int __on_net_schedule_list(netif_t *netif)
 	return netif->list.next != NULL;
 }
 
+/* Must be called with net_schedule_list_lock held. */
 static void remove_from_net_schedule_list(netif_t *netif)
 {
-	spin_lock_irq(&net_schedule_list_lock);
 	if (likely(__on_net_schedule_list(netif))) {
 		list_del(&netif->list);
 		netif->list.next = NULL;
 		netif_put(netif);
 	}
+}
+
+static netif_t *poll_net_schedule_list(void)
+{
+	netif_t *netif = NULL;
+
+	spin_lock_irq(&net_schedule_list_lock);
+	if (!list_empty(&net_schedule_list)) {
+		netif = list_first_entry(&net_schedule_list, netif_t, list);
+		netif_get(netif);
+		remove_from_net_schedule_list(netif);
+	}
 	spin_unlock_irq(&net_schedule_list_lock);
+	return netif;
 }
 
 static void add_to_net_schedule_list_tail(netif_t *netif)
@@ -837,7 +850,9 @@ void netif_schedule_work(netif_t *netif)
 
 void netif_deschedule_work(netif_t *netif)
 {
+	spin_lock_irq(&net_schedule_list_lock);
 	remove_from_net_schedule_list(netif);
+	spin_unlock_irq(&net_schedule_list_lock);
 }
 
 
@@ -1224,7 +1239,6 @@ static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
 /* Called after netfront has transmitted */
 static void net_tx_action(unsigned long unused)
 {
-	struct list_head *ent;
 	struct sk_buff *skb;
 	netif_t *netif;
 	netif_tx_request_t txreq;
@@ -1242,10 +1256,9 @@ static void net_tx_action(unsigned long unused)
 	while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
 	       !list_empty(&net_schedule_list)) {
 		/* Get a netif from the list with work to do. */
-		ent = net_schedule_list.next;
-		netif = list_entry(ent, netif_t, list);
-		netif_get(netif);
-		remove_from_net_schedule_list(netif);
+		netif = poll_net_schedule_list();
+		if (!netif)
+			continue;
 
 		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
 		if (!work_to_do) {
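
For illustration only, not part of the patch: a minimal user-space sketch of
the same check-then-dequeue race and of the fix, using a pthread mutex in
place of net_schedule_list_lock and a plain singly-linked list in place of
net_schedule_list. Every identifier below (entry, sched_list, sched_lock,
racy_poll, locked_poll) is a hypothetical stand-in, not the driver's API.

/* race_sketch.c -- build with: cc -pthread race_sketch.c
 * Hypothetical stand-ins: "entry", "sched_list" and "sched_lock" mirror
 * netif_t, net_schedule_list and net_schedule_list_lock in spirit only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int id;
};

static struct entry *sched_list;	/* shared work list, NULL == empty */
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* The shape of the old code: the emptiness check and the head load happen
 * without the lock, so another thread may empty the list in between -- the
 * analogue of calling list_first_entry() on an empty list. Shown for
 * contrast; never called. */
static struct entry *racy_poll(void)
{
	if (sched_list == NULL)		/* lock-free check ... */
		return NULL;
	struct entry *e = sched_list;	/* ... head may be gone by now */
	pthread_mutex_lock(&sched_lock);
	sched_list = e->next;		/* crashes here if we lost the race */
	pthread_mutex_unlock(&sched_lock);
	return e;
}

/* The fixed shape, mirroring poll_net_schedule_list() above: re-check
 * emptiness with the lock held and report "nothing to do" with NULL. */
static struct entry *locked_poll(void)
{
	struct entry *e = NULL;

	pthread_mutex_lock(&sched_lock);
	if (sched_list != NULL) {	/* re-check under the lock */
		e = sched_list;
		sched_list = e->next;
	}
	pthread_mutex_unlock(&sched_lock);
	return e;			/* caller must tolerate NULL */
}

static void *consumer(void *unused)
{
	struct entry *e;

	(void)unused;
	while ((e = locked_poll()) != NULL)
		free(e);		/* "do the work", then drop it */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	(void)racy_poll;		/* referenced only to silence -Wunused */
	for (int i = 0; i < 100000; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->id = i;
		e->next = sched_list;
		sched_list = e;
	}
	pthread_create(&t1, NULL, consumer, NULL);
	pthread_create(&t2, NULL, consumer, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("list drained without dereferencing a NULL head");
	return 0;
}

Like poll_net_schedule_list() in the patch, locked_poll() folds "is there an
entry?" and "take the entry" into one critical section, which is why the
caller can simply treat NULL as "lost the race, try again" -- the same role
the new "if (!netif) continue;" plays in net_tx_action.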