}
spin_unlock_irqrestore(&pending_interfaces_lock,
flags);
- receive_pending_skbs();
+ if (!skb_queue_empty(&pending_rx_queue))
+ receive_pending_skbs();
EXIT();
}
extern struct hypercall_batcher pending_rx_hypercalls;
extern struct ethtool_ops nc2_ethtool_ops;
+extern struct sk_buff_head pending_rx_queue;
+
void nc2_init_poller(struct netchannel2_ring_pair *ncrp);
void nc2_start_polling(struct netchannel2_ring_pair *ncrp);
void nc2_stop_polling(struct netchannel2_ring_pair *ncrp);
#include "netchannel2_core.h"
/* Only accessed from the tasklet, so no synchronisation needed. */
-static struct sk_buff_head pending_rx_queue;
+struct sk_buff_head pending_rx_queue;
/* Send as many finish packet messages as will fit on the ring. */
void send_finish_packet_messages(struct netchannel2_ring_pair *ncrp)
}
__skb_queue_tail(&pending_rx_queue, skb);
+
+ if (pending_rx_hypercalls.nr_pending_gops >= RX_GRANT_COPY_BATCH) {
+ flush_prepared_grant_copies(&pending_rx_hypercalls,
+ nc2_rscb_on_gntcopy_fail);
+ /* Since receiving could generate ACKs to the start_xmit()
+    function, we need to release the ring lock. */
+ spin_unlock(&ncrp->lock);
+ /* We should receive the packet as soon as the copy is
+    complete, to benefit from cache locality. */
+ receive_pending_skbs();
+ spin_lock(&ncrp->lock);
+
+ }
+
}
return;