#include "common.h"
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
+#include <linux/delay.h>
/*
* Module parameter 'queue_length':
gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
GNTMAP_host_map, tx_ring_ref, netif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
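+	/*
+	 * The map can fail with GNTST_eagain if the granted page is
+	 * temporarily unavailable (e.g. paged out), so retry after a delay.
+	 */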
+	do {
+		if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+			BUG();
+		msleep(10);
+	} while (op.status == GNTST_eagain);
if (op.status) {
DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
GNTMAP_host_map, rx_ring_ref, netif->domid);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
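+	/* As above, retry the rx ring mapping while it returns GNTST_eagain. */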
+	do {
+		if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+			BUG();
+		msleep(10);
+	} while (op.status == GNTST_eagain);
if (op.status) {
struct gnttab_unmap_grant_ref unop;
netrx_pending_operations, which have since been done. Check that
they didn't give any errors and advance over them. */
static int netbk_check_gop(int nr_frags, domid_t domid,
- struct netrx_pending_operations *npo)
+ struct netrx_pending_operations *npo, int *eagain)
{
multicall_entry_t *mcl;
gnttab_transfer_t *gop;
int status = NETIF_RSP_OKAY;
int i;
+ *eagain = 0;
+
for (i = 0; i <= nr_frags; i++) {
if (npo->meta[npo->meta_cons + i].copy) {
copy_op = npo->copy + npo->copy_cons++;
DPRINTK("Bad status %d from copy to DOM%d.\n",
copy_op->status, domid);
status = NETIF_RSP_ERROR;
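+				/* Let the caller know this copy can be retried. */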
+				if (copy_op->status == GNTST_eagain)
+					*eagain = 1;
}
} else {
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
* a fatal error anyway.
*/
BUG_ON(gop->status == GNTST_bad_page);
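+				/* Note eagain here too; the flipping path cannot retry it. */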
+				if (gop->status == GNTST_eagain)
+					*eagain = 1;
status = NETIF_RSP_ERROR;
}
}
int nr_frags;
int count;
unsigned long offset;
+ int eagain;
/*
* Putting hundreds of bytes on the stack is considered rude.
nr_frags = *(int *)skb->cb;
netif = netdev_priv(skb->dev);
+
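+		/* Check the grant operation results and note any GNTST_eagain. */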
+ status = netbk_check_gop(nr_frags, netif->domid, &npo, &eagain);
+
/* We can't rely on skb_release_data to release the
pages used by fragments for us, since it tries to
touch the pages in the fraglist. If we're in
/* (Freeing the fragments is safe since we copy
non-linear skbs destined for flipping interfaces) */
if (!netif->copying_receiver) {
+ /*
+ * Cannot handle failed grant transfers at the moment (because
+ * mmu_updates likely completed)
+ */
+ BUG_ON(eagain);
atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->frag_list = NULL;
skb_shinfo(skb)->nr_frags = 0;
netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
}
- netif->stats.tx_bytes += skb->len;
- netif->stats.tx_packets++;
-
- status = netbk_check_gop(nr_frags, netif->domid, &npo);
+		if (!eagain) {
+			netif->stats.tx_bytes += skb->len;
+			netif->stats.tx_packets++;
+		}
id = meta[npo.meta_cons].id;
flags = nr_frags ? NETRXF_more_data : 0;
!netbk_queue_full(netif))
netif_wake_queue(netif->dev);
- netif_put(netif);
- dev_kfree_skb(skb);
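+		/*
+		 * On GNTST_eagain, requeue the skb for another pass instead of
+		 * dropping it, unless the ring is already full.
+		 */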
+		if (!eagain || netbk_queue_full(netif)) {
+			netif_put(netif);
+			dev_kfree_skb(skb);
+			netif->stats.tx_dropped += !!eagain;
+		} else {
+			netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+						   !!skb_shinfo(skb)->gso_size;
+			skb_queue_head(&rx_queue, skb);
+		}
+
npo.meta_cons += nr_frags + 1;
}
if (mop == tx_map_ops)
goto out;
+	/* NOTE: some maps may fail with GNTST_eagain, which might succeed if
+	 * retried in the backend after a delay. Instead we simply fail the tx
+	 * request and let the frontend resend the packet. This is fine because
+	 * a network buffer is unlikely to be paged out or shared, and therefore
+	 * unlikely to fail with GNTST_eagain in the first place. */
ret = HYPERVISOR_grant_table_op(
GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
BUG_ON(ret);