/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
u8 copying_receiver:1; /* copy packets to receiver? */
+ unsigned int copying_rx_offset; /* offset at which to copy packets to receiver */
/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
RING_IDX rx_req_cons_peek;
}
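A hedged aside, not part of the patch: because the first chunk of a
packet is copied at copying_rx_offset and Xen rejects grant copies that
cross a page boundary, each rx ring slot can carry at most PAGE_SIZE
minus that offset bytes of the first chunk. The helper name below is
invented purely for illustration.

static inline unsigned int netbk_first_chunk_room(unsigned int copy_offset)
{
	/* Space left in an rx slot once the requested offset is applied. */
	return PAGE_SIZE - copy_offset;
}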
copy_gop->source.offset = offset;
copy_gop->dest.domid = netif->domid;
- copy_gop->dest.offset = 0;
+ /* Place only the first chunk of the packet at the
+ * frontend-requested offset; later chunks start at 0. */
+ if (i == 0)
+ copy_gop->dest.offset = netif->copying_rx_offset;
+ else
+ copy_gop->dest.offset = 0;
copy_gop->dest.u.ref = req->gref;
+ /* We rely on Xen to enforce that offset + size <=
+ * PAGE_SIZE: a copy op that would cross a page fails
+ * in the hypervisor. */
copy_gop->len = size;
} else {
meta->copy = 0;
goto abort_transaction;
}
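For reference, a paraphrase (not Xen's actual source) of the
hypervisor-side check the comment above leans on: a grant copy whose
destination offset plus length spills past the page fails with
GNTST_bad_copy_arg, which is why the backend does not re-validate here.

	/* Sketch of the bounds check performed by the hypervisor. */
	if (op->dest.offset + op->len > PAGE_SIZE)
		return GNTST_bad_copy_arg;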
+ /* Advertise that we can rx-copy at an offset. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-copy-offset", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-copy-offset";
+ goto abort_transaction;
+ }
+
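A hedged sketch of the frontend half of this handshake, which is not
part of this patch: the guest checks the backend's flag and, if set,
writes the offset it wants, e.g. NET_IP_ALIGN to align the IP header.
The node names match those used above; the error handling is
illustrative only.

	int val = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-rx-copy-offset", "%d", &val) < 0)
		val = 0;
	if (val) {
		err = xenbus_printf(XBT_NIL, dev->nodename,
				    "request-rx-copy-offset", "%u",
				    NET_IP_ALIGN);
		if (err)
			xenbus_dev_fatal(dev, err,
					 "writing request-rx-copy-offset");
	}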
/*
 * We don't support rx-flip path (except old guests who don't
 * grok this feature flag).
 */
{
struct xenbus_device *dev = be->dev;
unsigned long tx_ring_ref, rx_ring_ref;
- unsigned int evtchn, rx_copy;
+ unsigned int evtchn, rx_copy, rx_copy_offset;
int err;
int val;
}
be->netif->copying_receiver = !!rx_copy;
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy-offset",
+ "%u", &rx_copy_offset);
+ if (err == -ENOENT) {
+ err = 0;
+ rx_copy_offset = 0;
+ }
+ if (err < 0) {
+ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy-offset",
+ dev->otherend);
+ return err;
+ }
+ be->netif->copying_rx_offset = rx_copy_offset;
+
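A possible hardening step the hunk above does not take, sketched here
as a suggestion: reject offsets that leave no room for payload in a
ring slot, instead of letting every later grant copy fail.

	if (rx_copy_offset >= PAGE_SIZE) {
		xenbus_dev_fatal(dev, -EINVAL,
				 "invalid request-rx-copy-offset %u",
				 rx_copy_offset);
		return -EINVAL;
	}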
if (be->netif->dev->tx_queue_len != 0) {
if (xenbus_scanf(XBT_NIL, dev->otherend,
"feature-rx-notify", "%d", &val) < 0)