#endif
-#ifdef CONFIG_XEN
-static int MODPARM_rx_copy = 0;
-module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
-MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
-static int MODPARM_rx_flip = 0;
-module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
-MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
-#else
-static const int MODPARM_rx_copy = 1;
-static const int MODPARM_rx_flip = 0;
-#endif
-
/**
* \brief The maximum allowed data fragments in a single transmit
* request.
static int xennet_get_responses(struct netfront_info *np,
struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
- struct mbuf **list, int *pages_flipped_p);
+ struct mbuf **list);
#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
struct mtx sc_lock;
xen_intr_handle_t xen_intr_handle;
- u_int copying_receiver;
u_int carrier;
u_int maxfrags;
message = "writing event-channel";
goto abort_transaction;
}
- err = xs_printf(xst, node, "request-rx-copy", "%u",
- info->copying_receiver);
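+ /* Copy mode is the only supported receive scheme; request it unconditionally. */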
+ err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
if (err) {
message = "writing request-rx-copy";
goto abort_transaction;
struct mbuf *m_new;
int i, batch_target, notify;
RING_IDX req_prod;
- struct xen_memory_reservation reservation;
grant_ref_t ref;
- int nr_flips;
netif_rx_request_t *req;
vm_offset_t vaddr;
u_long pfn;
}
refill:
- for (nr_flips = i = 0; ; i++) {
+ for (i = 0; ; i++) {
if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
break;
pfn = vtophys(vaddr) >> PAGE_SHIFT;
req = RING_GET_REQUEST(&sc->rx, req_prod + i);
- if (sc->copying_receiver == 0) {
- gnttab_grant_foreign_transfer_ref(ref,
- otherend_id, pfn);
- sc->rx_pfn_array[nr_flips] = pfn;
- nr_flips++;
- } else {
- gnttab_grant_foreign_access_ref(ref,
- otherend_id,
- pfn, 0);
- }
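+ /* Grant the backend write access so it can copy the packet into this page. */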
+ gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
req->id = id;
req->gref = ref;
* We may have allocated buffers which have entries outstanding
* in the page update queue -- make sure we flush those first!
*/
- if (nr_flips != 0) {
-#ifdef notyet
- /* Tell the ballon driver what is going on. */
- balloon_update_driver_allowance(i);
-#endif
- set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
- reservation.nr_extents = i;
- reservation.extent_order = 0;
- reservation.address_bits = 0;
- reservation.domid = DOMID_SELF;
- } else {
- wmb();
- }
+ wmb();
/* Above is a suitable barrier to ensure backend will see requests. */
sc->rx.req_prod_pvt = req_prod + i;
RING_IDX i, rp;
struct mbuf *m;
struct mbufq rxq, errq;
- int err, pages_flipped = 0, work_to_do;
+ int err, work_to_do;
do {
XN_RX_LOCK_ASSERT(np);
memset(extras, 0, sizeof(rinfo.extras));
m = NULL;
- err = xennet_get_responses(np, &rinfo, rp, &i, &m,
- &pages_flipped);
+ err = xennet_get_responses(np, &rinfo, rp, &i, &m);
if (__predict_false(err)) {
if (m)
np->rx.rsp_cons = i;
}
- if (pages_flipped) {
- /* Some pages are no longer absent... */
-#ifdef notyet
- balloon_update_driver_allowance(-pages_flipped);
-#endif
- }
-
mbufq_drain(&errq);
/*
static int
xennet_get_responses(struct netfront_info *np,
struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
- struct mbuf **list,
- int *pages_flipped_p)
+ struct mbuf **list)
{
- int pages_flipped = *pages_flipped_p;
struct netif_rx_response *rx = &rinfo->rx;
struct netif_extra_info *extras = rinfo->extras;
struct mbuf *m, *m0, *m_prev;
}
for (;;) {
- u_long mfn;
-
#if 0
DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
rx->status, rx->offset, frags);
goto next;
}
- if (!np->copying_receiver) {
- /* Memory pressure, insufficient buffer
- * headroom, ...
- */
- if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
- WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
- rx->id, rx->status);
- xennet_move_rx_slot(np, m, ref);
- err = ENOMEM;
- goto next;
- }
-
- pages_flipped++;
- } else {
- ret = gnttab_end_foreign_access_ref(ref);
- KASSERT(ret, ("ret != 0"));
- }
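+ /* The backend has finished copying; end foreign access before reclaiming the ref. */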
+ ret = gnttab_end_foreign_access_ref(ref);
+ KASSERT(ret, ("Unable to end access to grant reference"));
gnttab_release_grant_reference(&np->gref_rx_head, ref);
}
*list = m0;
*cons += frags;
- *pages_flipped_p = pages_flipped;
return (err);
}
int i, requeue_idx, error;
grant_ref_t ref;
netif_rx_request_t *req;
- u_int feature_rx_copy, feature_rx_flip;
+ u_int feature_rx_copy;
error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
"feature-rx-copy", NULL, "%u", &feature_rx_copy);
if (error)
feature_rx_copy = 0;
- error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
- "feature-rx-flip", NULL, "%u", &feature_rx_flip);
- if (error)
- feature_rx_flip = 1;
- /*
- * Copy packets on receive path if:
- * (a) This was requested by user, and the backend supports it; or
- * (b) Flipping was requested, but this is unsupported by the backend.
- */
- np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
- (MODPARM_rx_flip && !feature_rx_flip));
+ /* We only support rx copy. */
+ if (!feature_rx_copy)
+ return (EPROTONOSUPPORT);
/* Recovery procedure: */
error = talk_to_backend(np->xbdev, np);
req = RING_GET_REQUEST(&np->rx, requeue_idx);
pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
- if (!np->copying_receiver) {
- gnttab_grant_foreign_transfer_ref(ref,
- xenbus_get_otherend_id(np->xbdev),
- pfn);
- } else {
- gnttab_grant_foreign_access_ref(ref,
- xenbus_get_otherend_id(np->xbdev),
- pfn, 0);
- }
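+ /* Re-grant write access for each receive buffer being requeued during recovery. */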
+ gnttab_grant_foreign_access_ref(ref,
+ xenbus_get_otherend_id(np->xbdev),
+ pfn, 0);
+
req->gref = ref;
req->id = requeue_idx;