#define net_ratelimit() 0
+struct netfront_queue_info;
struct netfront_info;
struct netfront_rx_info;
-static void xn_txeof(struct netfront_info *);
-static void xn_rxeof(struct netfront_info *);
-static void network_alloc_rx_buffers(struct netfront_info *);
+static void xn_txeof(struct netfront_queue_info *);
+static void xn_rxeof(struct netfront_queue_info *);
+static void xn_alloc_rx_buffers(struct netfront_queue_info *);
static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);
static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
-static int xn_assemble_tx_request(struct netfront_info *sc,
+static int xn_assemble_tx_request(struct netfront_queue_info *queue,
struct mbuf *m_head);
-static void xn_start_locked(struct ifnet *);
+static void xn_start_locked(struct ifnet *, struct netfront_queue_info *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
/* Xenolinux helper functions */
-int network_connect(struct netfront_info *);
+int xn_connect(struct netfront_info *);
-static void xn_free_rx_ring(struct netfront_info *);
+static void xn_free_rx_ring(struct netfront_queue_info *);
-static void xn_free_tx_ring(struct netfront_info *);
+static void xn_free_tx_ring(struct netfront_queue_info *);
-static int xennet_get_responses(struct netfront_info *np,
+static int xennet_get_responses(struct netfront_queue_info *queue,
struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
struct mbuf **list);
u_long tx_errors; /* packet transmit problems */
};
-struct netfront_info {
- struct ifnet *xn_ifp;
- struct lro_ctrl xn_lro;
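+/*
+ * Per-queue state: each TX/RX ring pair, its locks, grant references,
+ * mbuf bookkeeping and event channel live here, so that more than one
+ * queue per interface can be supported later on.
+ */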
+struct netfront_queue_info {
+ struct netfront_info *info;
+ u_int num;
- struct netfront_stats stats;
u_int tx_full;
netif_tx_front_ring_t tx;
struct mtx tx_lock;
struct mtx rx_lock;
+ xen_intr_handle_t xen_intr_handle;
+
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];
+
+ int tx_ring_ref;
+ int rx_ring_ref;
+
+ struct xn_chain_data xn_cdata; /* mbufs */
+ struct mbufq xn_rx_batch; /* batch queue */
+ int rx_target;
+
+ xen_pfn_t rx_pfn_array[NET_RX_RING_SIZE];
+ struct netfront_stats stats;
+
+ struct lro_ctrl lro;
+};
+
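+/* Per-interface (softc) state; the per-queue state hangs off 'queue'. */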
+struct netfront_info {
+ struct ifnet *xn_ifp;
+ u_int num_queues;
+ struct netfront_queue_info *queue;
+
struct mtx sc_lock;
- xen_intr_handle_t xen_intr_handle;
u_int carrier;
u_int maxfrags;
#define RX_MAX_TARGET NET_RX_RING_SIZE
int rx_min_target;
int rx_max_target;
- int rx_target;
-
- grant_ref_t gref_tx_head;
- grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
- grant_ref_t gref_rx_head;
- grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
device_t xbdev;
- int tx_ring_ref;
- int rx_ring_ref;
uint8_t mac[ETHER_ADDR_LEN];
- struct xn_chain_data xn_cdata; /* mbufs */
- struct mbufq xn_rx_batch; /* batch queue */
int xn_if_flags;
struct callout xn_stat_ch;
- xen_pfn_t rx_pfn_array[NET_RX_RING_SIZE];
struct ifmedia sc_media;
bool xn_resume;
#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain
-#define XN_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_lock)
-#define XN_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_lock)
+#define XN_RX_LOCK(_q) mtx_lock(&(_q)->rx_lock)
+#define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->rx_lock)
-#define XN_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_lock)
-#define XN_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_lock)
+#define XN_TX_LOCK(_q) mtx_lock(&(_q)->tx_lock)
+#define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->tx_lock)
#define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
#define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
-#define XN_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_lock, MA_OWNED);
-#define XN_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_lock, MA_OWNED);
+#define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->rx_lock, MA_OWNED);
+#define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->tx_lock, MA_OWNED);
struct netfront_rx_info {
struct netif_rx_response rx;
};
static inline struct mbuf *
-xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
+xennet_get_rx_mbuf(struct netfront_queue_info *queue, RING_IDX ri)
{
int i = xennet_rxidx(ri);
struct mbuf *m;
- m = np->rx_mbufs[i];
- np->rx_mbufs[i] = NULL;
+ m = queue->rx_mbufs[i];
+ queue->rx_mbufs[i] = NULL;
return (m);
}
static inline grant_ref_t
-xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
+xennet_get_rx_ref(struct netfront_queue_info *queue, RING_IDX ri)
{
int i = xennet_rxidx(ri);
- grant_ref_t ref = np->grant_rx_ref[i];
+ grant_ref_t ref = queue->grant_rx_ref[i];
KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
- np->grant_rx_ref[i] = GRANT_REF_INVALID;
- return ref;
+ queue->grant_rx_ref[i] = GRANT_REF_INVALID;
+ return (ref);
}
#define IPRINTK(fmt, args...) \
static int
netfront_suspend(device_t dev)
{
- struct netfront_info *info = device_get_softc(dev);
-
- XN_RX_LOCK(info);
- XN_TX_LOCK(info);
- netfront_carrier_off(info);
- XN_TX_UNLOCK(info);
- XN_RX_UNLOCK(info);
+ struct netfront_info *np = device_get_softc(dev);
+ u_int i;
+ struct netfront_queue_info *queue;
+
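+ /*
+ * Hold every queue's RX and TX lock while taking the carrier down,
+ * so that no queue is left processing its rings concurrently.
+ */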
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ XN_RX_LOCK(queue);
+ XN_TX_LOCK(queue);
+ }
+ netfront_carrier_off(np);
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ XN_RX_UNLOCK(queue);
+ XN_TX_UNLOCK(queue);
+ }
return (0);
}
goto out;
}
+ /* XXX negotiate other features as well. */
+
/* Create shared ring, alloc event channel. */
err = setup_device(dev, info);
if (err)
xenbus_dev_fatal(dev, err, "starting transaction");
goto destroy_ring;
}
+
err = xs_printf(xst, node, "tx-ring-ref","%u",
- info->tx_ring_ref);
+ info->queue[0].tx_ring_ref);
if (err) {
message = "writing tx ring-ref";
goto abort_transaction;
}
err = xs_printf(xst, node, "rx-ring-ref","%u",
- info->rx_ring_ref);
+ info->queue[0].rx_ring_ref);
if (err) {
message = "writing rx ring-ref";
goto abort_transaction;
}
err = xs_printf(xst, node,
"event-channel", "%u",
- xen_intr_port(info->xen_intr_handle));
+ xen_intr_port(info->queue[0].xen_intr_handle));
if (err) {
message = "writing event-channel";
goto abort_transaction;
}
+
err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
if (err) {
message = "writing request-rx-copy";
netif_rx_sring_t *rxs;
int error;
- info->tx_ring_ref = GRANT_REF_INVALID;
- info->rx_ring_ref = GRANT_REF_INVALID;
- info->rx.sring = NULL;
- info->tx.sring = NULL;
+ u_int q = 0; /* XXX temporary arrangement */
+ KASSERT(info->num_queues == 1, ("num_queues != 1"));
+
+ info->queue[q].tx_ring_ref = GRANT_REF_INVALID;
+ info->queue[q].rx_ring_ref = GRANT_REF_INVALID;
+ info->queue[q].rx.sring = NULL;
+ info->queue[q].tx.sring = NULL;
txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
if (!txs) {
goto fail;
}
SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
- error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
+ FRONT_RING_INIT(&info->queue[q].tx, txs, PAGE_SIZE);
+ error = xenbus_grant_ring(dev, virt_to_mfn(txs),
+ &info->queue[q].tx_ring_ref);
if (error)
goto fail;
goto fail;
}
SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+ FRONT_RING_INIT(&info->queue[q].rx, rxs, PAGE_SIZE);
- error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
+ error = xenbus_grant_ring(dev, virt_to_mfn(rxs),
+ &info->queue[q].rx_ring_ref);
if (error)
goto fail;
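+ /*
+ * Allocate the event channel and bind xn_intr to it; the handler
+ * argument is the per-queue structure, so the interrupt services
+ * that queue's rings.
+ */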
error = xen_intr_alloc_and_bind_local_port(dev,
- xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
- INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);
+ xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr,
+ &info->queue[q],
+ INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
+ &info->queue[q].xen_intr_handle);
if (error) {
xenbus_dev_fatal(dev, error,
case XenbusStateInitWait:
if (xenbus_get_state(dev) != XenbusStateInitialising)
break;
- if (network_connect(sc) != 0)
+ if (xn_connect(sc) != 0)
break;
xenbus_set_state(dev, XenbusStateConnected);
break;
}
static void
-xn_free_rx_ring(struct netfront_info *sc)
+xn_free_rx_ring(struct netfront_queue_info *queue)
{
-#if 0
int i;
for (i = 0; i < NET_RX_RING_SIZE; i++) {
- if (sc->xn_cdata.rx_mbufs[i] != NULL) {
- m_freem(sc->rx_mbufs[i]);
- sc->rx_mbufs[i] = NULL;
+ if (queue->rx_mbufs[i] != NULL) {
+ m_freem(queue->rx_mbufs[i]);
+ queue->rx_mbufs[i] = NULL;
}
}
- sc->rx.rsp_cons = 0;
- sc->xn_rx_if->req_prod = 0;
- sc->xn_rx_if->event = sc->rx.rsp_cons ;
-#endif
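+ /* Reset the RX ring indices. */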
+ queue->rx.rsp_cons = 0;
+ queue->rx.sring->req_prod = 0;
+ queue->rx.sring->rsp_event = queue->rx.rsp_cons;
}
static void
-xn_free_tx_ring(struct netfront_info *sc)
+xn_free_tx_ring(struct netfront_queue_info *queue)
{
-#if 0
int i;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
- if (sc->tx_mbufs[i] != NULL) {
- m_freem(sc->tx_mbufs[i]);
- sc->xn_cdata.xn_tx_chain[i] = NULL;
+ if (queue->tx_mbufs[i] != NULL) {
+ m_freem(queue->tx_mbufs[i]);
+ queue->xn_cdata.xn_tx_chain[i] = NULL;
}
}
return;
-#endif
}
/**
* fragment, plus up to 2 entries for "options" (e.g. TSO).
*/
static inline int
-xn_tx_slot_available(struct netfront_info *np)
+xn_tx_slot_available(struct netfront_queue_info *queue)
{
- return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
+ return (RING_FREE_REQUESTS(&queue->tx) > (MAX_TX_REQ_FRAGS + 2));
}
static void
-netif_release_tx_bufs(struct netfront_info *np)
+xn_release_tx_bufs(struct netfront_queue_info *queue)
{
int i;
for (i = 1; i <= NET_TX_RING_SIZE; i++) {
struct mbuf *m;
- m = np->tx_mbufs[i];
+ m = queue->tx_mbufs[i];
/*
* We assume that no kernel addresses are
*/
if (((uintptr_t)m) <= NET_TX_RING_SIZE)
continue;
- gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
- gnttab_release_grant_reference(&np->gref_tx_head,
- np->grant_tx_ref[i]);
- np->grant_tx_ref[i] = GRANT_REF_INVALID;
- add_id_to_freelist(np->tx_mbufs, i);
- np->xn_cdata.xn_tx_chain_cnt--;
- if (np->xn_cdata.xn_tx_chain_cnt < 0) {
+ gnttab_end_foreign_access_ref(queue->grant_tx_ref[i]);
+ gnttab_release_grant_reference(&queue->gref_tx_head,
+ queue->grant_tx_ref[i]);
+ queue->grant_tx_ref[i] = GRANT_REF_INVALID;
+ add_id_to_freelist(queue->tx_mbufs, i);
+ queue->xn_cdata.xn_tx_chain_cnt--;
+ if (queue->xn_cdata.xn_tx_chain_cnt < 0) {
panic("%s: tx_chain_cnt must be >= 0", __func__);
}
m_free(m);
}
static void
-network_alloc_rx_buffers(struct netfront_info *sc)
+xn_alloc_rx_buffers(struct netfront_queue_info *queue)
{
- int otherend_id = xenbus_get_otherend_id(sc->xbdev);
+ struct netfront_info *np = queue->info;
+ int otherend_id = xenbus_get_otherend_id(np->xbdev);
unsigned short id;
struct mbuf *m_new;
int i, batch_target, notify;
vm_offset_t vaddr;
u_long pfn;
- req_prod = sc->rx.req_prod_pvt;
+ req_prod = queue->rx.req_prod_pvt;
- if (__predict_false(sc->carrier == 0))
+ if (__predict_false(np->carrier == 0))
return;
/*
* Here we attempt to maintain rx_target buffers in flight, counting
* buffers that we have yet to process in the receive ring.
*/
- batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
- for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
+ batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
+ for (i = mbufq_len(&queue->xn_rx_batch); i < batch_target; i++) {
m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m_new == NULL) {
if (i != 0)
m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;
/* queue the mbufs allocated */
- (void )mbufq_enqueue(&sc->xn_rx_batch, m_new);
+ (void )mbufq_enqueue(&queue->xn_rx_batch, m_new);
}
/*
* of submission worthwhile. Otherwise wait for more mbufs and
* request entries to become available.
*/
- if (i < (sc->rx_target/2)) {
- if (req_prod >sc->rx.sring->req_prod)
+ if (i < (queue->rx_target/2)) {
+ if (req_prod > queue->rx.sring->req_prod)
goto push;
return;
}
* low" as having less than a fourth of our target buffers free
* at the time we refilled the queue.
*/
- if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
- sc->rx_target *= 2;
- if (sc->rx_target > sc->rx_max_target)
- sc->rx_target = sc->rx_max_target;
+ if ((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) {
+ queue->rx_target *= 2;
+ if (queue->rx_target > np->rx_max_target)
+ queue->rx_target = np->rx_max_target;
}
refill:
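+ /*
+ * Hand the batched mbufs to the backend: claim a grant reference for
+ * each one, grant its backing page to the other end and fill in the
+ * next free RX request slot.
+ */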
for (i = 0; ; i++) {
- if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
+ if ((m_new = mbufq_dequeue(&queue->xn_rx_batch)) == NULL)
break;
m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
id = xennet_rxidx(req_prod + i);
- KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
- sc->rx_mbufs[id] = m_new;
+ KASSERT(queue->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
+ queue->rx_mbufs[id] = m_new;
- ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
KASSERT(ref != GNTTAB_LIST_END,
("reserved grant references exhuasted"));
- sc->grant_rx_ref[id] = ref;
+ queue->grant_rx_ref[id] = ref;
vaddr = mtod(m_new, vm_offset_t);
pfn = vtophys(vaddr) >> PAGE_SHIFT;
- req = RING_GET_REQUEST(&sc->rx, req_prod + i);
+ req = RING_GET_REQUEST(&queue->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
req->id = id;
req->gref = ref;
- sc->rx_pfn_array[i] =
+ queue->rx_pfn_array[i] =
vtophys(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
}
KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
- KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
+ KASSERT(mbufq_len(&queue->xn_rx_batch) == 0,
+     ("not all mbufs processed"));
/*
* We may have allocated buffers which have entries outstanding
* in the page * update queue -- make sure we flush those first!
wmb();
/* Above is a suitable barrier to ensure backend will see requests. */
- sc->rx.req_prod_pvt = req_prod + i;
+ queue->rx.req_prod_pvt = req_prod + i;
push:
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
- xen_intr_signal(sc->xen_intr_handle);
+ xen_intr_signal(queue->xen_intr_handle);
}
static void
-xn_rxeof(struct netfront_info *np)
+xn_rxeof(struct netfront_queue_info *queue)
{
struct ifnet *ifp;
+ struct netfront_info *np = queue->info;
#if (defined(INET) || defined(INET6))
- struct lro_ctrl *lro = &np->xn_lro;
+ struct lro_ctrl *lro = &queue->lro;
struct lro_entry *queued;
#endif
struct netfront_rx_info rinfo;
int err, work_to_do;
do {
- XN_RX_LOCK_ASSERT(np);
+ XN_RX_LOCK_ASSERT(queue);
if (!netfront_carrier_ok(np))
return;
ifp = np->xn_ifp;
- rp = np->rx.sring->rsp_prod;
+ rp = queue->rx.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- i = np->rx.rsp_cons;
+ i = queue->rx.rsp_cons;
while ((i != rp)) {
- memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
memset(extras, 0, sizeof(rinfo.extras));
m = NULL;
- err = xennet_get_responses(np, &rinfo, rp, &i, &m);
+ err = xennet_get_responses(queue, &rinfo, rp, &i, &m);
if (__predict_false(err)) {
if (m)
(void )mbufq_enqueue(&errq, m);
- np->stats.rx_errors++;
+ queue->stats.rx_errors++;
continue;
}
m->m_pkthdr.csum_data = 0xffff;
}
- np->stats.rx_packets++;
- np->stats.rx_bytes += m->m_pkthdr.len;
+ queue->stats.rx_packets++;
+ queue->stats.rx_bytes += m->m_pkthdr.len;
(void )mbufq_enqueue(&rxq, m);
- np->rx.rsp_cons = i;
+ queue->rx.rsp_cons = i;
}
mbufq_drain(&errq);
/*
* Do we really need to drop the rx lock?
*/
- XN_RX_UNLOCK(np);
+ XN_RX_UNLOCK(queue);
#if (defined(INET) || defined(INET6))
/* Use LRO if possible */
if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
#else
(*ifp->if_input)(ifp, m);
#endif
- XN_RX_LOCK(np);
+ XN_RX_LOCK(queue);
}
- np->rx.rsp_cons = i;
+ queue->rx.rsp_cons = i;
#if (defined(INET) || defined(INET6))
/*
#if 0
/* If we get a callback with very few responses, reduce fill target. */
/* NB. Note exponential increase, linear decrease. */
- if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
- ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
- np->rx_target = np->rx_min_target;
+ if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
+     ((3*queue->rx_target) / 4)) &&
+     (--queue->rx_target < np->rx_min_target))
+ queue->rx_target = np->rx_min_target;
#endif
- network_alloc_rx_buffers(np);
+ xn_alloc_rx_buffers(queue);
- RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
+ RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, work_to_do);
} while (work_to_do);
}
static void
-xn_txeof(struct netfront_info *np)
+xn_txeof(struct netfront_queue_info *queue)
{
RING_IDX i, prod;
unsigned short id;
struct ifnet *ifp;
netif_tx_response_t *txr;
struct mbuf *m;
+ struct netfront_info *np = queue->info;
- XN_TX_LOCK_ASSERT(np);
+ XN_TX_LOCK_ASSERT(queue);
if (!netfront_carrier_ok(np))
return;
ifp = np->xn_ifp;
do {
- prod = np->tx.sring->rsp_prod;
+ prod = queue->tx.sring->rsp_prod;
rmb(); /* Ensure we see responses up to 'rp'. */
- for (i = np->tx.rsp_cons; i != prod; i++) {
- txr = RING_GET_RESPONSE(&np->tx, i);
+ for (i = queue->tx.rsp_cons; i != prod; i++) {
+ txr = RING_GET_RESPONSE(&queue->tx, i);
if (txr->status == NETIF_RSP_NULL)
continue;
__func__, txr->status);
}
id = txr->id;
- m = np->tx_mbufs[id];
+ m = queue->tx_mbufs[id];
KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
("mbuf already on the free list, but we're "
if (!m->m_next)
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if (__predict_false(gnttab_query_foreign_access(
- np->grant_tx_ref[id]) != 0)) {
+ queue->grant_tx_ref[id]) != 0)) {
panic("%s: grant id %u still in use by the "
"backend", __func__, id);
}
gnttab_end_foreign_access_ref(
- np->grant_tx_ref[id]);
+ queue->grant_tx_ref[id]);
gnttab_release_grant_reference(
- &np->gref_tx_head, np->grant_tx_ref[id]);
- np->grant_tx_ref[id] = GRANT_REF_INVALID;
+ &queue->gref_tx_head, queue->grant_tx_ref[id]);
+ queue->grant_tx_ref[id] = GRANT_REF_INVALID;
- np->tx_mbufs[id] = NULL;
- add_id_to_freelist(np->tx_mbufs, id);
- np->xn_cdata.xn_tx_chain_cnt--;
+ queue->tx_mbufs[id] = NULL;
+ add_id_to_freelist(queue->tx_mbufs, id);
+ queue->xn_cdata.xn_tx_chain_cnt--;
m_free(m);
/* Only mark the queue active if we've freed up at least one slot to try */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
- np->tx.rsp_cons = prod;
+ queue->tx.rsp_cons = prod;
/*
* Set a new event, then check for race with update of
* cases notification from Xen is likely to be the only kick
* that we'll get.
*/
- np->tx.sring->rsp_event =
- prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ queue->tx.sring->rsp_event =
+ prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
mb();
- } while (prod != np->tx.sring->rsp_prod);
+ } while (prod != queue->tx.sring->rsp_prod);
- if (np->tx_full &&
- ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
- np->tx_full = 0;
+ if (queue->tx_full &&
+ ((queue->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
+ queue->tx_full = 0;
#if 0
if (np->user_state == UST_OPEN)
netif_wake_queue(dev);
static void
xn_intr(void *xsc)
{
- struct netfront_info *np = xsc;
+ struct netfront_queue_info *queue = xsc;
+ struct netfront_info *np = queue->info;
struct ifnet *ifp = np->xn_ifp;
#if 0
- if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
+ if (!(queue->rx.rsp_cons != queue->rx.sring->rsp_prod &&
likely(netfront_carrier_ok(np)) &&
ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
#endif
- if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
- XN_TX_LOCK(np);
- xn_txeof(np);
- XN_TX_UNLOCK(np);
+ if (RING_HAS_UNCONSUMED_RESPONSES(&queue->tx)) {
+ XN_TX_LOCK(queue);
+ xn_txeof(queue);
+ XN_TX_UNLOCK(queue);
}
- XN_RX_LOCK(np);
- xn_rxeof(np);
- XN_RX_UNLOCK(np);
+ XN_RX_LOCK(queue);
+ xn_rxeof(queue);
+ XN_RX_UNLOCK(queue);
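+ /* Kick the transmit path on this queue if there is pending work. */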
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- xn_start(ifp);
+ !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ XN_TX_LOCK(queue);
+ xn_start_locked(ifp, queue);
+ XN_TX_UNLOCK(queue);
+ }
}
static void
-xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
+xennet_move_rx_slot(struct netfront_queue_info *queue, struct mbuf *m,
grant_ref_t ref)
{
- int new = xennet_rxidx(np->rx.req_prod_pvt);
-
- KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
- np->rx_mbufs[new] = m;
- np->grant_rx_ref[new] = ref;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
- np->rx.req_prod_pvt++;
+ int new = xennet_rxidx(queue->rx.req_prod_pvt);
+
+ KASSERT(queue->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
+ queue->rx_mbufs[new] = m;
+ queue->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
+ queue->rx.req_prod_pvt++;
}
static int
-xennet_get_extras(struct netfront_info *np,
+xennet_get_extras(struct netfront_queue_info *queue,
struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
struct netif_extra_info *extra;
}
extra = (struct netif_extra_info *)
- RING_GET_RESPONSE(&np->rx, ++(*cons));
+ RING_GET_RESPONSE(&queue->rx, ++(*cons));
if (__predict_false(!extra->type ||
extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
}
- m = xennet_get_rx_mbuf(np, *cons);
- ref = xennet_get_rx_ref(np, *cons);
- xennet_move_rx_slot(np, m, ref);
+ m = xennet_get_rx_mbuf(queue, *cons);
+ ref = xennet_get_rx_ref(queue, *cons);
+ xennet_move_rx_slot(queue, m, ref);
} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
return err;
}
static int
-xennet_get_responses(struct netfront_info *np,
+xennet_get_responses(struct netfront_queue_info *queue,
struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
struct mbuf **list)
{
struct netif_rx_response *rx = &rinfo->rx;
struct netif_extra_info *extras = rinfo->extras;
struct mbuf *m, *m0, *m_prev;
- grant_ref_t ref = xennet_get_rx_ref(np, *cons);
+ grant_ref_t ref = xennet_get_rx_ref(queue, *cons);
RING_IDX ref_cons = *cons;
int frags = 1;
int err = 0;
u_long ret;
- m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
+ m0 = m = m_prev = xennet_get_rx_mbuf(queue, *cons);
if (rx->flags & NETRXF_extra_info) {
- err = xennet_get_extras(np, extras, rp, cons);
+ err = xennet_get_extras(queue, extras, rp, cons);
}
if (m0 != NULL) {
WPRINTK("rx->offset: %x, size: %u\n",
rx->offset, rx->status);
#endif
- xennet_move_rx_slot(np, m, ref);
+ xennet_move_rx_slot(queue, m, ref);
if (m0 == m)
m0 = NULL;
m = NULL;
ret = gnttab_end_foreign_access_ref(ref);
KASSERT(ret, ("Unable to end access to grant references"));
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ gnttab_release_grant_reference(&queue->gref_rx_head, ref);
next:
if (m == NULL)
*/
m_prev = m;
- rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
- m = xennet_get_rx_mbuf(np, *cons + frags);
+ rx = RING_GET_RESPONSE(&queue->rx, *cons + frags);
+ m = xennet_get_rx_mbuf(queue, *cons + frags);
/*
* m_prev == NULL can happen if rx->status < 0 or if
if (m0 == NULL)
m0 = m;
m->m_next = NULL;
- ref = xennet_get_rx_ref(np, *cons + frags);
+ ref = xennet_get_rx_ref(queue, *cons + frags);
ref_cons = *cons + frags;
frags++;
}
}
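+/* Periodic tick, run under the softc lock. */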
static void
-xn_tick_locked(struct netfront_info *sc)
+xn_tick_locked(struct netfront_info *np)
{
- XN_RX_LOCK_ASSERT(sc);
- callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
+ XN_LOCK_ASSERT(np);
+ callout_reset(&np->xn_stat_ch, hz, xn_tick, np);
/* XXX placeholder for printing debug information */
}
static void
xn_tick(void *xsc)
{
- struct netfront_info *sc;
+ struct netfront_info *np = xsc;
- sc = xsc;
- XN_RX_LOCK(sc);
- xn_tick_locked(sc);
- XN_RX_UNLOCK(sc);
+ XN_LOCK(np);
+ xn_tick_locked(np);
+ XN_UNLOCK(np);
}
/**
* it onto the transmit ring.
*/
static int
-xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
+xn_assemble_tx_request(struct netfront_queue_info *queue, struct mbuf *m_head)
{
- struct ifnet *ifp;
struct mbuf *m;
+ struct netfront_info *np = queue->info;
+ struct ifnet *ifp = np->xn_ifp;
u_int nfrags;
int otherend_id;
- ifp = sc->xn_ifp;
-
/**
* Defragment the mbuf if necessary.
*/
* deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
* the Linux network stack.
*/
- if (nfrags > sc->maxfrags) {
+ if (nfrags > np->maxfrags) {
m = m_defrag(m_head, M_NOWAIT);
if (!m) {
/*
* it in here as an assert for now just to make certain that
* xn_tx_chain_cnt is accurate.
*/
- KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
+ KASSERT((queue->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
- "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
+ "(%d)!", __func__, (int) queue->xn_cdata.xn_tx_chain_cnt,
(int) nfrags, (int) NET_TX_RING_SIZE));
/*
* of fragments or hit the end of the mbuf chain.
*/
m = m_head;
- otherend_id = xenbus_get_otherend_id(sc->xbdev);
+ otherend_id = xenbus_get_otherend_id(np->xbdev);
for (m = m_head; m; m = m->m_next) {
netif_tx_request_t *tx;
uintptr_t id;
grant_ref_t ref;
u_long mfn; /* XXX Wrong type? */
- tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
- id = get_id_from_freelist(sc->tx_mbufs);
+ tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt);
+ id = get_id_from_freelist(queue->tx_mbufs);
if (id == 0)
panic("%s: was allocated the freelist head!\n",
__func__);
- sc->xn_cdata.xn_tx_chain_cnt++;
- if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
+ queue->xn_cdata.xn_tx_chain_cnt++;
+ if (queue->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
__func__);
- sc->tx_mbufs[id] = m;
+ queue->tx_mbufs[id] = m;
tx->id = id;
- ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
KASSERT((short)ref >= 0, ("Negative ref"));
mfn = virt_to_mfn(mtod(m, vm_offset_t));
gnttab_grant_foreign_access_ref(ref, otherend_id,
mfn, GNTMAP_readonly);
- tx->gref = sc->grant_tx_ref[id] = ref;
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
tx->flags = 0;
if (m == m_head) {
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
struct netif_extra_info *gso =
(struct netif_extra_info *)
- RING_GET_REQUEST(&sc->tx,
- ++sc->tx.req_prod_pvt);
+ RING_GET_REQUEST(&queue->tx,
+ ++queue->tx.req_prod_pvt);
tx->flags |= NETTXF_extra_info;
if (m->m_next)
tx->flags |= NETTXF_more_data;
- sc->tx.req_prod_pvt++;
+ queue->tx.req_prod_pvt++;
}
BPF_MTAP(ifp, m_head);
- sc->stats.tx_bytes += m_head->m_pkthdr.len;
- sc->stats.tx_packets++;
+ queue->stats.tx_bytes += m_head->m_pkthdr.len;
+ queue->stats.tx_packets++;
return (0);
}
static void
-xn_start_locked(struct ifnet *ifp)
+xn_start_locked(struct ifnet *ifp, struct netfront_queue_info *queue)
{
- struct netfront_info *sc;
+ struct netfront_info *np;
struct mbuf *m_head;
int notify;
- sc = ifp->if_softc;
+ XN_TX_LOCK_ASSERT(queue);
+
+ np = ifp->if_softc;
- if (!netfront_carrier_ok(sc))
+ if (!netfront_carrier_ok(np))
return;
/*
* maximum-sized packet, pull mbufs off the queue and put them on
* the transmit ring.
*/
- while (xn_tx_slot_available(sc)) {
+ while (xn_tx_slot_available(queue)) {
IF_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
- if (xn_assemble_tx_request(sc, m_head) != 0)
+ if (xn_assemble_tx_request(queue, m_head) != 0)
break;
}
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- xen_intr_signal(sc->xen_intr_handle);
+ xen_intr_signal(queue->xen_intr_handle);
- if (RING_FULL(&sc->tx)) {
- sc->tx_full = 1;
+ if (RING_FULL(&queue->tx)) {
+ queue->tx_full = 1;
#if 0
netif_stop_queue(dev);
#endif
static void
xn_start(struct ifnet *ifp)
{
- struct netfront_info *sc;
- sc = ifp->if_softc;
- XN_TX_LOCK(sc);
- xn_start_locked(ifp);
- XN_TX_UNLOCK(sc);
+ struct netfront_info *np = ifp->if_softc;
+ struct netfront_queue_info *queue;
+
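+ /* Only a single queue is supported for now; always use queue 0. */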
+ queue = &np->queue[0];
+
+ XN_TX_LOCK(queue);
+ xn_start_locked(ifp, queue);
+ XN_TX_UNLOCK(queue);
}
/* equivalent of network_open() in Linux */
static void
-xn_ifinit_locked(struct netfront_info *sc)
+xn_ifinit_locked(struct netfront_info *np)
{
struct ifnet *ifp;
+ int i;
+ struct netfront_queue_info *queue;
- XN_LOCK_ASSERT(sc);
+ XN_LOCK_ASSERT(np);
- ifp = sc->xn_ifp;
+ ifp = np->xn_ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
- xn_stop(sc);
+ xn_stop(np);
- network_alloc_rx_buffers(sc);
- sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ xn_alloc_rx_buffers(queue);
+ queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
+ }
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if_link_state_change(ifp, LINK_STATE_UP);
- callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
+ callout_reset(&np->xn_stat_ch, hz, xn_tick, np);
}
static void
callout_stop(&sc->xn_stat_ch);
- xn_free_rx_ring(sc);
- xn_free_tx_ring(sc);
-
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if_link_state_change(ifp, LINK_STATE_DOWN);
}
-/* START of Xenolinux helper functions adapted to FreeBSD */
-int
-network_connect(struct netfront_info *np)
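+/*
+ * Requeue any RX mbufs still owned by the frontend after a reconnect:
+ * compact them to the start of the ring, re-grant each backing page to
+ * the backend and rebuild the matching request slots.
+ */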
+static void
+xn_rebuild_rx_bufs(struct netfront_queue_info *queue)
{
- int i, requeue_idx, error;
+ int requeue_idx, i;
grant_ref_t ref;
netif_rx_request_t *req;
+
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ struct mbuf *m;
+ u_long pfn;
+
+ if (queue->rx_mbufs[i] == NULL)
+ continue;
+
+ m = queue->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(queue, i);
+ ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
+
+ req = RING_GET_REQUEST(&queue->rx, requeue_idx);
+ pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
+
+ gnttab_grant_foreign_access_ref(ref,
+ xenbus_get_otherend_id(queue->info->xbdev),
+ pfn, 0);
+
+ req->gref = ref;
+ req->id = requeue_idx;
+
+ requeue_idx++;
+ }
+
+ queue->rx.req_prod_pvt = requeue_idx;
+}
+
+/* START of Xenolinux helper functions adapted to FreeBSD */
+int
+xn_connect(struct netfront_info *np)
+{
+ int i, error;
u_int feature_rx_copy;
+ struct netfront_queue_info *queue;
error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
"feature-rx-copy", NULL, "%u", &feature_rx_copy);
/* Step 1: Reinitialise variables. */
xn_query_features(np);
xn_configure_features(np);
- netif_release_tx_bufs(np);
-
- /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
- for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
- struct mbuf *m;
- u_long pfn;
-
- if (np->rx_mbufs[i] == NULL)
- continue;
-
- m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
- ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
-
- req = RING_GET_REQUEST(&np->rx, requeue_idx);
- pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;
-
- gnttab_grant_foreign_access_ref(ref,
- xenbus_get_otherend_id(np->xbdev),
- pfn, 0);
-
- req->gref = ref;
- req->id = requeue_idx;
- requeue_idx++;
+ /* Step 2: Release TX buffers. */
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ xn_release_tx_bufs(queue);
}
- np->rx.req_prod_pvt = requeue_idx;
+ /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ xn_rebuild_rx_bufs(queue);
+ }
- /* Step 3: All public and private state should now be sane. Get
+ /* Step 4: All public and private state should now be sane. Get
* ready to start sending and receiving packets and give the driver
* domain a kick because we've probably just requeued some
* packets.
*/
netfront_carrier_on(np);
- xen_intr_signal(np->xen_intr_handle);
- XN_TX_LOCK(np);
- xn_txeof(np);
- XN_TX_UNLOCK(np);
- network_alloc_rx_buffers(np);
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ xen_intr_signal(queue->xen_intr_handle);
+ XN_TX_LOCK(queue);
+ xn_txeof(queue);
+ XN_TX_UNLOCK(queue);
+ xn_alloc_rx_buffers(queue);
+ }
return (0);
}
static int
xn_configure_features(struct netfront_info *np)
{
- int err, cap_enabled;
+ int err, cap_enabled, i;
err = 0;
cap_enabled = UINT_MAX;
#if (defined(INET) || defined(INET6))
- if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
- tcp_lro_free(&np->xn_lro);
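+ /* LRO contexts are per queue. */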
+ for (i = 0; i < np->num_queues; i++)
+ if ((np->xn_ifp->if_capenable & IFCAP_LRO) ==
+ (cap_enabled & IFCAP_LRO))
+ tcp_lro_free(&np->queue[i].lro);
#endif
np->xn_ifp->if_capenable =
np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if (defined(INET) || defined(INET6))
- if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
- (cap_enabled & IFCAP_LRO)) {
- err = tcp_lro_init(&np->xn_lro);
- if (err) {
- device_printf(np->xbdev, "LRO initialization failed\n");
- } else {
- np->xn_lro.ifp = np->xn_ifp;
- np->xn_ifp->if_capenable |= IFCAP_LRO;
+ for (i = 0; i < np->num_queues; i++) {
+ if (xn_enable_lro &&
+     (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
+ (cap_enabled & IFCAP_LRO)) {
+ err = tcp_lro_init(&np->queue[i].lro);
+ if (err) {
+ device_printf(np->xbdev,
+     "LRO initialization failed\n");
+ } else {
+ np->queue[i].lro.ifp = np->xn_ifp;
+ np->xn_ifp->if_capenable |= IFCAP_LRO;
+ }
}
}
if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
np->xbdev = dev;
- mtx_init(&np->tx_lock, "xntx", "netfront transmit lock", MTX_DEF);
- mtx_init(&np->rx_lock, "xnrx", "netfront receive lock", MTX_DEF);
mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);
ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
- np->rx_target = RX_MIN_TARGET;
np->rx_min_target = RX_MIN_TARGET;
np->rx_max_target = RX_MAX_TARGET;
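+ /* Only a single queue pair is created for now. */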
+ np->num_queues = 1;
+ np->queue = malloc(sizeof(struct netfront_queue_info) * np->num_queues,
+     M_DEVBUF, M_WAITOK|M_ZERO);
+
+ np->queue[0].num = 0;
+ np->queue[0].info = np;
+ np->queue[0].rx_target = RX_MIN_TARGET;
+
+ mtx_init(&np->queue[0].tx_lock, "xntx", "netfront transmit lock",
+ MTX_DEF);
+ mtx_init(&np->queue[0].rx_lock, "xnrx", "netfront receive lock",
+ MTX_DEF);
+
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
for (i = 0; i <= NET_TX_RING_SIZE; i++) {
- np->tx_mbufs[i] = (void *) ((u_long) i+1);
- np->grant_tx_ref[i] = GRANT_REF_INVALID;
+ np->queue[0].tx_mbufs[i] = (void *) ((u_long) i+1);
+ np->queue[0].grant_tx_ref[i] = GRANT_REF_INVALID;
}
- np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
+ np->queue[0].tx_mbufs[NET_TX_RING_SIZE] = (void *)0;
for (i = 0; i <= NET_RX_RING_SIZE; i++) {
-
- np->rx_mbufs[i] = NULL;
- np->grant_rx_ref[i] = GRANT_REF_INVALID;
+ np->queue[0].rx_mbufs[i] = NULL;
+ np->queue[0].grant_rx_ref[i] = GRANT_REF_INVALID;
}
- mbufq_init(&np->xn_rx_batch, INT_MAX);
+ mbufq_init(&np->queue[0].xn_rx_batch, INT_MAX);
/* A grant for every tx ring slot */
if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
- &np->gref_tx_head) != 0) {
+ &np->queue[0].gref_tx_head) != 0) {
IPRINTK("#### netfront can't alloc tx grant refs\n");
err = ENOMEM;
goto error;
}
/* A grant for every rx ring slot */
if (gnttab_alloc_grant_references(RX_MAX_TARGET,
- &np->gref_rx_head) != 0) {
+ &np->queue[0].gref_rx_head) != 0) {
WPRINTK("#### netfront can't alloc rx grant refs\n");
- gnttab_free_grant_references(np->gref_tx_head);
+ gnttab_free_grant_references(np->queue[0].gref_tx_head);
err = ENOMEM;
goto error;
}
err = xen_net_read_mac(dev, np->mac);
- if (err) {
- gnttab_free_grant_references(np->gref_rx_head);
- gnttab_free_grant_references(np->gref_tx_head);
+ if (err)
goto error;
- }
/* Set up ifnet structure */
ifp = np->xn_ifp = if_alloc(IFT_ETHER);
}
static void
-netif_free(struct netfront_info *info)
+netif_free(struct netfront_info *np)
{
- XN_LOCK(info);
- xn_stop(info);
- XN_UNLOCK(info);
- callout_drain(&info->xn_stat_ch);
- netif_disconnect_backend(info);
- if (info->xn_ifp != NULL) {
- ether_ifdetach(info->xn_ifp);
- if_free(info->xn_ifp);
- info->xn_ifp = NULL;
+ XN_LOCK(np);
+ xn_stop(np);
+ XN_UNLOCK(np);
+ callout_drain(&np->xn_stat_ch);
+ netif_disconnect_backend(np);
+ free(np->queue, M_DEVBUF);
+ if (np->xn_ifp != NULL) {
+ ether_ifdetach(np->xn_ifp);
+ if_free(np->xn_ifp);
+ np->xn_ifp = NULL;
}
- ifmedia_removeall(&info->sc_media);
+ ifmedia_removeall(&np->sc_media);
}
static void
-netif_disconnect_backend(struct netfront_info *info)
+netif_disconnect_backend(struct netfront_info *np)
{
- XN_RX_LOCK(info);
- XN_TX_LOCK(info);
- netfront_carrier_off(info);
- XN_TX_UNLOCK(info);
- XN_RX_UNLOCK(info);
+ u_int i;
+ struct netfront_queue_info *queue;
- free_ring(&info->tx_ring_ref, &info->tx.sring);
- free_ring(&info->rx_ring_ref, &info->rx.sring);
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ XN_RX_LOCK(queue);
+ XN_TX_LOCK(queue);
+ }
+ netfront_carrier_off(np);
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ XN_RX_UNLOCK(queue);
+ XN_TX_UNLOCK(queue);
+ }
- xen_intr_unbind(&info->xen_intr_handle);
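+ /*
+ * Tear down each queue: release any buffers still on the rings, free
+ * the shared rings and unbind the event channel.
+ */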
+ for (i = 0; i < np->num_queues; i++) {
+ queue = &np->queue[i];
+ xn_free_rx_ring(queue);
+ xn_free_tx_ring(queue);
+ free_ring(&queue->tx_ring_ref, &queue->tx.sring);
+ free_ring(&queue->rx_ring_ref, &queue->rx.sring);
+ xen_intr_unbind(&queue->xen_intr_handle);
+ }
}
static void