static inline int tx_work_todo(struct xenvif_queue *queue);
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
- u16 id,
s8 st,
u16 offset,
u16 size,
req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
- meta->gso_size = 0;
meta->size = 0;
- meta->id = req->id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
- unsigned long offset, int *head)
+ unsigned long offset)
{
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
page++;
offset = 0;
}
-
- /* Leave a gap for the GSO descriptor. */
- if (skb_is_gso(skb)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- }
-
- if (*head && ((1 << gso_type) & queue->vif->gso_mask))
- queue->rx.req_cons++;
-
- *head = 0; /* There must be something in this buffer now. */
-
}
}
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- struct xen_netif_rx_request *req;
- struct xenvif_rx_meta *meta;
unsigned char *data;
- int head = 1;
int old_meta_prod;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
old_meta_prod = npo->meta_prod;
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
if (skb_is_gso(skb)) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
- /* Set up a GSO prefix descriptor, if necessary */
- if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
- meta = npo->meta + npo->meta_prod++;
- meta->gso_type = gso_type;
- meta->gso_size = skb_shinfo(skb)->gso_size;
- meta->size = 0;
- meta->id = req->id;
- }
+ /* Skip a GSO prefix descriptor, if necessary */
+ if ((1 << gso_type) & vif->gso_prefix_mask)
+ queue->rx.req_cons++;
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
- meta = npo->meta + npo->meta_prod++;
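+ /* Start the first rx buffer for this skb: consume a request and reset the copy offset */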
+ get_next_rx_buffer(queue, npo);
- if ((1 << gso_type) & vif->gso_mask) {
- meta->gso_type = gso_type;
- meta->gso_size = skb_shinfo(skb)->gso_size;
- } else {
- meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
- meta->gso_size = 0;
- }
-
- meta->size = 0;
- meta->id = req->id;
- npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ /* Skip a GSO extra segment, if necessary */
+ if ((1 << gso_type) & vif->gso_mask)
+ queue->rx.req_cons++;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
unsigned int len = PAGE_SIZE - offset;
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(queue, skb, npo,
- virt_to_page(data), len, offset, &head);
+ virt_to_page(data), len, offset);
data += len;
}
for (i = 0; i < nr_frags; i++) {
xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ skb_shinfo(skb)->frags[i].page_offset);
}
return npo->meta_prod - old_meta_prod;
flags = XEN_NETRXF_more_data;
offset = 0;
- make_rx_response(queue, meta[i].id, status, offset,
+ make_rx_response(queue, status, offset,
meta[i].size, flags);
}
}
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ struct xenvif *vif = queue->vif;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
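+ /* The GSO type is no longer stored in the meta array, so recompute it from the skb */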
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
- if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_prefix_mask) {
- resp = RING_GET_RESPONSE(&queue->rx,
- queue->rx.rsp_prod_pvt++);
- resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
- resp->offset = queue->meta[npo.meta_cons].gso_size;
- resp->id = queue->meta[npo.meta_cons].id;
- resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
- npo.meta_cons++;
- XENVIF_RX_CB(skb)->meta_slots_used--;
+ /* Set up a GSO prefix descriptor, if necessary */
+ if ((1 << gso_type) & vif->gso_prefix_mask) {
+ RING_IDX i = queue->rx.rsp_prod_pvt++;
+ resp = RING_GET_RESPONSE(&queue->rx, i);
+ resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
+ resp->offset = skb_shinfo(skb)->gso_size;
+ resp->id = i & (RING_SIZE(&queue->rx) - 1);
+ resp->status = XENVIF_RX_CB(skb)->meta_slots_used + 1;
}
-
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
- status = xenvif_check_gop(queue->vif,
+ status = xenvif_check_gop(vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
flags |= XEN_NETRXF_data_validated;
offset = 0;
- resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
+ resp = make_rx_response(queue,
status, offset,
queue->meta[npo.meta_cons].size,
flags);
- if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_mask) {
+ if ((1 << gso_type) & vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = gso_type;
+ gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
- u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
- RING_IDX i = queue->rx.rsp_prod_pvt;
+ RING_IDX i = queue->rx.rsp_prod_pvt++;
struct xen_netif_rx_response *resp;
resp = RING_GET_RESPONSE(&queue->rx, i);
resp->offset = offset;
resp->flags = flags;
- resp->id = id;
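+ /* Derive the id from the ring index rather than echoing the request id.
+ * This assumes the frontend assigns request ids in ring order, as netfront does. */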
+ resp->id = i & (RING_SIZE(&queue->rx) - 1);
resp->status = (s16)size;
if (st < 0)
resp->status = (s16)st;
- queue->rx.rsp_prod_pvt = ++i;
-
return resp;
}