xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
xen-netback: simplify metadata
author Paul Durrant <paul.durrant@citrix.com>
Fri, 9 Oct 2015 12:39:53 +0000 (13:39 +0100)
committer Paul Durrant <paul.durrant@citrix.com>
Fri, 9 Oct 2015 14:32:32 +0000 (15:32 +0100)
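
This change removes the id, gso_type and gso_size fields from struct
xenvif_rx_meta. The GSO type and size are now read directly from the skb
at the point where responses are generated in xenvif_rx_action(), the GSO
prefix and extra-info slots are accounted for by simply advancing
rx.req_cons in xenvif_gop_skb(), and response ids are derived from the rx
ring producer index in make_rx_response() rather than being copied out of
each request.

A minimal illustrative sketch (not taken from the patch itself) of the id
derivation this relies on, assuming a power-of-two ring whose requests are
posted in ring order with id equal to the slot index:

    #include <stdint.h>

    #define RX_RING_SIZE 256    /* assumed power-of-two ring size */

    /*
     * Illustrative only: recompute a response id from the ring
     * producer index, as make_rx_response() does after this patch.
     */
    static uint16_t rx_response_id(uint32_t rsp_prod_pvt)
    {
            return rsp_prod_pvt & (RX_RING_SIZE - 1);
    }
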
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
drivers/net/xen-netback/common.h
drivers/net/xen-netback/netback.c

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index e6d03e2d93379854c751dc0826b2f86f9e29bc6f..b4dcf64a42d629af0e283257ddcccefa2f06ce1a 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -68,10 +68,7 @@ struct pending_tx_info {
 #define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
 
 struct xenvif_rx_meta {
-       int id;
        int size;
-       int gso_type;
-       int gso_size;
 };
 
 #define GSO_BIT(type) \
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index ec98d43916a818152263c8128b4854e9d340c3d2..8a6ea3546adf9698f9200a2f78293d46efc4f1cb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -101,7 +101,6 @@ static void push_tx_responses(struct xenvif_queue *queue);
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
-                                            u16      id,
                                             s8       st,
                                             u16      offset,
                                             u16      size,
@@ -263,10 +262,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
        req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
 
        meta = npo->meta + npo->meta_prod++;
-       meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
-       meta->gso_size = 0;
        meta->size = 0;
-       meta->id = req->id;
 
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
@@ -281,12 +277,11 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
-                                unsigned long offset, int *head)
+                                unsigned long offset)
 {
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
        unsigned long bytes;
-       int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
        /* Data must not cross a page boundary. */
        BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -345,20 +340,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                        page++;
                        offset = 0;
                }
-
-               /* Leave a gap for the GSO descriptor. */
-               if (skb_is_gso(skb)) {
-                       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-                               gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-                       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-                               gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-               }
-
-               if (*head && ((1 << gso_type) & queue->vif->gso_mask))
-                       queue->rx.req_cons++;
-
-               *head = 0; /* There must be something in this buffer now. */
-
        }
 }
 
@@ -381,16 +362,12 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        struct xenvif *vif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int i;
-       struct xen_netif_rx_request *req;
-       struct xenvif_rx_meta *meta;
        unsigned char *data;
-       int head = 1;
        int old_meta_prod;
-       int gso_type;
+       int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
        old_meta_prod = npo->meta_prod;
 
-       gso_type = XEN_NETIF_GSO_TYPE_NONE;
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -398,31 +375,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
        }
 
-       /* Set up a GSO prefix descriptor, if necessary */
-       if ((1 << gso_type) & vif->gso_prefix_mask) {
-               req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
-               meta = npo->meta + npo->meta_prod++;
-               meta->gso_type = gso_type;
-               meta->gso_size = skb_shinfo(skb)->gso_size;
-               meta->size = 0;
-               meta->id = req->id;
-       }
+       /* Skip a GSO prefix descriptor, if necessary */
+       if ((1 << gso_type) & vif->gso_prefix_mask)
+               queue->rx.req_cons++;
 
-       req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
-       meta = npo->meta + npo->meta_prod++;
+       get_next_rx_buffer(queue, npo);
 
-       if ((1 << gso_type) & vif->gso_mask) {
-               meta->gso_type = gso_type;
-               meta->gso_size = skb_shinfo(skb)->gso_size;
-       } else {
-               meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
-               meta->gso_size = 0;
-       }
-
-       meta->size = 0;
-       meta->id = req->id;
-       npo->copy_off = 0;
-       npo->copy_gref = req->gref;
+       /* Skip a GSO extra segment, if necessary */
+       if ((1 << gso_type) & vif->gso_mask)
+               queue->rx.req_cons++;
 
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
@@ -433,7 +394,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        len = skb_tail_pointer(skb) - data;
 
                xenvif_gop_frag_copy(queue, skb, npo,
-                                    virt_to_page(data), len, offset, &head);
+                                    virt_to_page(data), len, offset);
                data += len;
        }
 
@@ -441,8 +402,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                xenvif_gop_frag_copy(queue, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                                    skb_shinfo(skb)->frags[i].page_offset,
-                                    &head);
+                                    skb_shinfo(skb)->frags[i].page_offset);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -495,7 +455,7 @@ static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
                        flags = XEN_NETRXF_more_data;
 
                offset = 0;
-               make_rx_response(queue, meta[i].id, status, offset,
+               make_rx_response(queue, status, offset,
                                 meta[i].size, flags);
        }
 }
@@ -542,27 +502,33 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               struct xenvif *vif = queue->vif;
+               int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+               if (skb_is_gso(skb)) {
+                       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+                               gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+                       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                               gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+               }
 
-               if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_prefix_mask) {
-                       resp = RING_GET_RESPONSE(&queue->rx,
-                                                queue->rx.rsp_prod_pvt++);
+               /* Set up a GSO prefix descriptor, if necessary */
+               if ((1 << gso_type) & vif->gso_prefix_mask) {
+                       RING_IDX i = queue->rx.rsp_prod_pvt++;
 
-                       resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
+                       resp = RING_GET_RESPONSE(&queue->rx, i);
 
-                       resp->offset = queue->meta[npo.meta_cons].gso_size;
-                       resp->id = queue->meta[npo.meta_cons].id;
-                       resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
+                       resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
 
-                       npo.meta_cons++;
-                       XENVIF_RX_CB(skb)->meta_slots_used--;
+                       resp->offset = skb_shinfo(skb)->gso_size;
+                       resp->id = i & (RING_SIZE(&queue->rx) - 1);
+                       resp->status = XENVIF_RX_CB(skb)->meta_slots_used + 1;
                }
 
-
                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;
 
-               status = xenvif_check_gop(queue->vif,
+               status = xenvif_check_gop(vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);
 
@@ -578,13 +544,12 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                        flags |= XEN_NETRXF_data_validated;
 
                offset = 0;
-               resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
+               resp = make_rx_response(queue,
                                        status, offset,
                                        queue->meta[npo.meta_cons].size,
                                        flags);
 
-               if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_mask) {
+               if ((1 << gso_type) & vif->gso_mask) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
@@ -592,8 +557,8 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
-                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+                       gso->u.gso.type = gso_type;
+                       gso->u.gso.size = skb_shinfo(skb)->gso_size;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;
 
@@ -1808,25 +1773,22 @@ static void push_tx_responses(struct xenvif_queue *queue)
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
-                                            u16      id,
                                             s8       st,
                                             u16      offset,
                                             u16      size,
                                             u16      flags)
 {
-       RING_IDX i = queue->rx.rsp_prod_pvt;
+       RING_IDX i = queue->rx.rsp_prod_pvt++;
        struct xen_netif_rx_response *resp;
 
        resp = RING_GET_RESPONSE(&queue->rx, i);
        resp->offset     = offset;
        resp->flags      = flags;
-       resp->id         = id;
+       resp->id         = i & (RING_SIZE(&queue->rx) - 1);
        resp->status     = (s16)size;
        if (st < 0)
                resp->status = (s16)st;
 
-       queue->rx.rsp_prod_pvt = ++i;
-
        return resp;
 }