xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
xen-netback: pass hash to VM on receive rss
authorPaul Durrant <paul.durrant@citrix.com>
Mon, 5 Oct 2015 16:43:58 +0000 (17:43 +0100)
committerPaul Durrant <paul.durrant@citrix.com>
Mon, 5 Oct 2015 16:46:16 +0000 (17:46 +0100)
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
include/xen/interface/io/netif.h

index 3f116515c4064f98c36e73a7bcbc9dd7c16419b0..f1cabe7e4b535f62ba3a87770b9eb8c4abdd9a89 100644 (file)
@@ -201,9 +201,9 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 };
 
 /* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
+ * slots needed for GSO meta-data and hash.
  */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
+#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 2)
 
 enum state_bit_shift {
        /* This bit marks that the vif is connected */
index a3f5f379166b28a5af84dce3f0ec28af369202be..ae35045a891cf41a9b48e777b4314dde65a36279 100644 (file)
@@ -181,18 +181,15 @@ static u32 toeplitz_hash(const u8 *k, unsigned int klen,
 static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
 {
        struct flow_keys flow;
-       u32 hash;
-       enum pkt_hash_types type;
+       u32 hash = 0;
+       bool is_l4 = false;
        const u8 *key = vif->hash_params.toeplitz.key;
        const unsigned int len = ARRAY_SIZE(vif->hash_params.toeplitz.key);
 
-       hash = 0;
-       type = PKT_HASH_TYPE_NONE; 
-
        memset(&flow, 0, sizeof(flow));
        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
-               goto done;
-
+               return;
+
        if (flow.basic.n_proto == htons(ETH_P_IP)) {
                if (vif->hash_params.toeplitz.ipv4_tcp_enabled &&
                    flow.basic.ip_proto == IPPROTO_TCP) {
@@ -206,7 +203,7 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
 
                        hash = toeplitz_hash(key, len,
                                             &data, sizeof(data));
-                       type = PKT_HASH_TYPE_L4;
+                       is_l4 = true;
                } else if (vif->hash_params.toeplitz.ipv4_enabled) {
                        struct {
                                struct flow_dissector_key_ipv4_addrs addrs;
@@ -216,8 +213,8 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
 
                        hash = toeplitz_hash(key, len,
                                             &data, sizeof(data));
-                       type = PKT_HASH_TYPE_L4;
-               }
+               } else
+                       return;
        }
        else if (flow.basic.n_proto == htons(ETH_P_IPV6)) {
                if (vif->hash_params.toeplitz.ipv6_tcp_enabled &&
@@ -232,7 +229,7 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
 
                        hash = toeplitz_hash(key, len,
                                             &data, sizeof(data));
-                       type = PKT_HASH_TYPE_L4;
+                       is_l4 = true;
                } else if (vif->hash_params.toeplitz.ipv6_enabled) {
                        struct {
                                struct flow_dissector_key_ipv6_addrs addrs;
@@ -242,12 +239,11 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
 
                        hash = toeplitz_hash(key, len,
                                             &data, sizeof(data));
-                       type = PKT_HASH_TYPE_L4;
-               }
+               } else
+                       return;
        }
 
-done:
-       skb_set_hash(skb, hash, type);
+       __skb_set_sw_hash(skb, hash, is_l4);
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
index 42569b994ea84ae03a9ff0d9b88109d3029c4f30..b88126f0b67bc8edf6c0b40c5bf0c490b0b19dee 100644 (file)
@@ -287,6 +287,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
        offset &= ~PAGE_MASK;
 
        while (size > 0) {
+               struct xenvif *vif = queue->vif;
                struct xen_page_foreign *foreign;
 
                BUG_ON(offset >= PAGE_SIZE);
@@ -318,7 +319,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                }
                copy_gop->source.offset = offset;
 
-               copy_gop->dest.domid = queue->vif->domid;
+               copy_gop->dest.domid = vif->domid;
                copy_gop->dest.offset = npo->copy_off;
                copy_gop->dest.u.ref = npo->copy_gref;
 
@@ -343,8 +344,14 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                                gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
                }
 
-               if (*head && ((1 << gso_type) & queue->vif->gso_mask))
-                       queue->rx.req_cons++;
+               if (*head) {
+                       if ((1 << gso_type) & vif->gso_mask)
+                               queue->rx.req_cons++;
+
+                       if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+                           skb->sw_hash)
+                               queue->rx.req_cons++;
+               }
 
                *head = 0; /* There must be something in this buffer now. */
 
@@ -496,6 +503,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+       struct xenvif *vif = queue->vif;
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
@@ -531,9 +539,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               struct xen_netif_extra_info *extra = NULL;
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_prefix_mask) {
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&queue->rx,
                                                 queue->rx.rsp_prod_pvt++);
 
@@ -551,7 +560,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;
 
-               status = xenvif_check_gop(queue->vif,
+               status = xenvif_check_gop(vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);
 
@@ -573,21 +582,46 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                                        flags);
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_mask) {
-                       struct xen_netif_extra_info *gso =
-                               (struct xen_netif_extra_info *)
+                   vif->gso_mask) {
+                       resp->flags |= XEN_NETRXF_extra_info;
+
+                       extra = (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
                                                  queue->rx.rsp_prod_pvt++);
 
-                       resp->flags |= XEN_NETRXF_extra_info;
+                       extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+                       extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+                       extra->u.gso.pad = 0;
+                       extra->u.gso.features = 0;
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+                       extra->flags = 0;
+               }
+
+               if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+                   skb->sw_hash) {
+                       if (resp->flags & XEN_NETRXF_extra_info)
+                               extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+                       else
+                               resp->flags |= XEN_NETRXF_extra_info;
+
+                       extra = (struct xen_netif_extra_info *)
+                               RING_GET_RESPONSE(&queue->rx,
+                                                 queue->rx.rsp_prod_pvt++);
+
+                       if (skb->l4_hash)
+                               extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ?
+                                       XEN_NETIF_HASH_TYPE_IP_TCPV4 :
+                                       XEN_NETIF_HASH_TYPE_IP_TCPV6;
+                       else
+                               extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ?
+                                       XEN_NETIF_HASH_TYPE_IPV4 :
+                                       XEN_NETIF_HASH_TYPE_IPV6;
 
-                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.pad = 0;
-                       gso->u.gso.features = 0;
+                       extra->u.hash.value = skb->hash;
 
-                       gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-                       gso->flags = 0;
+                       extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+                       extra->flags = 0;
                }
 
                xenvif_add_frag_responses(queue, status,
index 252ffd4801ef68cbfd8b37c58875e816084149a7..abd508bc6041651cd11fc5a3a416fc5a1d1ecbc6 100644 (file)
@@ -158,7 +158,8 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_EXTRA_TYPE_GSO       (1)  /* u.gso */
 #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2)  /* u.mcast */
 #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3)  /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MAX       (4)
+#define XEN_NETIF_EXTRA_TYPE_HASH       (4)  /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX       (5)
 
 /* xen_netif_extra_info flags. */
 #define _XEN_NETIF_EXTRA_FLAG_MORE     (0)
@@ -169,6 +170,13 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_GSO_TYPE_TCPV4       (1)
 #define XEN_NETIF_GSO_TYPE_TCPV6       (2)
 
+/* Hash types */
+#define XEN_NETIF_HASH_TYPE_NONE        (0)
+#define XEN_NETIF_HASH_TYPE_IPV4        (1)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV4    (2)
+#define XEN_NETIF_HASH_TYPE_IPV6        (3)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV6    (4)
+
 /*
  * This structure needs to fit within both netif_tx_request and
  * netif_rx_response for compatibility.
@@ -207,6 +215,20 @@ struct xen_netif_extra_info {
                        uint8_t addr[6]; /* Address to add/remove. */
                } mcast;
 
+               struct {
+                        /* The calculated hash value */
+                        uint32_t value;
+
+                       /*
+                        * Hash type. This determines the headers over
+                        * which the hash was calculated.
+                        */
+                        uint8_t type;
+
+                       /* Future expansion. */
+                       uint8_t pad;
+               } hash;
+
                uint16_t pad[3];
        } u;
 };