From: Paul Durrant Date: Mon, 5 Oct 2015 16:43:58 +0000 (+0100) Subject: xen-netback: pass hash to VM on receive X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=refs%2Fheads%2Frss;p=people%2Fpauldu%2Flinux.git xen-netback: pass hash to VM on receive Signed-off-by: Paul Durrant --- diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 3f116515c406..f1cabe7e4b53 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -201,9 +201,9 @@ struct xenvif_queue { /* Per-queue data for xenvif */ }; /* Maximum number of Rx slots a to-guest packet may use, including the - * slot needed for GSO meta-data. + * slots needed for GSO meta-data and hash. */ -#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1) +#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 2) enum state_bit_shift { /* This bit marks that the vif is connected */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index a3f5f379166b..ae35045a891c 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -181,18 +181,15 @@ static u32 toeplitz_hash(const u8 *k, unsigned int klen, static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb) { struct flow_keys flow; - u32 hash; - enum pkt_hash_types type; + u32 hash = 0; + bool is_l4 = false; const u8 *key = vif->hash_params.toeplitz.key; const unsigned int len = ARRAY_SIZE(vif->hash_params.toeplitz.key); - hash = 0; - type = PKT_HASH_TYPE_NONE; - memset(&flow, 0, sizeof(flow)); if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) - goto done; - + return; + if (flow.basic.n_proto == htons(ETH_P_IP)) { if (vif->hash_params.toeplitz.ipv4_tcp_enabled && flow.basic.ip_proto == IPPROTO_TCP) { @@ -206,7 +203,7 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb) hash = toeplitz_hash(key, len, &data, sizeof(data)); - type = PKT_HASH_TYPE_L4; + is_l4 = true; } else if 
(vif->hash_params.toeplitz.ipv4_enabled) { struct { struct flow_dissector_key_ipv4_addrs addrs; @@ -216,8 +213,8 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb) hash = toeplitz_hash(key, len, &data, sizeof(data)); - type = PKT_HASH_TYPE_L4; - } + } else + return; } else if (flow.basic.n_proto == htons(ETH_P_IPV6)) { if (vif->hash_params.toeplitz.ipv6_tcp_enabled && @@ -232,7 +229,7 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb) hash = toeplitz_hash(key, len, &data, sizeof(data)); - type = PKT_HASH_TYPE_L4; + is_l4 = true; } else if (vif->hash_params.toeplitz.ipv6_enabled) { struct { struct flow_dissector_key_ipv6_addrs addrs; @@ -242,12 +239,11 @@ static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb) hash = toeplitz_hash(key, len, &data, sizeof(data)); - type = PKT_HASH_TYPE_L4; - } + } else + return; } -done: - skb_set_hash(skb, hash, type); + __skb_set_sw_hash(skb, hash, is_l4); } static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 42569b994ea8..b88126f0b67b 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -287,6 +287,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb offset &= ~PAGE_MASK; while (size > 0) { + struct xenvif *vif = queue->vif; struct xen_page_foreign *foreign; BUG_ON(offset >= PAGE_SIZE); @@ -318,7 +319,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb } copy_gop->source.offset = offset; - copy_gop->dest.domid = queue->vif->domid; + copy_gop->dest.domid = vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; @@ -343,8 +344,14 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb gso_type = XEN_NETIF_GSO_TYPE_TCPV6; } - if (*head && ((1 << gso_type) & queue->vif->gso_mask)) 
- queue->rx.req_cons++; + if (*head) { + if ((1 << gso_type) & vif->gso_mask) + queue->rx.req_cons++; + + if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED && + skb->sw_hash) + queue->rx.req_cons++; + } *head = 0; /* There must be something in this buffer now. */ @@ -496,6 +503,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue) static void xenvif_rx_action(struct xenvif_queue *queue) { + struct xenvif *vif = queue->vif; s8 status; u16 flags; struct xen_netif_rx_response *resp; @@ -531,9 +539,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue) gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { + struct xen_netif_extra_info *extra = NULL; if ((1 << queue->meta[npo.meta_cons].gso_type) & - queue->vif->gso_prefix_mask) { + vif->gso_prefix_mask) { resp = RING_GET_RESPONSE(&queue->rx, queue->rx.rsp_prod_pvt++); @@ -551,7 +560,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue) queue->stats.tx_bytes += skb->len; queue->stats.tx_packets++; - status = xenvif_check_gop(queue->vif, + status = xenvif_check_gop(vif, XENVIF_RX_CB(skb)->meta_slots_used, &npo); @@ -573,21 +582,46 @@ static void xenvif_rx_action(struct xenvif_queue *queue) flags); if ((1 << queue->meta[npo.meta_cons].gso_type) & - queue->vif->gso_mask) { - struct xen_netif_extra_info *gso = - (struct xen_netif_extra_info *) + vif->gso_mask) { + resp->flags |= XEN_NETRXF_extra_info; + + extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&queue->rx, queue->rx.rsp_prod_pvt++); - resp->flags |= XEN_NETRXF_extra_info; + extra->u.gso.type = queue->meta[npo.meta_cons].gso_type; + extra->u.gso.size = queue->meta[npo.meta_cons].gso_size; + extra->u.gso.pad = 0; + extra->u.gso.features = 0; + + extra->type = XEN_NETIF_EXTRA_TYPE_GSO; + extra->flags = 0; + } + + if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED && + skb->sw_hash) { + if (resp->flags & XEN_NETRXF_extra_info) + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; + else + resp->flags |= 
XEN_NETRXF_extra_info; + + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); + + if (skb->l4_hash) + extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ? + XEN_NETIF_HASH_TYPE_IP_TCPV4 : + XEN_NETIF_HASH_TYPE_IP_TCPV6; + else + extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ? + XEN_NETIF_HASH_TYPE_IPV4 : + XEN_NETIF_HASH_TYPE_IPV6; - gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; - gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; - gso->u.gso.pad = 0; - gso->u.gso.features = 0; + extra->u.hash.value = skb->hash; - gso->type = XEN_NETIF_EXTRA_TYPE_GSO; - gso->flags = 0; + extra->type = XEN_NETIF_EXTRA_TYPE_HASH; + extra->flags = 0; } xenvif_add_frag_responses(queue, status, diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 252ffd4801ef..abd508bc6041 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -158,7 +158,8 @@ struct xen_netif_tx_request { #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ -#define XEN_NETIF_EXTRA_TYPE_MAX (4) +#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */ +#define XEN_NETIF_EXTRA_TYPE_MAX (5) /* xen_netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) @@ -169,6 +170,13 @@ struct xen_netif_tx_request { #define XEN_NETIF_GSO_TYPE_TCPV4 (1) #define XEN_NETIF_GSO_TYPE_TCPV6 (2) +/* Hash types */ +#define XEN_NETIF_HASH_TYPE_NONE (0) +#define XEN_NETIF_HASH_TYPE_IPV4 (1) +#define XEN_NETIF_HASH_TYPE_IP_TCPV4 (2) +#define XEN_NETIF_HASH_TYPE_IPV6 (3) +#define XEN_NETIF_HASH_TYPE_IP_TCPV6 (4) + /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. @@ -207,6 +215,20 @@ struct xen_netif_extra_info { uint8_t addr[6]; /* Address to add/remove. 
*/ } mcast; + struct { + /* The calculated hash value */ + uint32_t value; + + /* + * Hash type. This determines the headers over + * which the hash was calculated. + */ + uint8_t type; + + /* Future expansion. */ + uint8_t pad; + } hash; + uint16_t pad[3]; } u; };