static void xenvif_set_toeplitz_hash(struct xenvif *vif, struct sk_buff *skb)
{
struct flow_keys flow;
- u32 hash;
- enum pkt_hash_types type;
+ u32 hash = 0;
+ bool is_l4 = false;
const u8 *key = vif->hash_params.toeplitz.key;
const unsigned int len = ARRAY_SIZE(vif->hash_params.toeplitz.key);
- hash = 0;
- type = PKT_HASH_TYPE_NONE;
-
memset(&flow, 0, sizeof(flow));
if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- goto done;
-
+ return;
+
if (flow.basic.n_proto == htons(ETH_P_IP)) {
if (vif->hash_params.toeplitz.ipv4_tcp_enabled &&
flow.basic.ip_proto == IPPROTO_TCP) {
struct {
struct flow_dissector_key_ipv4_addrs addrs;
struct flow_dissector_key_ports ports;
} data;
data.addrs = flow.addrs.v4addrs;
data.ports = flow.ports;
hash = toeplitz_hash(key, len,
&data, sizeof(data));
- type = PKT_HASH_TYPE_L4;
+ is_l4 = true;
} else if (vif->hash_params.toeplitz.ipv4_enabled) {
struct {
struct flow_dissector_key_ipv4_addrs addrs;
} data;
data.addrs = flow.addrs.v4addrs;
hash = toeplitz_hash(key, len,
&data, sizeof(data));
- type = PKT_HASH_TYPE_L3;
- }
+ } else
+ return;
}
else if (flow.basic.n_proto == htons(ETH_P_IPV6)) {
if (vif->hash_params.toeplitz.ipv6_tcp_enabled &&
flow.basic.ip_proto == IPPROTO_TCP) {
struct {
struct flow_dissector_key_ipv6_addrs addrs;
struct flow_dissector_key_ports ports;
} data;
data.addrs = flow.addrs.v6addrs;
data.ports = flow.ports;
hash = toeplitz_hash(key, len,
&data, sizeof(data));
- type = PKT_HASH_TYPE_L4;
+ is_l4 = true;
} else if (vif->hash_params.toeplitz.ipv6_enabled) {
struct {
struct flow_dissector_key_ipv6_addrs addrs;
} data;
data.addrs = flow.addrs.v6addrs;
hash = toeplitz_hash(key, len,
&data, sizeof(data));
- type = PKT_HASH_TYPE_L3;
- }
+ } else
+ return;
}
-done:
- skb_set_hash(skb, hash, type);
+ __skb_set_sw_hash(skb, hash, is_l4);
}
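The toeplitz_hash() helper invoked above is not shown in this hunk. For reference, the following is a minimal sketch of the standard Toeplitz algorithm (as described in the Microsoft RSS specification): for every set bit of the input, XOR the current 32-bit window of the key into the hash, sliding the key window one bit per input bit. The signature is assumed from the call sites above; the body is illustrative and is not the implementation used by this series.

#include <linux/types.h>

static u32 toeplitz_hash(const u8 *key, unsigned int keylen,
			 const void *data, unsigned int len)
{
	const u8 *d = data;
	u64 window = 0;
	u32 hash = 0;
	unsigned int i, bit;

	/* Prime a 64-bit sliding window with the first eight key bytes. */
	for (i = 0; i < 8 && i < keylen; i++)
		window |= (u64)key[i] << (56 - 8 * i);

	for (i = 0; i < len; i++) {
		for (bit = 0; bit < 8; bit++) {
			/* XOR in the top 32 key bits for each set data bit. */
			if (d[i] & (0x80 >> bit))
				hash ^= (u32)(window >> 32);
			window <<= 1;
		}
		/* Refill the low byte of the window from the key, if any remains. */
		if (i + 8 < keylen)
			window |= key[i + 8];
	}

	return hash;
}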
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
offset &= ~PAGE_MASK;
while (size > 0) {
+ struct xenvif *vif = queue->vif;
struct xen_page_foreign *foreign;
BUG_ON(offset >= PAGE_SIZE);
}
copy_gop->source.offset = offset;
- copy_gop->dest.domid = queue->vif->domid;
+ copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
- if (*head && ((1 << gso_type) & queue->vif->gso_mask))
- queue->rx.req_cons++;
+ if (*head) {
+ if ((1 << gso_type) & vif->gso_mask)
+ queue->rx.req_cons++;
+
+ if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+ skb->sw_hash)
+ queue->rx.req_cons++;
+ }
*head = 0; /* There must be something in this buffer now. */
static void xenvif_rx_action(struct xenvif_queue *queue)
{
+ struct xenvif *vif = queue->vif;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ struct xen_netif_extra_info *extra = NULL;
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_prefix_mask) {
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
- status = xenvif_check_gop(queue->vif,
+ status = xenvif_check_gop(vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
flags);
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_mask) {
- struct xen_netif_extra_info *gso =
- (struct xen_netif_extra_info *)
+ vif->gso_mask) {
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
- resp->flags |= XEN_NETRXF_extra_info;
+ extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+ }
+
+ if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+ skb->sw_hash) {
+ if (resp->flags & XEN_NETRXF_extra_info)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
+
+ if (skb->l4_hash)
+ extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ?
+ XEN_NETIF_HASH_TYPE_IP_TCPV4 :
+ XEN_NETIF_HASH_TYPE_IP_TCPV6;
+ else
+ extra->u.hash.type = (skb->protocol == htons(ETH_P_IP)) ?
+ XEN_NETIF_HASH_TYPE_IPV4 :
+ XEN_NETIF_HASH_TYPE_IPV6;
- gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
- gso->u.gso.pad = 0;
- gso->u.gso.features = 0;
+ extra->u.hash.value = skb->hash;
- gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
- gso->flags = 0;
+ extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+ extra->flags = 0;
}
xenvif_add_frag_responses(queue, status,
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MAX (4)
+#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX (5)
/* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
+/* Hash types */
+#define XEN_NETIF_HASH_TYPE_NONE (0)
+#define XEN_NETIF_HASH_TYPE_IPV4 (1)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV4 (2)
+#define XEN_NETIF_HASH_TYPE_IPV6 (3)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV6 (4)
+
/*
* This structure needs to fit within both netif_tx_request and
* netif_rx_response for compatibility.
uint8_t addr[6]; /* Address to add/remove. */
} mcast;
+ struct {
+ /* The calculated hash value */
+ uint32_t value;
+
+ /*
+ * Hash type. This determines the headers over
+ * which the hash was calculated.
+ */
+ uint8_t type;
+
+ /* Future expansion. */
+ uint8_t pad;
+ } hash;
+
uint16_t pad[3];
} u;
};
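For illustration only, the sketch below shows how a frontend might consume the new XEN_NETIF_EXTRA_TYPE_HASH extra segment once it has located the extra info slots flagged by XEN_NETRXF_extra_info. handle_extra_hash() is a hypothetical helper, not part of this series; it simply mirrors the backend by recording the value as a software hash via __skb_set_sw_hash(), using the hash type definitions added above.

#include <linux/skbuff.h>
/* Assumes the XEN_NETIF_* definitions from the netif header changes above. */

static void handle_extra_hash(const struct xen_netif_extra_info *extra,
			      struct sk_buff *skb)
{
	bool is_l4;

	if (extra->type != XEN_NETIF_EXTRA_TYPE_HASH)
		return;

	switch (extra->u.hash.type) {
	case XEN_NETIF_HASH_TYPE_IP_TCPV4:
	case XEN_NETIF_HASH_TYPE_IP_TCPV6:
		is_l4 = true;
		break;
	case XEN_NETIF_HASH_TYPE_IPV4:
	case XEN_NETIF_HASH_TYPE_IPV6:
		is_l4 = false;
		break;
	default:
		/* XEN_NETIF_HASH_TYPE_NONE or unknown: leave the skb unhashed. */
		return;
	}

	/* Mirror the backend: record the value as a software-computed hash. */
	__skb_set_sw_hash(skb, extra->u.hash.value, is_l4);
}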