static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
{
- if (vif->gso_mask)
- return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+ int needed;
+
+ if (vif->gso_mask || vif->gso_prefix_mask)
+ needed = DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
else
- return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ needed = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
+ if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED)
+ needed++;
+
+ return needed;
}
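
For illustration only (not part of the patch), assuming 4KiB pages and the default 64KiB gso_max_size, a GSO-capable vif with a hash algorithm configured reserves:

    DIV_ROUND_UP(65536, 4096) + 1 = 17 slots   (data pages + GSO extra_info)
    + 1                           = 18 slots   (hash extra_info segment)

The final increment covers the additional ring slot that the hash extra_info segment consumes.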
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if ((1 << gso_type) & vif->gso_mask)
queue->rx.req_cons++;
+ /* Skip a hash extra segment, if necessary */
+ if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6)))
+ queue->rx.req_cons++;
+
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
struct xenvif *vif = queue->vif;
int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ struct xen_netif_extra_info *extra = NULL;
if (skb_is_gso(skb)) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
flags);
if ((1 << gso_type) & vif->gso_mask) {
- struct xen_netif_extra_info *gso =
- (struct xen_netif_extra_info *)
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
- resp->flags |= XEN_NETRXF_extra_info;
+ extra->u.gso.type = gso_type;
+ extra->u.gso.size = skb_shinfo(skb)->gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+ }
- gso->u.gso.type = gso_type;
- gso->u.gso.size = skb_shinfo(skb)->gso_size;
- gso->u.gso.pad = 0;
- gso->u.gso.features = 0;
+ if (vif->hash_alg != XEN_NETBK_HASH_UNSPECIFIED &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
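+ /*
+ * If a GSO extra_info segment was queued above, flag it with
+ * XEN_NETIF_EXTRA_FLAG_MORE so the frontend knows a hash segment
+ * follows; otherwise mark the response itself as carrying extra info.
+ */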
+ if (resp->flags & XEN_NETRXF_extra_info)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
- gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
- gso->flags = 0;
+ if (skb_has_l4_hash(skb))
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ XEN_NETIF_HASH_TYPE_IP_TCPV4 :
+ XEN_NETIF_HASH_TYPE_IP_TCPV6;
+ else if (skb_has_l3_hash(skb))
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ XEN_NETIF_HASH_TYPE_IPV4 :
+ XEN_NETIF_HASH_TYPE_IPV6;
+ else
+ extra->u.hash.type = XEN_NETIF_HASH_TYPE_NONE;
+
+ *(uint32_t *)extra->u.hash.value = skb->hash;
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+ extra->flags = 0;
}
xenvif_add_frag_responses(queue, status,
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
-#define XEN_NETIF_EXTRA_TYPE_MAX (4)
+#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
+#define XEN_NETIF_EXTRA_TYPE_MAX (5)
/* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
+/* Hash types */
+#define XEN_NETIF_HASH_TYPE_NONE (0)
+#define XEN_NETIF_HASH_TYPE_IPV4 (1)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV4 (2)
+#define XEN_NETIF_HASH_TYPE_IPV6 (3)
+#define XEN_NETIF_HASH_TYPE_IP_TCPV6 (4)
+
/*
* This structure needs to fit within both netif_tx_request and
* netif_rx_response for compatibility.
uint8_t addr[6]; /* Address to add/remove. */
} mcast;
+ struct {
+ /*
+ * Hash type. This determines the headers over
+ * which the hash was calculated.
+ */
+ uint8_t type;
+
+ /* Future expansion. */
+ uint8_t pad;
+
+ /* The calculated hash value (LSB in value[0]) */
+ uint8_t value[4];
+ } hash;
+
uint16_t pad[3];
} u;
};
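
For context only, and not part of this patch: a frontend that receives the new XEN_NETIF_EXTRA_TYPE_HASH segment could feed the value back into the network stack roughly as sketched below. The function name xennet_set_skb_hash is hypothetical; skb_set_hash() and the PKT_HASH_TYPE_* constants are existing kernel API, and the byte order follows the "LSB in value[0]" layout documented above.

static void xennet_set_skb_hash(struct sk_buff *skb,
				const struct xen_netif_extra_info *extra)
{
	/* Reassemble the 32-bit hash, least significant byte first */
	u32 hash = (u32)extra->u.hash.value[0] |
		   (u32)extra->u.hash.value[1] << 8 |
		   (u32)extra->u.hash.value[2] << 16 |
		   (u32)extra->u.hash.value[3] << 24;

	switch (extra->u.hash.type) {
	case XEN_NETIF_HASH_TYPE_IP_TCPV4:
	case XEN_NETIF_HASH_TYPE_IP_TCPV6:
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
		break;
	case XEN_NETIF_HASH_TYPE_IPV4:
	case XEN_NETIF_HASH_TYPE_IPV6:
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
		break;
	default:
		/* XEN_NETIF_HASH_TYPE_NONE: leave the skb unhashed */
		break;
	}
}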