needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
needed++;
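+ /* A hash extra info segment, like GSO, consumes one more slot. */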
+ if (queue->vif->hash_extra)
+ needed++;

do {
prod = queue->rx.sring->req_prod;
struct xenvif_rx_meta *meta;
int head;
int gso_type;
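+ /* The skb's protocol and hash type are noted up front so the need
+  * for a hash extra info segment can be determined per fragment.
+  */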
+ int protocol;
+ int hash_type;
struct page *page;
};
npo->copy_off += *len;
info->meta->size += *len;
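+ /* Slot gaps for extra info segments are only needed after the head
+  * buffer, so there is nothing more to do for other fragments.
+  */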
+ if (!info->head)
+ return;
+
/* Leave a gap for the GSO descriptor. */
- if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+ if ((1 << info->gso_type) & queue->vif->gso_mask)
+ queue->rx.req_cons++;
+
+ /* Leave a gap for the hash extra segment. */
+ if (queue->vif->hash_extra &&
+ (info->hash_type == PKT_HASH_TYPE_L4 ||
+ info->hash_type == PKT_HASH_TYPE_L3) &&
+ (info->protocol == htons(ETH_P_IP) ||
+ info->protocol == htons(ETH_P_IPV6)))
queue->rx.req_cons++;
info->head = 0; /* There must be something in this buffer now */
.npo = npo,
.head = *head,
.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+ .protocol = skb->protocol,
+ .hash_type = skb_hash_type(skb),
};
unsigned long bytes;
static void xenvif_rx_action(struct xenvif_queue *queue)
{
+ struct xenvif *vif = queue->vif;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
skb_queue_head_init(&rxq);
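+ /* Hash extra info segments are only needed if Toeplitz hashing has
+  * been configured (i.e. any hash flags are set); latch that for
+  * this pass over the queue.
+  */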
+ vif->hash_extra = !!vif->toeplitz.flags;
+
while (xenvif_rx_ring_slots_available(queue)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
queue->last_rx_time = jiffies;
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
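+ /* Track the most recent extra info segment, if any, so that a
+  * further one can be chained to it with XEN_NETIF_EXTRA_FLAG_MORE.
+  */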
+ struct xen_netif_extra_info *extra = NULL;
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_prefix_mask) {
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
- status = xenvif_check_gop(queue->vif,
+ status = xenvif_check_gop(vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
flags);
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_mask) {
- struct xen_netif_extra_info *gso =
- (struct xen_netif_extra_info *)
+ vif->gso_mask) {
+ extra = (struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;

- gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
- gso->u.gso.pad = 0;
- gso->u.gso.features = 0;
+ extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+ }
+
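+ /* If the packet carries an L3 or L4 hash over IPv4 or IPv6, report
+  * it to the frontend in an additional extra info segment.
+  */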
+ if (vif->hash_extra &&
+ (skb_hash_type(skb) == PKT_HASH_TYPE_L4 ||
+ skb_hash_type(skb) == PKT_HASH_TYPE_L3) &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
+ if (resp->flags & XEN_NETRXF_extra_info)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);

- gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
- gso->flags = 0;
+ if (skb_hash_type(skb) == PKT_HASH_TYPE_L4)
+ extra->u.toeplitz.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_TOEPLITZ_HASH_IPV4_TCP :
+ _XEN_NETIF_CTRL_TOEPLITZ_HASH_IPV6_TCP;
+ else if (skb_hash_type(skb) == PKT_HASH_TYPE_L3)
+ extra->u.toeplitz.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_TOEPLITZ_HASH_IPV4 :
+ _XEN_NETIF_CTRL_TOEPLITZ_HASH_IPV6;
+
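+ /* Copy the raw 32-bit hash into the segment's value field. */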
+ *(uint32_t *)extra->u.toeplitz.value =
+ skb_get_hash_raw(skb);
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_TOEPLITZ;
+ extra->flags = 0;
}
xenvif_add_frag_responses(queue, status,