struct xenvif_tx_cb {
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
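+ /* One bit per copy op: set when that op was split at a local page boundary. */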
+ u32 split_mask;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
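/* In xenvif_alloc_skb(): the tx control block must fit into skb->cb. */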
struct sk_buff *skb =
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
+
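+ /* Compile-time check only (sizeof() does not evaluate skb), so it is safe before the NULL check. */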
+ BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
if (unlikely(skb == NULL))
return NULL;
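/* In xenvif_get_requests(): build the grant copy ops for the skb head (linear area). */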
nr_slots = shinfo->nr_frags + 1;
copy_count(skb) = 0;
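+ /* No copy ops recorded yet, hence no split bits either. */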
+ XENVIF_TX_CB(skb)->split_mask = 0;
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) - data_len);
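/* The destination is the local page backing the part of the skb head still to be filled. */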
+ /* Don't cross local page boundary! */
+ if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+ amount = XEN_PAGE_SIZE - cop->dest.offset;
+ XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+ }
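+ /* Worked example (assuming XEN_PAGE_SIZE == 4096): with dest.offset == 4000
+  * and amount == 200, only 96 bytes fit into the current local page, so this
+  * op is trimmed to 96 bytes and its bit is set in split_mask; the remaining
+  * 104 bytes are picked up by the next copy op, which continues at the start
+  * of the following local page.
+  */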
+
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
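/* GNTCOPY_source_gref: the source is specified by grant reference, the destination by local frame. */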
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
- * The remainder will be mapped.
+ * The remainder will be mapped or copied in the next
+ * iteration.
*/
txp->offset += amount;
txp->size -= amount;
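/* In xenvif_tx_check_gop(): check the results of the completed copy and map operations. */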
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx;
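+ /* Pending index of the last copy op used for the skb head. */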
+ u16 last_copy_idx = copy_pending_idx(skb, copy_count(skb) - 1);
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
int nr_frags = shinfo->nr_frags;
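/* sharedslot: the first frag reuses the slot of the last head copy op. */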
const bool sharedslot = nr_frags &&
frag_get_pending_idx(&shinfo->frags[0]) ==
- copy_pending_idx(skb, copy_count(skb) - 1);
+ last_copy_idx;
int i, err = 0;
for (i = 0; i < copy_count(skb); i++) {
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
+
+ /* Split copies need to be handled together. */
+ if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+ i++;
+ (*gopp_copy)++;
+ if (!newerr)
+ newerr = (*gopp_copy)->status;
+ }
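+ /* newerr now covers both halves of a split copy: it is nonzero if either half failed. */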
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
- if (i < copy_count(skb) - 1 || !sharedslot)
+ if (pending_idx != last_copy_idx || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
} else {
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
- if (i < copy_count(skb) - 1 || !sharedslot)
+ if (pending_idx != last_copy_idx || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
}