#define EXIT() DEBUGMSG("<===")
#define RETURN(x) do { EXIT(); return (x); } while (0)
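+/* Sizing limits for packets received into posted buffers: the skb
+ * linear ("head") area is sized for at least POSTED_MIN_PAYLOAD_SIZE
+ * bytes, and if it ends up holding fewer than POSTED_MIN_HEAD_SIZE
+ * bytes of data, part of the first fragment is copied into it (see
+ * receive_posted_skb()). */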
+#define POSTED_MIN_PAYLOAD_SIZE 128
+#define POSTED_MIN_HEAD_SIZE 66
+
/* After we send this number of frags, we request the other end to
* notify us when sending the corresponding finish packet message */
#define MAX_MAX_COUNT_FRAGS_NO_EVENT 192
unsigned nr_fragments;
grant_ref_t gref_pool;
enum transmit_policy policy;
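+ /* When a small first fragment of a posted packet is to be copied
+ * entirely into the skb head, it is not attached as a page frag;
+ * its RX buffer and size are stashed here until receive_posted_skb()
+ * performs the copy. */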
+ struct nc2_rx_buffer *rx_buf;
+ unsigned rx_buf_size;
uint8_t failed;
uint8_t is_posted; /* indicates a received posted buffer packet */
uint8_t expecting_finish;
volatile void *msg);
void nc2_replenish_rx_buffers(struct netchannel2 *nc);
+void release_rx_buffer(struct netchannel2 *nc,
+ struct nc2_rx_buffer *rxb);
void queue_finish_packet_message(struct netchannel2_ring_pair *ncrp,
uint32_t id, uint8_t flags);
}
/* Release a single RX buffer and return it to the unused list. */
-static void release_rx_buffer(struct netchannel2 *nc,
- struct nc2_rx_buffer *rxb)
+void release_rx_buffer(struct netchannel2 *nc,
+ struct nc2_rx_buffer *rxb)
{
rxb->is_posted = 0;
nc2_end_foreign_access(rxb->gref,
unsigned frags_off)
{
struct netchannel2_fragment frag;
- struct netchannel2_fragment firstfrag;
- struct sk_buff *head_skb;
+ struct sk_buff *head_skb = NULL;
struct sk_buff *cur_skb;
struct sk_buff *new_skb;
unsigned x;
int dropped;
unsigned acc_len;
unsigned prefix_len;
- struct nc2_rx_buffer *first_rxb;
- void *firstbuffer;
- int first;
- int ncopy;
-
-#define SKB_MIN_PAYLOAD_SIZE 128
-#define SKB_MIN_HEAD_SIZE 66
+ struct skb_cb_overlay *sco = NULL;
+ int first;
+ void *buf;
dropped = 0;
is_bad = 0;
- if (msg->prefix_size < SKB_MIN_PAYLOAD_SIZE)
- prefix_len = SKB_MIN_PAYLOAD_SIZE;
+ first = 0;
+
+ if (msg->prefix_size < POSTED_MIN_PAYLOAD_SIZE)
+ prefix_len = POSTED_MIN_PAYLOAD_SIZE;
else
prefix_len = msg->prefix_size;
/* We don't enforce the MAX_PACKET_BYTES limit here. That's
dropped = 1;
nc->rx.nr_failed_no_skb++;
} else {
+ head_skb = cur_skb;
+ sco = get_skb_overlay(cur_skb);
skb_reserve(cur_skb, NET_IP_ALIGN);
- nc2_copy_from_ring_off(&nc->rings.cons_ring,
- skb_put(cur_skb, msg->prefix_size),
- msg->prefix_size,
- frags_off + nr_frags * sizeof(frag));
- }
- head_skb = cur_skb;
- acc_len = 0;
-
- first = 0;
- if (skb_headlen(head_skb) < SKB_MIN_HEAD_SIZE) {
- fetch_fragment(&nc->rings, 0, &firstfrag, frags_off);
- rxb = find_rx_buffer(nc, firstfrag.pre_post.id);
- if (rxb == NULL) {
- pr_debug("RX in bad frag %d.\n", firstfrag.pre_post.id);
- is_bad = 1;
- }
- else {
- first_rxb = rxb;
- firstbuffer = first_rxb->buffer;
+ if (msg->prefix_size)
+ nc2_copy_from_ring_off(&nc->rings.cons_ring,
+ skb_put(cur_skb, msg->prefix_size),
+ msg->prefix_size,
+ frags_off + nr_frags * sizeof(frag));
+
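+ /* If the prefix left the head short, arrange for data from the
+ * first posted fragment to be pulled into the head later, in
+ * receive_posted_skb(). */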
+ if (skb_headlen(cur_skb) < POSTED_MIN_HEAD_SIZE) {
+ fetch_fragment(&nc->rings, 0, &frag, frags_off);
+ rxb = find_rx_buffer(nc, frag.pre_post.id);
+ if (rxb == NULL) {
+ pr_debug("RX in bad frag %d.\n", frag.pre_post.id);
+ is_bad = 1;
+ } else {
+ buf = rxb->buffer + rxb->offset;
+ if (frag.size + skb_headlen(cur_skb) <= POSTED_MIN_PAYLOAD_SIZE) {
+ /* The first frag will be copied completely into the head,
+ * so skip it when adding frags to the skb. */
+ first = 1;
+ rxb->offset = frag.off;
+ /* Save the skipped frag (its offset now points at the
+ * fragment data); it will be copied into the skb head later. */
+ sco->rx_buf = rxb;
+ sco->rx_buf_size = frag.size;
+ }
+ }
}
- if (firstfrag.size + skb_headlen(head_skb) <= SKB_MIN_PAYLOAD_SIZE)
- /* first frag will be completely copied into head,
- so skip it when adding frags to skb */
- first = 1;
}
+ acc_len = 0;
+
+ for (x = first; x < nr_frags; x++) {
fetch_fragment(&nc->rings, x, &frag, frags_off);
rxb = find_rx_buffer(nc, frag.pre_post.id);
if (is_bad) {
pr_debug("Received skb is bad!\n");
- if (head_skb)
+ if (head_skb) {
+ if (sco->rx_buf)
+ release_rx_buffer(nc, sco->rx_buf);
kfree_skb(head_skb);
+ }
head_skb = NULL;
if (dropped)
nc->stats.rx_dropped++;
head_skb->data_len = cur_skb->data_len + acc_len;
head_skb->truesize = cur_skb->truesize + acc_len;
get_skb_overlay(head_skb)->is_posted = 1;
-
- if (skb_headlen(head_skb) < SKB_MIN_HEAD_SIZE) {
- if (first)
- ncopy = firstfrag.size;
- else
- ncopy = SKB_MIN_HEAD_SIZE - skb_headlen(head_skb);
- /* Copy some extra bytes to make copy aligned to cache line boundary */
- memcpy(head_skb->tail - 16 - NET_IP_ALIGN,
- firstbuffer + firstfrag.off - 16 - NET_IP_ALIGN,
- ncopy + 16 + NET_IP_ALIGN);
- head_skb->tail += ncopy;
- if (first) {
- head_skb->len += ncopy;
- head_skb->truesize += ncopy;
- release_rx_buffer (nc, first_rxb);
- } else {
- head_skb->data_len -= ncopy;
- skb_shinfo(cur_skb)->frags[0].size -= ncopy;
- skb_shinfo(cur_skb)->frags[0].page_offset += ncopy;
- }
- }
}
return head_skb;
#endif /* CONFIG_XEN_NETDEV2_LRO */
-int static inline nc2_receive_skb(struct netchannel2 *nc,
- struct sk_buff *skb)
+static inline int receive_posted_skb(struct netchannel2 *nc,
+ struct sk_buff *skb,
+ struct skb_cb_overlay *sco)
{
- struct skb_cb_overlay *sco;
+ int ncopy;
+ void *va;
+ skb_frag_t *frag;
+
+ /* For posted buffers we may need to copy data from the
+ * first fragment into the skb head. If the first fragment
+ * is small it is copied completely and is not attached as
+ * an skb frag; in that case its buffer and size are passed
+ * through sco. */
+ if (skb_headlen(skb) < POSTED_MIN_HEAD_SIZE) {
+ if (sco->rx_buf) {
+ ncopy = sco->rx_buf_size;
+ va = sco->rx_buf->buffer +
+ sco->rx_buf->offset;
+ /* Copy a few extra bytes so the copy is
+ * aligned to a cache line boundary. */
+ memcpy(skb->tail - 16 - NET_IP_ALIGN,
+ va - 16 - NET_IP_ALIGN,
+ ncopy + 16 + NET_IP_ALIGN);
+ skb->tail += ncopy;
+ skb->len += ncopy;
+ skb->truesize += ncopy;
+ release_rx_buffer(nc, sco->rx_buf);
+ } else {
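+ /* The first fragment was attached as a page frag; pull just
+ * enough bytes out of it to bring the head up to
+ * POSTED_MIN_HEAD_SIZE. */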
+ ncopy = POSTED_MIN_HEAD_SIZE -
+ skb_headlen(skb);
+ frag = skb_shinfo(skb)->frags;
+ va = page_address(frag->page) + frag->page_offset;
+ /* Copy a few extra bytes so the copy is
+ * aligned to a cache line boundary. */
+ memcpy(skb->tail - 16 - NET_IP_ALIGN,
+ va - 16 - NET_IP_ALIGN,
+ ncopy + 16 + NET_IP_ALIGN);
+ skb->tail += ncopy;
+ skb->data_len -= ncopy;
+ frag->size -= ncopy;
+ frag->page_offset += ncopy;
+ }
+ }
+ skb->protocol = eth_type_trans(skb, skb->dev);
#ifdef CONFIG_XEN_NETDEV2_LRO
- int lro_used = 0;
+ lro_receive_skb(&nc->lro_mgr, skb, NULL);
+ return 1;
+#else
+ netif_receive_skb(skb);
+ return 0;
#endif
+}
+
+static inline int nc2_receive_skb(struct netchannel2 *nc,
+ struct sk_buff *skb)
+{
+ struct skb_cb_overlay *sco;
+
sco = get_skb_overlay(skb);
if (unlikely(sco->failed))
kfree_skb(skb);
else {
- skb->protocol = eth_type_trans(skb, skb->dev);
-#ifdef CONFIG_XEN_NETDEV2_LRO
- if (sco->is_posted) {
- lro_receive_skb(&nc->lro_mgr, skb, NULL);
- lro_used = 1;
- }
- else
+ if (sco->is_posted) {
+ return receive_posted_skb(nc, skb, sco);
+ } else {
+ skb->protocol = eth_type_trans(skb, skb->dev);
netif_receive_skb(skb);
- return lro_used;
-#else
- netif_receive_skb(skb);
-#endif
+ }
}
return 0;
while (!skb_queue_empty(&nc->pending_rx_queue)) {
next = __skb_dequeue(&nc->pending_rx_queue);
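+ /* nc2_receive_skb() returns nonzero when the skb was passed
+ * to LRO; OR the results together so lro_used reflects the
+ * whole batch rather than just the last skb. */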
if (skb)
- lro_used = nc2_receive_skb(nc, skb);
+ lro_used |= nc2_receive_skb(nc, skb);
skb = next;
}
if (skb)
- lro_used = nc2_receive_skb(nc, skb);
+ lro_used |= nc2_receive_skb(nc, skb);
#ifdef CONFIG_XEN_NETDEV2_LRO
if (lro_used)