}
skb_co->policy = policy;
- msg_size = get_transmitted_packet_msg_size(skb);
+ msg_size = get_transmitted_packet_reserve_size(skb);
if (!nc2_reserve_payload_bytes(&rings->prod_ring, msg_size)) {
/* Uh oh. */
printk("<0>Drop no reserve.\n");
int prepare_xmit_allocate_grant(struct netchannel2_ring_pair *ncrp,
struct sk_buff *skb,
int use_subpage_grants);
+int prepare_xmit_allocate_grant2(struct netchannel2_ring_pair *ncrp,
+ struct sk_buff *skb,
+ int use_subpage_grants);
void xmit_grant(struct netchannel2_ring_pair *ncrp,
- struct sk_buff *skb,
- int use_subpage_grants,
- volatile void *msg);
+ struct sk_buff *skb,
+ int use_subpage_grants,
+ volatile void *msg);
int prepare_xmit_allocate_post(struct netchannel2 *nc,
struct sk_buff *skb);
void xmit_post(struct netchannel2 *nc,
domid_t otherend_id);
void queue_packet_to_interface(struct sk_buff *skb,
struct netchannel2_ring_pair *ncrp);
-unsigned get_transmitted_packet_msg_size(struct sk_buff *skb);
+unsigned get_transmitted_packet_reserve_size(struct sk_buff *skb);
void init_ring_pair(struct netchannel2_ring_pair *ncrp);
irqreturn_t nc2_int(int irq, void *dev_id);
int use_subpage_grants)
{
struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
+
+ if (allocate_txp_slot(ncrp, skb) < 0)
+ return -1;
+
+ if (use_subpage_grants)
+ skb_co->type = NC2_PACKET_TYPE_receiver_copy;
+ else
+ skb_co->type = NC2_PACKET_TYPE_receiver_map;
+
+ skb_co->inline_prefix_size = skb_headlen(skb);
+ if (skb_co->inline_prefix_size > PACKET_PREFIX_SIZE)
+ skb_co->inline_prefix_size = PACKET_PREFIX_SIZE;
+
+ return 0;
+}
+
+int prepare_xmit_allocate_grant2(struct netchannel2_ring_pair *ncrp,
+ struct sk_buff *skb,
+ int use_subpage_grants)
+{
+ struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
unsigned nr_fragments;
struct sk_buff *cur_skb;
grant_ref_t gref_pool;
unsigned inline_bytes_left;
unsigned inline_prefix_size;
- if (allocate_txp_slot(ncrp, skb) < 0)
- return -1;
-
- inline_prefix_size = PACKET_PREFIX_SIZE;
- if (skb_headlen(skb) < inline_prefix_size)
- inline_prefix_size = skb_headlen(skb);
+ inline_prefix_size = skb_co->inline_prefix_size;
if (skb_co->nr_fragments == 0) {
inline_bytes_left = inline_prefix_size;
release_txp_slot(ncrp, skb);
/* Leave skb_co->nr_fragments set, so that we don't
have to recompute it next time around. */
+ printk("<0>Oh no, no grant references.\n");
return -1;
}
skb_co->gref_pool = gref_pool;
- skb_co->inline_prefix_size = inline_prefix_size;
-
- if (use_subpage_grants)
- skb_co->type = NC2_PACKET_TYPE_receiver_copy;
- else
- skb_co->type = NC2_PACKET_TYPE_receiver_map;
return 0;
}
}
void xmit_grant(struct netchannel2_ring_pair *ncrp,
- struct sk_buff *skb,
- int use_subpage_grants,
- volatile void *msg_buf)
+ struct sk_buff *skb,
+ int use_subpage_grants,
+ volatile void *msg_buf)
{
volatile struct netchannel2_msg_packet *msg = msg_buf;
struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
{
unsigned msg_size;
- msg_size = get_transmitted_packet_msg_size(skb);
+ msg_size = get_transmitted_packet_reserve_size(skb);
if (!nc2_reserve_payload_bytes(&nc->rings.prod_ring, msg_size))
return -1;
return 0;
/* Figure out how much space @tp will take up on the ring. */
-unsigned get_transmitted_packet_msg_size(struct sk_buff *skb)
+/* Size of the message header plus the inline prefix, rounded up to an
+   8-byte boundary.  NOTE(review): unlike
+   get_transmitted_packet_reserve_size() this does not count fragment
+   descriptors -- confirm fragment space is accounted for elsewhere.
+   Made static on the assumption that all external callers now go
+   through get_transmitted_packet_reserve_size(); TODO confirm. */
+static unsigned get_transmitted_packet_msg_size(struct sk_buff *skb)
{
struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
return (sizeof(struct netchannel2_msg_packet) +
skb_co->inline_prefix_size + 7) & ~7;
}
+/* How much space do we want to reserve on the ring for this packet?
+   This is an approximation of how much space it'll actually take up
+   which doesn't require us to walk over much memory, so that we can
+   stop the interface reasonably quickly when we know the ring is
+   going to be full. */
+unsigned get_transmitted_packet_reserve_size(struct sk_buff *skb)
+{
+	struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
+	/* Header + one fragment descriptor per page fragment + the
+	   inline prefix, rounded up to an 8-byte boundary. */
+	return (sizeof(struct netchannel2_msg_packet) +
+		sizeof(struct netchannel2_fragment) * skb_shinfo(skb)->nr_frags +
+		skb_co->inline_prefix_size + 7) & ~7;
+}
+
/* Do the minimum amount of work to be certain that when we come to
transmit this packet we won't run out of resources. This includes
figuring out how we're going to fragment the packet for
skb_co->policy = policy;
}
- msg_size = get_transmitted_packet_msg_size(skb);
+ msg_size = get_transmitted_packet_reserve_size(skb);
if (nc2_reserve_payload_bytes(&nc->rings.prod_ring, msg_size))
return 0;
unsigned msg_size;
volatile struct netchannel2_msg_packet *msg;
unsigned nr_credits;
+ int r;
ENTER();
- msg_size = get_transmitted_packet_msg_size(skb);
+ msg_size = get_transmitted_packet_reserve_size(skb);
/* Un-reserve the space we reserved for the packet. */
BUG_ON(ncrp->prod_ring.reserve < msg_size);
ncrp->prod_ring.reserve -= msg_size;
+
+ switch (skb_co->policy) {
+ case transmit_policy_grant:
+ r = prepare_xmit_allocate_grant2(ncrp, skb, 1);
+ break;
+ case transmit_policy_map:
+ r = prepare_xmit_allocate_grant2(ncrp, skb, 0);
+ break;
+ default:
+ r = 0;
+ break;
+ }
+
+ if (r < 0) {
+ /* Oops, out of grant references. Nevermind. */
+ printk("<0>Out of grefs.\n");
+ goto requeue;
+ }
+
+ msg_size = get_transmitted_packet_msg_size(skb);
if (!nc2_can_send_payload_bytes(&ncrp->prod_ring, msg_size)) {
+ requeue:
/* Aw, crud. We had to transmit a PAD message at just
the wrong time, and our attempt to reserve ring
space failed. Back all the way back out of
BUG();
}
- ncrp->prod_ring.prod_pvt += msg_size;
+ ncrp->prod_ring.prod_pvt += msg_size;
- BUG_ON(ncrp->prod_ring.bytes_available < msg_size);
+ BUG_ON(ncrp->prod_ring.bytes_available < msg_size);
- ncrp->prod_ring.bytes_available -= msg_size;
+ ncrp->prod_ring.bytes_available -= msg_size;
- ncrp->pending_time_sensitive_messages = 1;
+ ncrp->pending_time_sensitive_messages = 1;
if (skb_co->tp) {
ncrp->expected_finish_messages++;