xenbits.xensource.com Git - people/ssmith/nc2-2.6.27.bak/.git/commitdiff
Check in world state. master nc2/forklift-over-patchqueue
authorSteven Smith <ssmith@weybridge.uk.xensource.com>
Fri, 29 May 2009 12:06:42 +0000 (13:06 +0100)
committerSteven Smith <ssmith@weybridge.uk.xensource.com>
Fri, 29 May 2009 12:06:42 +0000 (13:06 +0100)
drivers/xen/netchannel2/bypass.c
drivers/xen/netchannel2/netchannel2_core.h
drivers/xen/netchannel2/rscb.c
drivers/xen/netchannel2/vmq.c
drivers/xen/netchannel2/xmit_packet.c

index 5e3e4452d15490b2a114121a481c782882be6873..e4ebef77a2ea97acf15d5f0e74574adfbcad9924 100644 (file)
@@ -59,7 +59,7 @@ int bypass_xmit_packet(struct netchannel2 *nc,
         }
 
         skb_co->policy = policy;
-        msg_size = get_transmitted_packet_msg_size(skb);
+        msg_size = get_transmitted_packet_reserve_size(skb);
         if (!nc2_reserve_payload_bytes(&rings->prod_ring, msg_size)) {
                 /* Uh oh. */
                 printk("<0>Drop no reserve.\n");
index 4a6fd36d452e578bf173e4777a83b965d3933695..f8f88c826348d36158a8cc9ae31c860626f711b1 100644 (file)
@@ -984,10 +984,13 @@ int prepare_xmit_allocate_small(struct netchannel2_ring_pair *ncrp,
 int prepare_xmit_allocate_grant(struct netchannel2_ring_pair *ncrp,
                                 struct sk_buff *skb,
                                 int use_subpage_grants);
+int prepare_xmit_allocate_grant2(struct netchannel2_ring_pair *ncrp,
+                                struct sk_buff *skb,
+                                int use_subpage_grants);
 void xmit_grant(struct netchannel2_ring_pair *ncrp,
-                struct sk_buff *skb,
-                int use_subpage_grants,
-                volatile void *msg);
+               struct sk_buff *skb,
+               int use_subpage_grants,
+               volatile void *msg);
 int prepare_xmit_allocate_post(struct netchannel2 *nc,
                                struct sk_buff *skb);
 void xmit_post(struct netchannel2 *nc,
@@ -1038,7 +1041,7 @@ void _nc2_attach_rings(struct netchannel2_ring_pair *ncrp,
                        domid_t otherend_id);
 void queue_packet_to_interface(struct sk_buff *skb,
                                struct netchannel2_ring_pair *ncrp);
-unsigned get_transmitted_packet_msg_size(struct sk_buff *skb);
+unsigned get_transmitted_packet_reserve_size(struct sk_buff *skb);
 void init_ring_pair(struct netchannel2_ring_pair *ncrp);
 
 irqreturn_t nc2_int(int irq, void *dev_id);
index 985e4760ad748cb14383c89ba8211664e6d652af..cb2da71e0509f37ab6b1eaa1f6ca49fc208cc772 100644 (file)
@@ -262,6 +262,27 @@ int prepare_xmit_allocate_grant(struct netchannel2_ring_pair *ncrp,
                                 int use_subpage_grants)
 {
         struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
+
+        if (allocate_txp_slot(ncrp, skb) < 0)
+                return -1;
+
+        if (use_subpage_grants)
+                skb_co->type = NC2_PACKET_TYPE_receiver_copy;
+        else
+                skb_co->type = NC2_PACKET_TYPE_receiver_map;
+
+        skb_co->inline_prefix_size = skb_headlen(skb);
+       if (skb_co->inline_prefix_size > PACKET_PREFIX_SIZE)
+               skb_co->inline_prefix_size = PACKET_PREFIX_SIZE;
+
+        return 0;
+}
+
+int prepare_xmit_allocate_grant2(struct netchannel2_ring_pair *ncrp,
+                                struct sk_buff *skb,
+                                int use_subpage_grants)
+{
+        struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
         unsigned nr_fragments;
         struct sk_buff *cur_skb;
         grant_ref_t gref_pool;
@@ -269,12 +290,7 @@ int prepare_xmit_allocate_grant(struct netchannel2_ring_pair *ncrp,
         unsigned inline_bytes_left;
         unsigned inline_prefix_size;
 
-        if (allocate_txp_slot(ncrp, skb) < 0)
-                return -1;
-
-        inline_prefix_size = PACKET_PREFIX_SIZE;
-        if (skb_headlen(skb) < inline_prefix_size)
-                inline_prefix_size = skb_headlen(skb);
+        inline_prefix_size = skb_co->inline_prefix_size;
 
         if (skb_co->nr_fragments == 0) {
                 inline_bytes_left = inline_prefix_size;
@@ -319,15 +335,10 @@ int prepare_xmit_allocate_grant(struct netchannel2_ring_pair *ncrp,
                 release_txp_slot(ncrp, skb);
                 /* Leave skb_co->nr_fragments set, so that we don't
                    have to recompute it next time around. */
+               printk("<0>Oh no, no grant references.\n");
                 return -1;
         }
         skb_co->gref_pool = gref_pool;
-        skb_co->inline_prefix_size = inline_prefix_size;
-
-        if (use_subpage_grants)
-                skb_co->type = NC2_PACKET_TYPE_receiver_copy;
-        else
-                skb_co->type = NC2_PACKET_TYPE_receiver_map;
 
         return 0;
 }
@@ -417,9 +428,9 @@ static int grant_data_area(struct netchannel2_ring_pair *ncrp,
 }
 
 void xmit_grant(struct netchannel2_ring_pair *ncrp,
-                struct sk_buff *skb,
-                int use_subpage_grants,
-                volatile void *msg_buf)
+               struct sk_buff *skb,
+               int use_subpage_grants,
+               volatile void *msg_buf)
 {
         volatile struct netchannel2_msg_packet *msg = msg_buf;
         struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
index b98d910e4504a5766d3c0a385a3ddcd59771d9d9..d2eb3f1b0980e736433c2ae995e831ea53752922 100644 (file)
@@ -524,7 +524,7 @@ static int prepare_xmit_allocate_vmq(struct netchannel2 *nc,
 {
         unsigned msg_size;
 
-        msg_size = get_transmitted_packet_msg_size(skb);
+        msg_size = get_transmitted_packet_reserve_size(skb);
         if (!nc2_reserve_payload_bytes(&nc->rings.prod_ring, msg_size))
                 return -1;
         return 0;
index faaa75f8d1de08f5b6bcdceb71acb1514633997a..730553d7e685932e61d0c631b883a8ce8d16a1b2 100644 (file)
@@ -44,7 +44,7 @@ int prepare_xmit_allocate_small(struct netchannel2_ring_pair *ncrp,
 
 
 /* Figure out how much space @tp will take up on the ring. */
-unsigned get_transmitted_packet_msg_size(struct sk_buff *skb)
+static unsigned get_transmitted_packet_msg_size(struct sk_buff *skb)
 {
         struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
         return (sizeof(struct netchannel2_msg_packet) +
@@ -52,6 +52,19 @@ unsigned get_transmitted_packet_msg_size(struct sk_buff *skb)
                 skb_co->inline_prefix_size + 7) & ~7;
 }
 
+/* How much space do we want to reserve on the ring for this packet?
+   This is an approximation of how much space it'll actually take up
+   which doesn't require us to walk over much memory, so that we can
+   stop the interface reasonably quickly when we know the ring is
+   going to be full. */
+unsigned get_transmitted_packet_reserve_size(struct sk_buff *skb)
+{
+        struct skb_cb_overlay *skb_co = get_skb_overlay(skb);
+        return (sizeof(struct netchannel2_msg_packet) +
+                sizeof(struct netchannel2_fragment) * skb_shinfo(skb)->nr_frags +
+                skb_co->inline_prefix_size + 7) & ~7;
+}
+
 /* Do the minimum amount of work to be certain that when we come to
    transmit this packet we won't run out of resources.  This includes
    figuring out how we're going to fragment the packet for
@@ -96,7 +109,7 @@ int prepare_xmit_allocate_resources(struct netchannel2 *nc,
                 skb_co->policy = policy;
         }
 
-        msg_size = get_transmitted_packet_msg_size(skb);
+        msg_size = get_transmitted_packet_reserve_size(skb);
         if (nc2_reserve_payload_bytes(&nc->rings.prod_ring, msg_size))
                 return 0;
 
@@ -144,14 +157,36 @@ void nc2_really_start_xmit(struct netchannel2_ring_pair *ncrp,
         unsigned msg_size;
         volatile struct netchannel2_msg_packet *msg;
         unsigned nr_credits;
+       int r;
 
         ENTER();
 
-        msg_size = get_transmitted_packet_msg_size(skb);
+        msg_size = get_transmitted_packet_reserve_size(skb);
         /* Un-reserve the space we reserved for the packet. */
         BUG_ON(ncrp->prod_ring.reserve < msg_size);
         ncrp->prod_ring.reserve -= msg_size;
+
+        switch (skb_co->policy) {
+        case transmit_policy_grant:
+                r = prepare_xmit_allocate_grant2(ncrp, skb, 1);
+                break;
+        case transmit_policy_map:
+                r = prepare_xmit_allocate_grant2(ncrp, skb, 0);
+                break;
+        default:
+               r = 0;
+               break;
+        }
+
+       if (r < 0) {
+               /* Oops, out of grant references.  Nevermind. */
+               printk("<0>Out of grefs.\n");
+               goto requeue;
+       }
+
+        msg_size = get_transmitted_packet_msg_size(skb);
         if (!nc2_can_send_payload_bytes(&ncrp->prod_ring, msg_size)) {
+       requeue:
                 /* Aw, crud.  We had to transmit a PAD message at just
                    the wrong time, and our attempt to reserve ring
                    space failed.  Back all the way back out of
@@ -221,13 +256,13 @@ void nc2_really_start_xmit(struct netchannel2_ring_pair *ncrp,
                 BUG();
         }
 
-        ncrp->prod_ring.prod_pvt += msg_size;
+       ncrp->prod_ring.prod_pvt += msg_size;
 
-        BUG_ON(ncrp->prod_ring.bytes_available < msg_size);
+       BUG_ON(ncrp->prod_ring.bytes_available < msg_size);
 
-        ncrp->prod_ring.bytes_available -= msg_size;
+       ncrp->prod_ring.bytes_available -= msg_size;
 
-        ncrp->pending_time_sensitive_messages = 1;
+       ncrp->pending_time_sensitive_messages = 1;
 
         if (skb_co->tp) {
                 ncrp->expected_finish_messages++;