ia64/xen-unstable

changeset 2528:157757c292d8

bitkeeper revision 1.1159.83.4 (4153e859Tp0c82bCwgrj-LS6BXue2g)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-nbsd
author cl349@freefall.cl.cam.ac.uk
date Fri Sep 24 09:26:49 2004 +0000 (2004-09-24)
parents d2d465977f58 28ffe4d8f416
children 6dde6c567e14 7d618b439da4
files linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
line diff
--- a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 23 19:17:16 2004 +0000
+++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 24 09:26:49 2004 +0000
@@ -13,6 +13,7 @@
 #include "common.h"
 
 static void netif_page_release(struct page *page);
+static void netif_skb_release(struct sk_buff *skb);
 static void make_tx_response(netif_t *netif, 
                              u16      id,
                              s8       st);
@@ -40,7 +41,8 @@ static unsigned char rx_notify[NR_EVENT_
 static unsigned long mmap_vstart;
 #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
 
-#define PKT_PROT_LEN (ETH_HLEN + 20)
+#define PKT_MIN_LEN (ETH_HLEN + 20)
+#define PKT_PROT_LEN 64
 
 static struct {
     netif_tx_request_t req;
@@ -385,6 +387,7 @@ static void net_tx_action(unsigned long 
     NETIF_RING_IDX i;
     multicall_entry_t *mcl;
     PEND_RING_IDX dc, dp;
+    unsigned int data_len;
 
     if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
         goto skip_dealloc;
@@ -497,7 +500,7 @@ static void net_tx_action(unsigned long 
 
         netif_schedule_work(netif);
 
-        if ( unlikely(txreq.size <= PKT_PROT_LEN) || 
+        if ( unlikely(txreq.size <= PKT_MIN_LEN) || 
              unlikely(txreq.size > ETH_FRAME_LEN) )
         {
             DPRINTK("Bad packet size: %d\n", txreq.size);
@@ -519,7 +522,9 @@ static void net_tx_action(unsigned long 
 
         pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
 
-        if ( unlikely((skb = alloc_skb(PKT_PROT_LEN+16, GFP_ATOMIC)) == NULL) )
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
         {
             DPRINTK("Can't allocate a skb in start_xmit.\n");
             make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
@@ -578,19 +583,28 @@ static void net_tx_action(unsigned long 
         phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
             FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
 
-        __skb_put(skb, PKT_PROT_LEN);
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+        __skb_put(skb, data_len);
         memcpy(skb->data, 
                (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
-               PKT_PROT_LEN);
+               data_len);
 
-        /* Append the packet payload as a fragment. */
-        skb_shinfo(skb)->frags[0].page        = 
-            virt_to_page(MMAP_VADDR(pending_idx));
-        skb_shinfo(skb)->frags[0].size        = txreq.size - PKT_PROT_LEN;
-        skb_shinfo(skb)->frags[0].page_offset = 
-            (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
-        skb_shinfo(skb)->nr_frags = 1;
-        skb->data_len  = txreq.size - PKT_PROT_LEN;
+        if (data_len < txreq.size) {
+            /* Append the packet payload as a fragment. */
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
+            skb_shinfo(skb)->frags[0].page_offset = 
+                (txreq.addr + data_len) & ~PAGE_MASK;
+            skb_shinfo(skb)->nr_frags = 1;
+        } else {
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb->destructor = netif_skb_release;
+        }
+
+        skb->data_len  = txreq.size - data_len;
         skb->len      += skb->data_len;
 
         skb->dev      = netif->dev;
@@ -606,13 +620,9 @@ static void net_tx_action(unsigned long 
     }
 }
 
-static void netif_page_release(struct page *page)
+static void netif_idx_release(u16 pending_idx)
 {
     unsigned long flags;
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    /* Ready for next use. */
-    set_page_count(page, 1);
 
     spin_lock_irqsave(&dealloc_lock, flags);
     dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
@@ -621,6 +631,24 @@ static void netif_page_release(struct pa
     tasklet_schedule(&net_tx_tasklet);
 }
 
+static void netif_page_release(struct page *page)
+{
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    /* Ready for next use. */
+    set_page_count(page, 1);
+
+    netif_idx_release(pending_idx);
+}
+
+static void netif_skb_release(struct sk_buff *skb)
+{
+    struct page *page = skb_shinfo(skb)->frags[0].page;
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    netif_idx_release(pending_idx);
+}
+
 #if 0
 long flush_bufs_for_netif(netif_t *netif)
 {
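
The substance of this changeset is the transmit-path change above: rather than always copying PKT_PROT_LEN bytes into the skb's linear area and unconditionally attaching the rest of the granted page as a fragment, net_tx_action now copies min(txreq.size, PKT_PROT_LEN) bytes and attaches a fragment only when the packet is longer than that. Packets that fit entirely in the linear area get an skb destructor (netif_skb_release) instead, which recovers the pending index from frags[0].page and funnels into the same dealloc ring through the new netif_idx_release() helper. The following is a minimal, self-contained C sketch of the size arithmetic only; the constants mirror the diff, while split() is a hypothetical stand-in for illustration, not netback code:

/* Standalone sketch of the linear/fragment split introduced above.
 * PKT_MIN_LEN and PKT_PROT_LEN mirror the diff; everything else is
 * illustrative. */
#include <stdio.h>

#define ETH_HLEN      14              /* Ethernet header length */
#define ETH_FRAME_LEN 1514            /* max frame accepted by the check */
#define PKT_MIN_LEN   (ETH_HLEN + 20) /* smallest acceptable request */
#define PKT_PROT_LEN  64              /* bytes copied into the linear area */

static void split(unsigned int size)
{
    /* Mirrors the size check in net_tx_action(). */
    if (size <= PKT_MIN_LEN || size > ETH_FRAME_LEN) {
        printf("size %4u: rejected (bad packet size)\n", size);
        return;
    }

    /* Copy at most PKT_PROT_LEN bytes; any remainder stays in the
     * granted page and becomes frag 0 of the skb. */
    unsigned int data_len = size > PKT_PROT_LEN ? PKT_PROT_LEN : size;
    unsigned int frag_len = size - data_len;

    if (frag_len)
        printf("size %4u: copy %u bytes, frag of %u bytes\n",
               size, data_len, frag_len);
    else
        printf("size %4u: copy %u bytes, no frag, destructor releases slot\n",
               size, data_len);
}

int main(void)
{
    unsigned int sizes[] = { 20, 40, 64, 200, 1514, 1515 };
    for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        split(sizes[i]);
    return 0;
}

Note that in the no-fragment branch the diff still records the page in frags[0].page while leaving nr_frags at zero; the pointer is kept purely so netif_skb_release() can map the skb back to its pending slot when the destructor runs.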