ia64/xen-unstable

changeset 2498:35090d6a8da4

bitkeeper revision 1.1159.79.3 (414c2251mzKL-aQm0trJwzhIuFOv8Q)

Stricter skbuff checking in the net backend driver. Ensure that skbs to be
transferred to other domains are allocated out of our secure cache.
author kaf24@freefall.cl.cam.ac.uk
date Sat Sep 18 11:56:01 2004 +0000 (2004-09-18)
parents de382cfbeb83
children 5e2937aebf2c
files linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
line diff
     1.1 --- a/linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c	Sat Sep 18 10:43:00 2004 +0000
     1.2 +++ b/linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c	Sat Sep 18 11:56:01 2004 +0000
     1.3 @@ -17,7 +17,8 @@
     1.4  
     1.5  EXPORT_SYMBOL(__dev_alloc_skb);
     1.6  
     1.7 -static kmem_cache_t *skbuff_cachep;
     1.8 +/* Referenced in netback.c. */
     1.9 +/*static*/ kmem_cache_t *skbuff_cachep;
    1.10  
    1.11  struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
    1.12  {
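The hunk above un-statics skbuff_cachep so that netback.c can reference it, and the visible declaration of __dev_alloc_skb is where buffers enter that cache. As a rough sketch of the allocation side (the function body is truncated in this excerpt; alloc_skb_from_cache() and the 16-byte headroom are assumptions, not shown in the diff), it plausibly reads:

    struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
    {
        struct sk_buff *skb;

        /* Assumed helper: take the data area from skbuff_cachep so the
         * backing page carries this cache's slab back-pointer. */
        skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
        if ( likely(skb != NULL) )
            skb_reserve(skb, 16); /* headroom, as dev_alloc_skb() leaves */
        return skb;
    }

Every skb built this way has its data page stamped with skbuff_cachep, which is exactly what the backend checks below.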
     2.1 --- a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Sat Sep 18 10:43:00 2004 +0000
     2.2 +++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Sat Sep 18 11:56:01 2004 +0000
     2.3 @@ -108,6 +108,21 @@ static inline void maybe_schedule_tx_act
     2.4          tasklet_schedule(&net_tx_tasklet);
     2.5  }
     2.6  
     2.7 +/*
     2.8 + * A gross way of confirming the origin of an skb data page. The slab
     2.9 + * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
    2.10 + */
    2.11 +static inline int is_xen_skb(struct sk_buff *skb)
    2.12 +{
    2.13 +    extern kmem_cache_t *skbuff_cachep;
    2.14 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    2.15 +    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
    2.16 +#else
    2.17 +    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
    2.18 +#endif
    2.19 +    return (cp == skbuff_cachep);
    2.20 +}
    2.21 +
    2.22  int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
    2.23  {
    2.24      netif_t *netif = (netif_t *)dev->priv;
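The is_xen_skb() addition above leans on a slab implementation detail: when the allocator claims pages for a slab, it caches the owning kmem_cache_t pointer in the page struct (page->lru.next on 2.6, page->list.next on 2.4). A minimal sketch of that bookkeeping, assuming the mm/slab.c macro names of that era:

    /* mm/slab.c stamps each slab page with its owning cache ... */
    #define SET_PAGE_CACHE(pg, x)  ((pg)->lru.next = (struct list_head *)(x))
    /* ... and this is the read-back that is_xen_skb() open-codes: */
    #define GET_PAGE_CACHE(pg)     ((kmem_cache_t *)(pg)->lru.next)

So the backend's test is effectively GET_PAGE_CACHE(virt_to_page(skb->head)) == skbuff_cachep, which holds only if the data area was carved out of the secure cache.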
    2.25 @@ -122,15 +137,12 @@ int netif_be_start_xmit(struct sk_buff *
    2.26      /*
    2.27       * We do not copy the packet unless:
    2.28       *  1. The data is shared; or
    2.29 -     *  2. It spans a page boundary; or
    2.30 -     *  3. We cannot be sure the whole data page is allocated.
    2.31 +     *  2. The data is not allocated from our special cache.
    2.32       * The copying method is taken from skb_copy().
    2.33       * NB. We also couldn't cope with fragmented packets, but we won't get
    2.34  *     any because we do not advertise the NETIF_F_SG feature.
    2.35       */
    2.36 -    if ( skb_shared(skb) || skb_cloned(skb) || 
    2.37 -         (((unsigned long)skb->end ^ (unsigned long)skb->head) & PAGE_MASK) ||
    2.38 -         ((skb->end - skb->head) < (PAGE_SIZE/2)) )
    2.39 +    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
    2.40      {
    2.41          struct sk_buff *nskb = dev_alloc_skb(PAGE_SIZE);
    2.42          int hlen = skb->data - skb->head;
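The excerpt ends just as the copy path begins. A sketch of how it plausibly continues, following skb_copy() as the comment says (everything past nskb and hlen is an assumption, not part of this changeset excerpt):

        /* Slow path: rebuild the packet inside the secure cache. */
        if ( unlikely(nskb == NULL) )
            goto drop;                     /* hypothetical error label */
        skb_reserve(nskb, hlen);           /* preserve original headroom */
        __skb_put(nskb, skb->len);
        /* skb_copy()-style bulk copy of header plus payload. */
        if ( skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen) )
            BUG();
        dev_kfree_skb(skb);
        skb = nskb;                        /* now is_xen_skb(skb) holds */

After the swap the data page belongs to skbuff_cachep, so it is safe to hand over to the destination domain.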