ia64/xen-unstable

changeset 100:d59292c8dcab

bitkeeper revision 1.18 (3e33c6cdMqnqQnkIxpq_9HHmWHAHfA)

Merge boulderdash.cl.cam.ac.uk:/usr/groups/xeno/BK/xeno
into boulderdash.cl.cam.ac.uk:/local/scratch/bd240/xeno
author bd240@boulderdash.cl.cam.ac.uk
date Sun Jan 26 11:30:21 2003 +0000 (2003-01-26)
parents 2ca9cc27dbec a8063692097a
children 0ecf87d4739a
files .rootkeys BitKeeper/etc/logging_ok xen-2.4.16/Makefile xen-2.4.16/arch/i386/mm.c xen-2.4.16/common/domain.c xen-2.4.16/common/domain_page.c xen-2.4.16/common/memory.c xen-2.4.16/common/page_alloc.c xen-2.4.16/common/slab.c xen-2.4.16/include/asm-i386/domain_page.h xen-2.4.16/include/asm-i386/page.h xen-2.4.16/include/xeno/slab.h xen-2.4.16/net/dev.c
line diff
     1.1 --- a/.rootkeys	Sun Jan 26 11:30:17 2003 +0000
     1.2 +++ b/.rootkeys	Sun Jan 26 11:30:21 2003 +0000
     1.3 @@ -39,6 +39,7 @@ 3ddb79bddEYJbcURvqqcx99Yl2iAhQ xen-2.4.1
     1.4  3ddb79bdrqnW93GR9gZk1OJe1qK-iQ xen-2.4.16/common/brlock.c
     1.5  3ddb79bdLX_P6iB7ILiblRLWvebapg xen-2.4.16/common/dom0_ops.c
     1.6  3ddb79bdYO5D8Av12NHqPeSviav7cg xen-2.4.16/common/domain.c
     1.7 +3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen-2.4.16/common/domain_page.c
     1.8  3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen-2.4.16/common/event.c
     1.9  3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen-2.4.16/common/kernel.c
    1.10  3ddb79bduhSEZI8xa7IbGQCpap5y2A xen-2.4.16/common/lib.c
     2.1 --- a/BitKeeper/etc/logging_ok	Sun Jan 26 11:30:17 2003 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Sun Jan 26 11:30:21 2003 +0000
     2.3 @@ -1,6 +1,7 @@
     2.4  akw27@boulderdash.cl.cam.ac.uk
     2.5  akw27@labyrinth.cl.cam.ac.uk
     2.6  bd240@boulderdash.cl.cam.ac.uk
     2.7 +iap10@labyrinth.cl.cam.ac.uk
     2.8  kaf24@labyrinth.cl.cam.ac.uk
     2.9  kaf24@plym.cl.cam.ac.uk
    2.10  kaf24@striker.cl.cam.ac.uk
     4.1 --- a/xen-2.4.16/arch/i386/mm.c	Sun Jan 26 11:30:17 2003 +0000
     4.2 +++ b/xen-2.4.16/arch/i386/mm.c	Sun Jan 26 11:30:21 2003 +0000
     4.3 @@ -102,7 +102,8 @@ long do_stack_and_ldt_switch(
     4.4  
     4.5      if ( ldts != current->mm.ldt_sel )
     4.6      {
     4.7 -        unsigned long *ptabent = GET_GDT_ADDRESS(current);
     4.8 +        unsigned long *ptabent;
     4.9 +        ptabent = (unsigned long *)GET_GDT_ADDRESS(current);
    4.10          /* Out of range for GDT table? */
    4.11          if ( (ldts * 8) > GET_GDT_ENTRIES(current) ) return -1;
    4.12          ptabent += ldts * 2; /* 8 bytes per desc == 2 * unsigned long */
     5.1 --- a/xen-2.4.16/common/domain.c	Sun Jan 26 11:30:17 2003 +0000
     5.2 +++ b/xen-2.4.16/common/domain.c	Sun Jan 26 11:30:21 2003 +0000
     5.3 @@ -554,7 +554,7 @@ int setup_guestos(struct task_struct *p,
     5.4      unsigned int ft_size = 0;
     5.5      start_info_t  *virt_startinfo_address;
     5.6      unsigned long long time;
     5.7 -    l2_pgentry_t *l2tab;
     5.8 +    l2_pgentry_t *l2tab, *l2start;
     5.9      l1_pgentry_t *l1tab = NULL;
    5.10      struct pfn_info *page = NULL;
    5.11      net_ring_t *net_ring;
    5.12 @@ -609,7 +609,7 @@ int setup_guestos(struct task_struct *p,
    5.13       * filled in by now !!
    5.14       */
    5.15      phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
    5.16 -    l2tab = map_domain_mem(phys_l2tab);
    5.17 +    l2start = l2tab = map_domain_mem(phys_l2tab);
    5.18      memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
    5.19      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.20          mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
    5.21 @@ -630,17 +630,16 @@ int setup_guestos(struct task_struct *p,
    5.22      if(dom == 0)
    5.23          ft_size = frame_table_size; 
    5.24  
    5.25 -    phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);    
    5.26 +    l2tab += l2_table_offset(virt_load_address);
    5.27      for ( cur_address  = start_address;
    5.28            cur_address != (end_address + PAGE_SIZE + ft_size);
    5.29            cur_address += PAGE_SIZE )
    5.30      {
    5.31          if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
    5.32          {
    5.33 +            if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
    5.34              phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
    5.35 -            l2tab = map_domain_mem(phys_l2tab);
    5.36 -            *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
    5.37 -            phys_l2tab += sizeof(l2_pgentry_t);
    5.38 +            *l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
    5.39              l1tab = map_domain_mem(phys_l1tab);
    5.40              clear_page(l1tab);
    5.41              l1tab += l1_table_offset(
    5.42 @@ -656,43 +655,39 @@ int setup_guestos(struct task_struct *p,
    5.43              page->type_count = page->tot_count = 1;
    5.44          }
    5.45      }
    5.46 +    unmap_domain_mem(l1tab-1);
    5.47      
    5.48      /* Pages that are part of page tables must be read-only. */
    5.49      vaddr = virt_load_address + alloc_address - start_address;
    5.50 -    phys_l2tab = pagetable_val(p->mm.pagetable) +
    5.51 -        (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
    5.52 -    l2tab = map_domain_mem(phys_l2tab);
    5.53 -    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
    5.54 -        (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
    5.55 -    phys_l2tab += sizeof(l2_pgentry_t);
    5.56 -    l1tab = map_domain_mem(phys_l1tab);
    5.57 +    l2tab = l2start + l2_table_offset(vaddr);
    5.58 +    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
    5.59 +    l1tab += l1_table_offset(vaddr);
    5.60 +    l2tab++;
    5.61      for ( cur_address  = alloc_address;
    5.62            cur_address != end_address;
    5.63            cur_address += PAGE_SIZE )
    5.64      {
    5.65 -        *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
    5.66          if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
    5.67          {
    5.68 -            l2tab = map_domain_mem(phys_l2tab);
    5.69 -            phys_l1tab = l2_pgentry_to_phys(*l2tab);
    5.70 -            phys_l2tab += sizeof(l2_pgentry_t);
    5.71 -            l1tab = map_domain_mem(phys_l1tab);
    5.72 +            unmap_domain_mem(l1tab-1);
    5.73 +            l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
    5.74 +            l2tab++;
    5.75          }
    5.76 +        *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
    5.77          page = frame_table + (cur_address >> PAGE_SHIFT);
    5.78          page->flags = dom | PGT_l1_page_table;
    5.79          page->tot_count++;
    5.80      }
    5.81 +    unmap_domain_mem(l1tab-1);
    5.82      page->flags = dom | PGT_l2_page_table;
    5.83  
    5.84      /* Map in the the shared info structure. */
    5.85      virt_shinfo_address = end_address - start_address + virt_load_address;
    5.86 -    phys_l2tab = pagetable_val(p->mm.pagetable) +
    5.87 -        (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
    5.88 -    l2tab = map_domain_mem(phys_l2tab);
    5.89 -    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
    5.90 -        (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
    5.91 -    l1tab = map_domain_mem(phys_l1tab);
    5.92 +    l2tab = l2start + l2_table_offset(virt_shinfo_address);
    5.93 +    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
    5.94 +    l1tab += l1_table_offset(virt_shinfo_address);
    5.95      *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
    5.96 +    unmap_domain_mem(l1tab);
    5.97  
    5.98      /* Set up shared info area. */
    5.99      rdtscll(time);
   5.100 @@ -709,13 +704,11 @@ int setup_guestos(struct task_struct *p,
   5.101              cur_address < virt_ftable_end_addr;
   5.102              cur_address += PAGE_SIZE)
   5.103          {
   5.104 -            phys_l2tab = pagetable_val(p->mm.pagetable) +
   5.105 -                (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
   5.106 -            l2tab = map_domain_mem(phys_l2tab);
   5.107 -            phys_l1tab = l2_pgentry_to_phys(*l2tab) + 
   5.108 -                (l1_table_offset(cur_address) * sizeof(l1_pgentry_t)); 
   5.109 -            l1tab = map_domain_mem(phys_l1tab);
   5.110 +            l2tab = l2start + l2_table_offset(cur_address);
   5.111 +            l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
   5.112 +            l1tab += l1_table_offset(cur_address);
   5.113              *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
   5.114 +            unmap_domain_mem(l1tab);
   5.115              ft_mapping += PAGE_SIZE;
   5.116          }
   5.117      }
   5.118 @@ -724,6 +717,8 @@ int setup_guestos(struct task_struct *p,
   5.119          (alloc_address - start_address - PAGE_SIZE + virt_load_address);
   5.120      virt_stack_address  = (unsigned long)virt_startinfo_address;
   5.121  
   5.122 +    unmap_domain_mem(l2start);
   5.123 +
   5.124      /* Install the new page tables. */
   5.125      __cli();
   5.126      __asm__ __volatile__ (
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen-2.4.16/common/domain_page.c	Sun Jan 26 11:30:21 2003 +0000
     6.3 @@ -0,0 +1,67 @@
     6.4 +/******************************************************************************
     6.5 + * domain_page.h
     6.6 + * 
     6.7 + * Allow temporary mapping of domain pages. Based on ideas from the
     6.8 + * Linux PKMAP code -- the copyrights and credits are retained below.
     6.9 + */
    6.10 +
    6.11 +/*
    6.12 + * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
    6.13 + *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de *
    6.14 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
    6.15 + */
    6.16 +
    6.17 +#include <xeno/config.h>
    6.18 +#include <xeno/sched.h>
    6.19 +#include <xeno/mm.h>
    6.20 +#include <asm/domain_page.h>
    6.21 +#include <asm/pgalloc.h>
    6.22 +
    6.23 +static unsigned int map_idx[NR_CPUS];
    6.24 +
    6.25 +/* Use a spare PTE bit to mark entries ready for recycling. */
    6.26 +#define READY_FOR_TLB_FLUSH (1<<10)
    6.27 +
    6.28 +static void flush_all_ready_maps(void)
    6.29 +{
    6.30 +    unsigned long *cache = mapcache[smp_processor_id()];
    6.31 +
    6.32 +    /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
    6.33 +    do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
    6.34 +    while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
    6.35 +
    6.36 +    local_flush_tlb();
    6.37 +}
    6.38 +
    6.39 +
    6.40 +void *map_domain_mem(unsigned long pa)
    6.41 +{
    6.42 +    unsigned long va;
    6.43 +    int cpu = smp_processor_id();
    6.44 +    unsigned int idx;
    6.45 +    unsigned long *cache = mapcache[cpu];
    6.46 +    unsigned long flags;
    6.47 +
    6.48 +    local_irq_save(flags);
    6.49 +
    6.50 +    for ( ; ; )
    6.51 +    {
    6.52 +        idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1);
    6.53 +        if ( idx == 0 ) flush_all_ready_maps();
    6.54 +        if ( cache[idx] == 0 ) break;
    6.55 +    }
    6.56 +
    6.57 +    cache[idx] = (pa & PAGE_MASK) | PAGE_HYPERVISOR;
    6.58 +
    6.59 +    local_irq_restore(flags);
    6.60 +
    6.61 +    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
    6.62 +    return (void *)va;
    6.63 +}
    6.64 +
    6.65 +void unmap_domain_mem(void *va)
    6.66 +{
    6.67 +    unsigned int idx;
    6.68 +    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    6.69 +    mapcache[smp_processor_id()][idx] |= READY_FOR_TLB_FLUSH;
    6.70 +}
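
Usage sketch (editorial, not part of the changeset): the new file replaces the
old inline, hash-indexed mapper with a per-CPU ring of mapcache PTEs, and every
map_domain_mem() must now be paired with an unmap_domain_mem() on the same
page -- the domain.c and memory.c hunks in this changeset convert the callers
to that discipline. The caller name and pfn argument below are purely
illustrative; only map_domain_mem()/unmap_domain_mem() come from the patch.

    #include <asm/domain_page.h>

    static void touch_domain_frame(unsigned long pfn)
    {
        /* Map the frame into this CPU's mapcache window. */
        unsigned long *p = map_domain_mem(pfn << PAGE_SHIFT);

        p[0] = 0;   /* the whole page is addressable via 'p' until unmapped */

        /* Release the slot.  The PTE is only marked READY_FOR_TLB_FLUSH;
         * the TLB flush is deferred until the ring index wraps to zero. */
        unmap_domain_mem(p);
    }
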
     7.1 --- a/xen-2.4.16/common/memory.c	Sun Jan 26 11:30:17 2003 +0000
     7.2 +++ b/xen-2.4.16/common/memory.c	Sun Jan 26 11:30:21 2003 +0000
     7.3 @@ -181,13 +181,17 @@
     7.4  #define MEM_LOG(_f, _a...) ((void)0)
     7.5  #endif
     7.6  
     7.7 +/* Domain 0 is allowed to submit requests on behalf of others. */
     7.8 +#define DOMAIN_OKAY(_f) \
     7.9 +    ((((_f) & PG_domain_mask) == current->domain) || (current->domain == 0))
    7.10 +
    7.11  /* 'get' checks parameter for validity before inc'ing refcnt. */
    7.12  static int get_l2_table(unsigned long page_nr);
    7.13  static int get_l1_table(unsigned long page_nr);
    7.14  static int get_page(unsigned long page_nr, int writeable);
    7.15  static int inc_page_refcnt(unsigned long page_nr, unsigned int type);
    7.16  /* 'put' does no checking because if refcnt not zero, entity must be valid. */
    7.17 -static int  put_l2_table(unsigned long page_nr);
    7.18 +static void put_l2_table(unsigned long page_nr);
    7.19  static void put_l1_table(unsigned long page_nr);
    7.20  static void put_page(unsigned long page_nr, int writeable);
    7.21  static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
    7.22 @@ -247,14 +251,14 @@ static int inc_page_refcnt(unsigned long
    7.23      if ( page_nr >= max_page )
    7.24      {
    7.25          MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
    7.26 -        return(-1);
    7.27 +        return -1;
    7.28      }
    7.29      page = frame_table + page_nr;
    7.30      flags = page->flags;
    7.31 -    if ( (flags & PG_domain_mask) != current->domain )
    7.32 +    if ( !DOMAIN_OKAY(flags) )
    7.33      {
    7.34          MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
    7.35 -        return(-1);
    7.36 +        return -1;
    7.37      }
    7.38      if ( (flags & PG_type_mask) != type )
    7.39      {
    7.40 @@ -263,13 +267,13 @@ static int inc_page_refcnt(unsigned long
    7.41              MEM_LOG("Page %08lx bad type/count (%08lx!=%08x) cnt=%ld",
    7.42                      page_nr << PAGE_SHIFT,
    7.43                      flags & PG_type_mask, type, page_type_count(page));
    7.44 -            return(-1);
    7.45 +            return -1;
    7.46          }
    7.47          page->flags |= type;
    7.48      }
    7.49  
    7.50      get_page_tot(page);
    7.51 -    return(get_page_type(page));
    7.52 +    return get_page_type(page);
    7.53  }
    7.54  
    7.55  /* Return new refcnt, or -1 on error. */
    7.56 @@ -281,21 +285,46 @@ static int dec_page_refcnt(unsigned long
    7.57      if ( page_nr >= max_page )
    7.58      {
    7.59          MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
    7.60 -        return(-1);
    7.61 +        return -1;
    7.62      }
    7.63      page = frame_table + page_nr;
    7.64 -    if ( (page->flags & (PG_type_mask | PG_domain_mask)) != 
    7.65 -         (type | current->domain) ) 
    7.66 +    if ( !DOMAIN_OKAY(page->flags) || 
    7.67 +         ((page->flags & PG_type_mask) != type) ) 
    7.68      {
    7.69          MEM_LOG("Bad page type/domain (dom=%ld) (type %ld != expected %d)",
    7.70                  page->flags & PG_domain_mask, page->flags & PG_type_mask,
    7.71                  type);
    7.72 -        return(-1);
    7.73 +        return -1;
    7.74      }
    7.75      ASSERT(page_type_count(page) != 0);
    7.76      if ( (ret = put_page_type(page)) == 0 ) page->flags &= ~PG_type_mask;
    7.77      put_page_tot(page);
    7.78 -    return(ret);
    7.79 +    return ret;
    7.80 +}
    7.81 +
    7.82 +
    7.83 +/* We allow a L2 table to map itself, to achieve a linear pagetable. */
    7.84 +/* NB. There's no need for a put_twisted_l2_table() function!! */
    7.85 +static int get_twisted_l2_table(unsigned long entry_pfn, l2_pgentry_t l2e)
    7.86 +{
    7.87 +    unsigned long l2v = l2_pgentry_val(l2e);
    7.88 +
    7.89 +    /* Clearly the mapping must be read-only :-) */
    7.90 +    if ( (l2v & _PAGE_RW) )
    7.91 +    {
    7.92 +        MEM_LOG("Attempt to install twisted L2 entry with write permissions");
    7.93 +        return -1;
    7.94 +    }
    7.95 +
    7.96 +    /* This is a sufficient final check. */
    7.97 +    if ( (l2v >> PAGE_SHIFT) != entry_pfn )
    7.98 +    {
    7.99 +        MEM_LOG("L2 tables may not map _other_ L2 tables!\n");
   7.100 +        return -1;
   7.101 +    }
   7.102 +    
   7.103 +    /* We don't bump the reference counts. */
   7.104 +    return 0;
   7.105  }
   7.106  
   7.107  
   7.108 @@ -305,7 +334,7 @@ static int get_l2_table(unsigned long pa
   7.109      int i, ret=0;
   7.110      
   7.111      ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
   7.112 -    if ( ret != 0 ) return((ret < 0) ? ret : 0);
   7.113 +    if ( ret != 0 ) return (ret < 0) ? ret : 0;
   7.114      
   7.115      /* NEW level-2 page table! Deal with every PDE in the table. */
   7.116      p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
   7.117 @@ -317,12 +346,13 @@ static int get_l2_table(unsigned long pa
   7.118          {
   7.119              MEM_LOG("Bad L2 page type settings %04lx",
   7.120                      l2_pgentry_val(l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE));
   7.121 -            return(-1);
   7.122 +            ret = -1;
   7.123 +            goto out;
   7.124          }
   7.125 +        /* Assume we're mapping an L1 table, falling back to twisted L2. */
   7.126          ret = get_l1_table(l2_pgentry_to_pagenr(l2_entry));
   7.127 -        if ( ret ) return(ret);
   7.128 -        p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) + 
   7.129 -                                    ((i+1) * sizeof(l2_pgentry_t)));
   7.130 +        if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
   7.131 +        if ( ret ) goto out;
   7.132      }
   7.133  
   7.134      /* Now we simply slap in our high mapping. */
   7.135 @@ -333,7 +363,9 @@ static int get_l2_table(unsigned long pa
   7.136                DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
   7.137          mk_l2_pgentry(__pa(current->mm.perdomain_pt) | __PAGE_HYPERVISOR);
   7.138  
   7.139 -    return(ret);
   7.140 + out:
   7.141 +    unmap_domain_mem(p_l2_entry);
   7.142 +    return ret;
   7.143  }
   7.144  
   7.145  static int get_l1_table(unsigned long page_nr)
   7.146 @@ -343,7 +375,7 @@ static int get_l1_table(unsigned long pa
   7.147  
   7.148      /* Update ref count for page pointed at by PDE. */
   7.149      ret = inc_page_refcnt(page_nr, PGT_l1_page_table);
   7.150 -    if ( ret != 0 ) return((ret < 0) ? ret : 0);
   7.151 +    if ( ret != 0 ) return (ret < 0) ? ret : 0;
   7.152  
   7.153      /* NEW level-1 page table! Deal with every PTE in the table. */
   7.154      p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
   7.155 @@ -357,14 +389,18 @@ static int get_l1_table(unsigned long pa
   7.156              MEM_LOG("Bad L1 page type settings %04lx",
   7.157                      l1_pgentry_val(l1_entry) &
   7.158                      (_PAGE_GLOBAL|_PAGE_PAT));
   7.159 -            return(-1);
   7.160 +            ret = -1;
   7.161 +            goto out;
   7.162          }
   7.163          ret = get_page(l1_pgentry_to_pagenr(l1_entry),
   7.164                         l1_pgentry_val(l1_entry) & _PAGE_RW);
   7.165 -        if ( ret ) return(ret);
   7.166 +        if ( ret ) goto out;
   7.167      }
   7.168  
   7.169 -    return(ret);
   7.170 + out:
   7.171 +    /* Make sure we unmap the right page! */
   7.172 +    unmap_domain_mem(p_l1_entry-1);
   7.173 +    return ret;
   7.174  }
   7.175  
   7.176  static int get_page(unsigned long page_nr, int writeable)
   7.177 @@ -380,7 +416,7 @@ static int get_page(unsigned long page_n
   7.178      }
   7.179      page = frame_table + page_nr;
   7.180      flags = page->flags;
   7.181 -    if ( (flags & PG_domain_mask) != current->domain )
   7.182 +    if ( !DOMAIN_OKAY(flags) )
   7.183      {
   7.184          MEM_LOG("Bad page domain (%ld)", flags & PG_domain_mask);
   7.185          return(-1);
   7.186 @@ -407,28 +443,23 @@ static int get_page(unsigned long page_n
   7.187      return(0);
   7.188  }
   7.189  
   7.190 -static int put_l2_table(unsigned long page_nr)
   7.191 +static void put_l2_table(unsigned long page_nr)
   7.192  {
   7.193      l2_pgentry_t *p_l2_entry, l2_entry;
   7.194 -    int i, ret;
   7.195 +    int i;
   7.196  
   7.197 -    ret = dec_page_refcnt(page_nr, PGT_l2_page_table);
   7.198 -    if ( ret != 0 ) return((ret < 0) ? ret : 0);
   7.199 +    if ( dec_page_refcnt(page_nr, PGT_l2_page_table) ) return;
   7.200  
   7.201      /* We had last reference to level-2 page table. Free the PDEs. */
   7.202      p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
   7.203 -    for ( i = 0; i < HYPERVISOR_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.204 +    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.205      {
   7.206          l2_entry = *p_l2_entry++;
   7.207          if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
   7.208 -        { 
   7.209              put_l1_table(l2_pgentry_to_pagenr(l2_entry));
   7.210 -            p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) + 
   7.211 -                                        ((i+1) * sizeof(l2_pgentry_t)));
   7.212 -        }
   7.213      }
   7.214  
   7.215 -    return(0);
   7.216 +    unmap_domain_mem(p_l2_entry);
   7.217  }
   7.218  
   7.219  static void put_l1_table(unsigned long page_nr)
   7.220 @@ -436,7 +467,7 @@ static void put_l1_table(unsigned long p
   7.221      l1_pgentry_t *p_l1_entry, l1_entry;
   7.222      int i;
   7.223  
   7.224 -    if ( dec_page_refcnt(page_nr, PGT_l1_page_table) != 0 ) return;
   7.225 +    if ( dec_page_refcnt(page_nr, PGT_l1_page_table) ) return;
   7.226  
   7.227      /* We had last reference to level-1 page table. Free the PTEs. */
   7.228      p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
   7.229 @@ -449,6 +480,9 @@ static void put_l1_table(unsigned long p
   7.230                       l1_pgentry_val(l1_entry) & _PAGE_RW);
   7.231          }
   7.232      }
   7.233 +
   7.234 +    /* Make sure we unmap the right page! */
   7.235 +    unmap_domain_mem(p_l1_entry-1);
   7.236  }
   7.237  
   7.238  static void put_page(unsigned long page_nr, int writeable)
   7.239 @@ -456,7 +490,7 @@ static void put_page(unsigned long page_
   7.240      struct pfn_info *page;
   7.241      ASSERT(page_nr < max_page);
   7.242      page = frame_table + page_nr;
   7.243 -    ASSERT((page->flags & PG_domain_mask) == current->domain);
   7.244 +    ASSERT(DOMAIN_OKAY(page->flags));
   7.245      ASSERT((!writeable) || 
   7.246             ((page_type_count(page) != 0) && 
   7.247              ((page->flags & PG_type_mask) == PGT_writeable_page)));
   7.248 @@ -484,12 +518,6 @@ static int mod_l2_entry(unsigned long pa
   7.249          goto fail;
   7.250      }
   7.251  
   7.252 -    /*
   7.253 -     * Write the new value while pointer is still valid. The mapping cache 
   7.254 -     * entry for p_l2_entry may get clobbered by {put,get}_l1_table.
   7.255 -     */
   7.256 -    *p_l2_entry = new_l2_entry;
   7.257 -
   7.258      if ( (l2_pgentry_val(new_l2_entry) & _PAGE_PRESENT) )
   7.259      {
   7.260          if ( (l2_pgentry_val(new_l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE)) )
   7.261 @@ -508,7 +536,9 @@ static int mod_l2_entry(unsigned long pa
   7.262                  put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
   7.263              }
   7.264              
   7.265 -            if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) )
   7.266 +            /* Assume we're mapping an L1 table, falling back to twisted L2. */
   7.267 +            if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
   7.268 +                 get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
   7.269                  goto fail;
   7.270          } 
   7.271      }
   7.272 @@ -517,16 +547,13 @@ static int mod_l2_entry(unsigned long pa
   7.273          put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
   7.274      }
   7.275      
   7.276 -    return(0);
   7.277 +    *p_l2_entry = new_l2_entry;
   7.278 +    unmap_domain_mem(p_l2_entry);
   7.279 +    return 0;
   7.280  
   7.281   fail:
   7.282 -    /*
   7.283 -     * On failure we put the old value back. We need to regrab the
   7.284 -     * mapping of the physical page frame.
   7.285 -     */
   7.286 -    p_l2_entry = map_domain_mem(pa);
   7.287 -    *p_l2_entry = old_l2_entry;
   7.288 -    return(-1);
   7.289 +    unmap_domain_mem(p_l2_entry);
   7.290 +    return -1;
   7.291  }
   7.292  
   7.293  
   7.294 @@ -571,12 +598,13 @@ static int mod_l1_entry(unsigned long pa
   7.295                   l1_pgentry_val(old_l1_entry) & _PAGE_RW);
   7.296      }
   7.297  
   7.298 -    /* p_l1_entry is still valid here */
   7.299      *p_l1_entry = new_l1_entry;
   7.300 +    unmap_domain_mem(p_l1_entry);
   7.301 +    return 0;
   7.302  
   7.303 -    return(0);
   7.304   fail:
   7.305 -    return(-1);
   7.306 +    unmap_domain_mem(p_l1_entry);
   7.307 +    return -1;
   7.308  }
   7.309  
   7.310  
   7.311 @@ -614,7 +642,7 @@ static int do_extended_command(unsigned 
   7.312          break;
   7.313  
   7.314      case PGEXT_UNPIN_TABLE:
   7.315 -        if ( (page->flags & PG_domain_mask) != current->domain )
   7.316 +        if ( !DOMAIN_OKAY(page->flags) )
   7.317          {
   7.318              err = 1;
   7.319              MEM_LOG("Page %08lx bad domain (dom=%ld)",
   7.320 @@ -700,7 +728,7 @@ int do_process_page_updates(page_update_
   7.321          case PGREQ_NORMAL:
   7.322              page = frame_table + pfn;
   7.323              flags = page->flags;
   7.324 -            if ( (flags & PG_domain_mask) == current->domain )
   7.325 +            if ( DOMAIN_OKAY(flags) )
   7.326              {
   7.327                  switch ( (flags & PG_type_mask) )
   7.328                  {
   7.329 @@ -730,8 +758,9 @@ int do_process_page_updates(page_update_
   7.330              flags = page->flags;
   7.331              if ( (flags | current->domain) == PGT_l1_page_table )
   7.332              {
   7.333 -                
   7.334 -                *(unsigned long *)map_domain_mem(cur.ptr) = cur.val;
   7.335 +                unsigned long *va = map_domain_mem(cur.ptr);
   7.336 +                *va = cur.val;
   7.337 +                unmap_domain_mem(va);
   7.338                  err = 0;
   7.339              }
   7.340              else
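
Editorial note on the "twisted" L2 entries introduced above (not part of the
changeset): this is the classic linear-pagetable trick. If slot k of an L2
table points, read-only, back at the L2 page itself, the 4MB virtual window
starting at k << L2_PAGETABLE_SHIFT exposes every L1 page of that address
space, and the L2 page appears inside its own window. A sketch of the
resulting address arithmetic; the macro names are illustrative and assume the
i386 two-level layout used here:

    /* Base of the 4MB window created by self-mapping L2 slot 'k'. */
    #define LINEAR_BASE(k)    ((unsigned long)(k) << L2_PAGETABLE_SHIFT)

    /* Virtual address of the PTE that maps virtual address 'va'. */
    #define LINEAR_PTE(k, va) (LINEAR_BASE(k) + (((va) >> PAGE_SHIFT) << 2))

    /* Virtual address of the PDE that maps 'va' (the L2 page shows up at
     * slot 'k' of its own window). */
    #define LINEAR_PDE(k, va) (LINEAR_BASE(k) +                           \
                               ((unsigned long)(k) << PAGE_SHIFT) +       \
                               (((va) >> L2_PAGETABLE_SHIFT) << 2))

get_twisted_l2_table() only has to enforce that such an entry is read-only and
refers to the very page it is being installed in; since no reference counts
are bumped, no matching "put" is needed when the entry is later cleared.
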
     8.1 --- a/xen-2.4.16/common/page_alloc.c	Sun Jan 26 11:30:17 2003 +0000
     8.2 +++ b/xen-2.4.16/common/page_alloc.c	Sun Jan 26 11:30:21 2003 +0000
     8.3 @@ -12,7 +12,7 @@
     8.4  #include <xeno/lib.h>
     8.5  #include <asm/page.h>
     8.6  #include <xeno/spinlock.h>
     8.7 -
     8.8 +#include <xeno/slab.h>
     8.9  
    8.10  static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
    8.11  
    8.12 @@ -102,7 +102,7 @@ struct chunk_tail_st {
    8.13  
    8.14  /* Linked lists of free chunks of different powers-of-two in size. */
    8.15  #define FREELIST_SIZE ((sizeof(void*)<<3)-PAGE_SHIFT)
    8.16 -static chunk_head_t *free_list[FREELIST_SIZE];
    8.17 +static chunk_head_t *free_head[FREELIST_SIZE];
    8.18  static chunk_head_t  free_tail[FREELIST_SIZE];
    8.19  #define FREELIST_EMPTY(_l) ((_l)->next == NULL)
    8.20  
    8.21 @@ -120,8 +120,8 @@ void __init init_page_allocator(unsigned
    8.22  
    8.23      for ( i = 0; i < FREELIST_SIZE; i++ )
    8.24      {
    8.25 -        free_list[i]       = &free_tail[i];
    8.26 -        free_tail[i].pprev = &free_list[i];
    8.27 +        free_head[i]       = &free_tail[i];
    8.28 +        free_tail[i].pprev = &free_head[i];
    8.29          free_tail[i].next  = NULL;
    8.30      }
    8.31  
    8.32 @@ -159,10 +159,10 @@ void __init init_page_allocator(unsigned
    8.33          ct = (chunk_tail_t *)min-1;
    8.34          i -= PAGE_SHIFT;
    8.35          ch->level       = i;
    8.36 -        ch->next        = free_list[i];
    8.37 -        ch->pprev       = &free_list[i];
    8.38 +        ch->next        = free_head[i];
    8.39 +        ch->pprev       = &free_head[i];
    8.40          ch->next->pprev = &ch->next;
    8.41 -        free_list[i]    = ch;
    8.42 +        free_head[i]    = ch;
    8.43          ct->level       = i;
    8.44      }
    8.45  }
    8.46 @@ -171,29 +171,26 @@ void __init init_page_allocator(unsigned
    8.47  /* Allocate 2^@order contiguous pages. */
    8.48  unsigned long __get_free_pages(int mask, int order)
    8.49  {
    8.50 -    int i;
    8.51 +    int i, attempts = 0;
    8.52      chunk_head_t *alloc_ch, *spare_ch;
    8.53      chunk_tail_t            *spare_ct;
    8.54      unsigned long           flags;
    8.55  
    8.56 +retry:
    8.57      spin_lock_irqsave(&alloc_lock, flags);
    8.58  
    8.59  
    8.60      /* Find smallest order which can satisfy the request. */
    8.61      for ( i = order; i < FREELIST_SIZE; i++ ) {
    8.62 -	if ( !FREELIST_EMPTY(free_list[i]) ) 
    8.63 +	if ( !FREELIST_EMPTY(free_head[i]) ) 
    8.64  	    break;
    8.65      }
    8.66  
    8.67 -    if ( i == FREELIST_SIZE )
    8.68 -    {
    8.69 -        printk("Cannot handle page request order %d!\n", order);
    8.70 -	return 0;
    8.71 -    }
    8.72 +    if ( i == FREELIST_SIZE ) goto no_memory;
    8.73   
    8.74      /* Unlink a chunk. */
    8.75 -    alloc_ch = free_list[i];
    8.76 -    free_list[i] = alloc_ch->next;
    8.77 +    alloc_ch = free_head[i];
    8.78 +    free_head[i] = alloc_ch->next;
    8.79      alloc_ch->next->pprev = alloc_ch->pprev;
    8.80  
    8.81      /* We may have to break the chunk a number of times. */
    8.82 @@ -206,13 +203,13 @@ unsigned long __get_free_pages(int mask,
    8.83  
    8.84          /* Create new header for spare chunk. */
    8.85          spare_ch->level = i;
    8.86 -        spare_ch->next  = free_list[i];
    8.87 -        spare_ch->pprev = &free_list[i];
    8.88 +        spare_ch->next  = free_head[i];
    8.89 +        spare_ch->pprev = &free_head[i];
    8.90          spare_ct->level = i;
    8.91  
    8.92          /* Link in the spare chunk. */
    8.93          spare_ch->next->pprev = &spare_ch->next;
    8.94 -        free_list[i] = spare_ch;
    8.95 +        free_head[i] = spare_ch;
    8.96      }
    8.97      
    8.98      map_alloc(__pa(alloc_ch)>>PAGE_SHIFT, 1<<order);
    8.99 @@ -220,6 +217,19 @@ unsigned long __get_free_pages(int mask,
   8.100      spin_unlock_irqrestore(&alloc_lock, flags);
   8.101  
   8.102      return((unsigned long)alloc_ch);
   8.103 +
   8.104 + no_memory:
   8.105 +    if ( attempts++ < 8 )
   8.106 +    {
   8.107 +        spin_unlock_irqrestore(&alloc_lock, flags);
   8.108 +        kmem_cache_reap(0);
   8.109 +        goto retry;
   8.110 +    }
   8.111 +
   8.112 +    printk("Cannot handle page request order %d!\n", order);
   8.113 +    dump_slabinfo();
   8.114 +
   8.115 +    return 0;
   8.116  }
   8.117  
   8.118  
   8.119 @@ -269,10 +279,10 @@ void __free_pages(unsigned long p, int o
   8.120      ct = (chunk_tail_t *)(p+size)-1;
   8.121      ct->level = order;
   8.122      ch->level = order;
   8.123 -    ch->pprev = &free_list[order];
   8.124 -    ch->next  = free_list[order];
   8.125 +    ch->pprev = &free_head[order];
   8.126 +    ch->next  = free_head[order];
   8.127      ch->next->pprev = &ch->next;
   8.128 -    free_list[order] = ch;
   8.129 +    free_head[order] = ch;
   8.130  
   8.131      spin_unlock_irqrestore(&alloc_lock, flags);
   8.132  }
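
Editorial note on the allocator change above (not part of the changeset): on
an empty free list __get_free_pages() no longer fails immediately; it drops
the lock, reaps the slab caches, and retries up to 8 times before printing the
failure and dumping slab statistics. Callers keep the same contract -- a
page-aligned address, or 0 on failure -- as in this sketch (the caller, the
order, and the 0 mask value are illustrative; the prototypes are assumed to be
in scope):

    static int allocator_smoke_test(void)
    {
        /* Ask the buddy allocator for 2^2 = 4 contiguous pages.  Chunks are
         * split down from the smallest free order that can satisfy this. */
        unsigned long p = __get_free_pages(0, 2);

        if ( p == 0 )
            return -1;        /* failed even after the reap-and-retry loop */

        __free_pages(p, 2);   /* give the chunk back at the same order */
        return 0;
    }
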
     9.1 --- a/xen-2.4.16/common/slab.c	Sun Jan 26 11:30:17 2003 +0000
     9.2 +++ b/xen-2.4.16/common/slab.c	Sun Jan 26 11:30:21 2003 +0000
     9.3 @@ -1837,3 +1837,109 @@ out:
     9.4  	return ret;
     9.5  }
     9.6  
     9.7 +void dump_slabinfo()
     9.8 +{
     9.9 +	struct list_head *p;
    9.10 +        unsigned long spin_flags;
    9.11 +
    9.12 +	/* Output format version, so at least we can change it without _too_
    9.13 +	 * many complaints.
    9.14 +	 */
    9.15 +	printk( "slabinfo - version: 1.1"
    9.16 +#if STATS
    9.17 +				" (statistics)"
    9.18 +#endif
    9.19 +#ifdef CONFIG_SMP
    9.20 +				" (SMP)"
    9.21 +#endif
    9.22 +				"\n");
    9.23 +	down(&cache_chain_sem);
    9.24 +	p = &cache_cache.next;
    9.25 +	do {
    9.26 +		kmem_cache_t	*cachep;
    9.27 +		struct list_head *q;
    9.28 +		slab_t		*slabp;
    9.29 +		unsigned long	active_objs;
    9.30 +		unsigned long	num_objs;
    9.31 +		unsigned long	active_slabs = 0;
    9.32 +		unsigned long	num_slabs;
    9.33 +		cachep = list_entry(p, kmem_cache_t, next);
    9.34 +
    9.35 +		spin_lock_irq(&cachep->spinlock);
    9.36 +		active_objs = 0;
    9.37 +		num_slabs = 0;
    9.38 +		list_for_each(q,&cachep->slabs_full) {
    9.39 +			slabp = list_entry(q, slab_t, list);
    9.40 +			if (slabp->inuse != cachep->num)
    9.41 +				BUG();
    9.42 +			active_objs += cachep->num;
    9.43 +			active_slabs++;
    9.44 +		}
    9.45 +		list_for_each(q,&cachep->slabs_partial) {
    9.46 +			slabp = list_entry(q, slab_t, list);
    9.47 +			if (slabp->inuse == cachep->num || !slabp->inuse)
    9.48 +				BUG();
    9.49 +			active_objs += slabp->inuse;
    9.50 +			active_slabs++;
    9.51 +		}
    9.52 +		list_for_each(q,&cachep->slabs_free) {
    9.53 +			slabp = list_entry(q, slab_t, list);
    9.54 +			if (slabp->inuse)
    9.55 +				BUG();
    9.56 +			num_slabs++;
    9.57 +		}
    9.58 +		num_slabs+=active_slabs;
    9.59 +		num_objs = num_slabs*cachep->num;
    9.60 +
    9.61 +		printk("%-17s %6lu %6lu %6u %4lu %4lu %4u",
    9.62 +			cachep->name, active_objs, num_objs, cachep->objsize,
    9.63 +			active_slabs, num_slabs, (1<<cachep->gfporder));
    9.64 +
    9.65 +#if STATS
    9.66 +		{
    9.67 +			unsigned long errors = cachep->errors;
    9.68 +			unsigned long high = cachep->high_mark;
    9.69 +			unsigned long grown = cachep->grown;
    9.70 +			unsigned long reaped = cachep->reaped;
    9.71 +			unsigned long allocs = cachep->num_allocations;
    9.72 +
    9.73 +			printk(" : %6lu %7lu %5lu %4lu %4lu",
    9.74 +					high, allocs, grown, reaped, errors);
    9.75 +		}
    9.76 +#endif
    9.77 +#ifdef CONFIG_SMP
    9.78 +		{
    9.79 +			unsigned int batchcount = cachep->batchcount;
    9.80 +			unsigned int limit;
    9.81 +
    9.82 +			if (cc_data(cachep))
    9.83 +				limit = cc_data(cachep)->limit;
    9.84 +			 else
    9.85 +				limit = 0;
    9.86 +			printk(" : %4u %4u",
    9.87 +					limit, batchcount);
    9.88 +		}
    9.89 +#endif
    9.90 +#if STATS && defined(CONFIG_SMP)
    9.91 +		{
    9.92 +			unsigned long allochit = atomic_read(&cachep->allochit);
    9.93 +			unsigned long allocmiss = atomic_read(&cachep->allocmiss);
    9.94 +			unsigned long freehit = atomic_read(&cachep->freehit);
    9.95 +			unsigned long freemiss = atomic_read(&cachep->freemiss);
    9.96 +			printk(" : %6lu %6lu %6lu %6lu",
    9.97 +					allochit, allocmiss, freehit, freemiss);
    9.98 +		}
    9.99 +#endif
   9.100 +		printk("\n");
   9.101 +		spin_unlock_irq(&cachep->spinlock);
   9.102 +
   9.103 +		p = cachep->next.next;
   9.104 +	} while (p != &cache_cache.next);
   9.105 +
   9.106 +	up(&cache_chain_sem);
   9.107 +
   9.108 +	return;
   9.109 +}
   9.110 +
   9.111 +
   9.112 +
    10.1 --- a/xen-2.4.16/include/asm-i386/domain_page.h	Sun Jan 26 11:30:17 2003 +0000
    10.2 +++ b/xen-2.4.16/include/asm-i386/domain_page.h	Sun Jan 26 11:30:21 2003 +0000
    10.3 @@ -9,6 +9,21 @@
    10.4  
    10.5  extern unsigned long *mapcache[NR_CPUS];
    10.6  #define MAPCACHE_ENTRIES        1024
    10.7 +
    10.8 +/*
    10.9 + * Maps a given physical address, returning corresponding virtual address.
   10.10 + * The entire page containing that VA is now accessible until a 
   10.11 + * corresponding call to unmap_domain_mem().
   10.12 + */
   10.13 +extern void *map_domain_mem(unsigned long pa);
   10.14 +
   10.15 +/*
   10.16 + * Pass a VA within a page previously mapped with map_domain_mem().
   10.17 + * That page will then be removed from the mapping lists.
   10.18 + */
   10.19 +extern void unmap_domain_mem(void *va);
   10.20 +
   10.21 +#if 0
   10.22  #define MAPCACHE_HASH(_pfn)     ((_pfn) & (MAPCACHE_ENTRIES-1))
   10.23  static inline void *map_domain_mem(unsigned long pa)
   10.24  {
   10.25 @@ -25,3 +40,4 @@ static inline void *map_domain_mem(unsig
   10.26      }
   10.27      return va;
   10.28  }
   10.29 +#endif
    11.1 --- a/xen-2.4.16/include/asm-i386/page.h	Sun Jan 26 11:30:17 2003 +0000
    11.2 +++ b/xen-2.4.16/include/asm-i386/page.h	Sun Jan 26 11:30:21 2003 +0000
    11.3 @@ -123,8 +123,8 @@ extern void paging_init(void);
    11.4  			: "memory");					\
    11.5  	} while (0)
    11.6  
    11.7 -#define __flush_tlb_one(addr) \
    11.8 -__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
    11.9 +#define __flush_tlb_one(__addr) \
   11.10 +__asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
   11.11  
   11.12  #endif /* !__ASSEMBLY__ */
   11.13  
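
Editorial note on the __flush_tlb_one() change above (not part of the
changeset): renaming the parameter and parenthesising it is a macro-hygiene
fix, so that an expression argument is taken as a whole before the cast and
dereference are applied. A sketch of the difference, with a hypothetical call
site:

    __flush_tlb_one(va + PAGE_SIZE);

    /* Old expansion:  "m" (*(char *) va + PAGE_SIZE)
     *   The cast binds to 'va' alone, so the addition applies to the
     *   dereferenced byte rather than to the address -- the operand is no
     *   longer an addressable memory location, let alone the intended page.
     *
     * New expansion:  "m" (*(char *) (va + PAGE_SIZE))
     *   The whole expression forms the address, as intended. */
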
    12.1 --- a/xen-2.4.16/include/xeno/slab.h	Sun Jan 26 11:30:17 2003 +0000
    12.2 +++ b/xen-2.4.16/include/xeno/slab.h	Sun Jan 26 11:30:21 2003 +0000
    12.3 @@ -59,6 +59,9 @@ extern void *kmalloc(size_t, int);
    12.4  extern void kfree(const void *);
    12.5  
    12.6  extern int FASTCALL(kmem_cache_reap(int));
    12.7 +
    12.8 +extern void dump_slabinfo();
    12.9 +
   12.10  #if 0
   12.11  extern int slabinfo_read_proc(char *page, char **start, off_t off,
   12.12  				 int count, int *eof, void *data);
    13.1 --- a/xen-2.4.16/net/dev.c	Sun Jan 26 11:30:17 2003 +0000
    13.2 +++ b/xen-2.4.16/net/dev.c	Sun Jan 26 11:30:21 2003 +0000
    13.3 @@ -690,6 +690,7 @@ int netif_rx(struct sk_buff *skb)
    13.4  	int this_cpu = smp_processor_id();
    13.5  	struct softnet_data *queue;
    13.6  	unsigned long flags;
    13.7 +        net_vif_t *vif;
    13.8  
    13.9  	if (skb->stamp.tv_sec == 0)
   13.10  		get_fast_time(&skb->stamp);
   13.11 @@ -703,13 +704,13 @@ int netif_rx(struct sk_buff *skb)
   13.12          
   13.13  	netdev_rx_stat[this_cpu].total++;
   13.14  
   13.15 -        if (skb->src_vif == VIF_UNKNOWN_INTERFACE)
   13.16 +        if ( skb->src_vif == VIF_UNKNOWN_INTERFACE )
   13.17              skb->src_vif = VIF_PHYSICAL_INTERFACE;
   13.18  
   13.19 -        if (skb->dst_vif == VIF_UNKNOWN_INTERFACE)
   13.20 +        if ( skb->dst_vif == VIF_UNKNOWN_INTERFACE )
   13.21              net_get_target_vif(skb);
   13.22          
   13.23 -        if (sys_vif_list[skb->dst_vif] == NULL)
   13.24 +        if ( (vif = sys_vif_list[skb->dst_vif]) == NULL )
   13.25          {
   13.26              // the target vif does not exist.
   13.27              goto drop;
   13.28 @@ -730,8 +731,9 @@ int netif_rx(struct sk_buff *skb)
   13.29              read_lock(&tasklist_lock);
   13.30              p = &idle0_task;
   13.31              do {
   13.32 -                if ( p->domain != sys_vif_list[skb->dst_vif]->domain ) continue;
   13.33 -                skb_queue_tail(&sys_vif_list[skb->dst_vif]->skb_list, skb);
   13.34 +                if ( p->domain != vif->domain ) continue;
   13.35 +                if ( vif->skb_list.qlen > 100 ) break;
   13.36 +                skb_queue_tail(&vif->skb_list, skb);
   13.37                  cpu_mask = mark_hyp_event(p, _HYP_EVENT_NET_RX);
   13.38                  read_unlock(&tasklist_lock);
   13.39                  goto found;
   13.40 @@ -1975,21 +1977,17 @@ long do_net_update(void)
   13.41  
   13.42              if ( skb != NULL )
   13.43              {
   13.44 -                skb_get(skb); /* get a reference for non-local delivery */
   13.45                  skb->protocol = eth_type_trans(skb, skb->dev);
   13.46                  skb->src_vif = current_vif->id; 
   13.47                  net_get_target_vif(skb);
   13.48                  if ( skb->dst_vif > VIF_PHYSICAL_INTERFACE )
   13.49                  {
   13.50 -                    if (netif_rx(skb) == 0)
   13.51 -                        /* Give up non-local reference. Packet delivered locally. */
   13.52 -                        kfree_skb(skb);
   13.53 +                    (void)netif_rx(skb);
   13.54                  }
   13.55                  else if ( skb->dst_vif == VIF_PHYSICAL_INTERFACE )
   13.56                  {
   13.57 -
   13.58 -                        skb_push(skb, skb->dev->hard_header_len);
   13.59 -                        dev_queue_xmit(skb);
   13.60 +                    skb_push(skb, skb->dev->hard_header_len);
   13.61 +                    dev_queue_xmit(skb);
   13.62                  } 
   13.63                  else
   13.64                  {