ia64/xen-unstable

changeset 1543:7d2b7e6dad4c

bitkeeper revision 1.1000 (40d85bb06kipcJda-qkxK3evII99Lg)

Merge ssh://xenbk@gandalf.hpl.hp.com//var/bk/xeno-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Tue Jun 22 16:17:52 2004 +0000 (2004-06-22)
parents 4206a81e8dca e5ddcc390f7c
children aeb5e98fd252 7c46576a93c1
files tools/xc/lib/xc_domain.c tools/xc/py/Xc.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/keyhandler.c xen/common/memory.c xen/drivers/char/console.c xen/include/asm-x86/spinlock.h xen/include/xen/mm.h xen/include/xen/sched.h xen/include/xen/spinlock.h
line diff
     1.1 --- a/tools/xc/lib/xc_domain.c	Tue Jun 22 13:52:03 2004 +0000
     1.2 +++ b/tools/xc/lib/xc_domain.c	Tue Jun 22 16:17:52 2004 +0000
     1.3 @@ -92,12 +92,12 @@ int xc_domain_getinfo(int xc_handle,
     1.4          info->cpu     =
     1.5              (op.u.getdomaininfo.flags>>DOMFLAGS_CPUSHIFT) & DOMFLAGS_CPUMASK;
     1.6  
     1.7 -        info->dying    = (op.u.getdomaininfo.flags & DOMFLAGS_DYING);
     1.8 -        info->crashed  = (op.u.getdomaininfo.flags & DOMFLAGS_CRASHED);
     1.9 -        info->shutdown = (op.u.getdomaininfo.flags & DOMFLAGS_SHUTDOWN);
    1.10 -        info->paused   = (op.u.getdomaininfo.flags & DOMFLAGS_PAUSED);
    1.11 -        info->blocked  = (op.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
    1.12 -        info->running  = (op.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
    1.13 +        info->dying    = !!(op.u.getdomaininfo.flags & DOMFLAGS_DYING);
    1.14 +        info->crashed  = !!(op.u.getdomaininfo.flags & DOMFLAGS_CRASHED);
    1.15 +        info->shutdown = !!(op.u.getdomaininfo.flags & DOMFLAGS_SHUTDOWN);
    1.16 +        info->paused   = !!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED);
    1.17 +        info->blocked  = !!(op.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
    1.18 +        info->running  = !!(op.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
    1.19  
    1.20          info->shutdown_reason = 
    1.21              (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) & 
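
The xc_domain.c hunk above normalises each DOMFLAGS_* test with a double negation so the info fields receive a strict 0/1 value rather than the raw flag bit. A minimal sketch of why this matters; the struct and bit position below are illustrative, not the real xc_dominfo_t layout:

    #include <stdio.h>

    #define DOMFLAGS_BLOCKED (1U << 8)        /* illustrative bit position only */

    struct info { unsigned char blocked; };   /* hypothetical narrow field */

    int main(void)
    {
        unsigned int flags = DOMFLAGS_BLOCKED;
        struct info raw, norm;

        raw.blocked  = (flags & DOMFLAGS_BLOCKED);    /* truncated: bit 8 is lost, stores 0 */
        norm.blocked = !!(flags & DOMFLAGS_BLOCKED);  /* normalised to exactly 1 */

        printf("%u %u\n", raw.blocked, norm.blocked); /* prints "0 1" */
        return 0;
    }
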
     2.1 --- a/tools/xc/py/Xc.c	Tue Jun 22 13:52:03 2004 +0000
     2.2 +++ b/tools/xc/py/Xc.c	Tue Jun 22 16:17:52 2004 +0000
     2.3 @@ -1011,7 +1011,7 @@ static PyMethodDef pyxc_methods[] = {
     2.4        (PyCFunction)pyxc_domain_create, 
     2.5        METH_VARARGS | METH_KEYWORDS, "\n"
     2.6        "Create a new domain.\n"
     2.7 -      " mem_kb [int, 65536]:    Memory allocation, in kilobytes.\n"
     2.8 +      " mem_kb [int, 0]:        Memory allocation, in kilobytes.\n"
     2.9        " name   [str, '(anon)']: Informative textual name.\n\n"
    2.10        "Returns: [int] new domain identifier; -1 on error.\n" },
    2.11  
     3.1 --- a/xen/common/dom_mem_ops.c	Tue Jun 22 13:52:03 2004 +0000
     3.2 +++ b/xen/common/dom_mem_ops.c	Tue Jun 22 16:17:52 2004 +0000
     3.3 @@ -15,9 +15,9 @@
     3.4  #include <xen/event.h>
     3.5  #include <asm/domain_page.h>
     3.6  
     3.7 -static long alloc_dom_mem(struct domain *p, 
     3.8 -                          unsigned long      *pages, 
     3.9 -                          unsigned long       nr_pages)
    3.10 +static long alloc_dom_mem(struct domain *d, 
    3.11 +                          unsigned long *pages, 
    3.12 +                          unsigned long  nr_pages)
    3.13  {
    3.14      struct pfn_info *page;
    3.15      unsigned long    i;
    3.16 @@ -35,7 +35,7 @@ static long alloc_dom_mem(struct domain 
    3.17      for ( i = 0; i < nr_pages; i++ )
    3.18      {
    3.19          /* NB. 'alloc_domain_page' does limit-checking on pages per domain. */
    3.20 -        if ( unlikely((page = alloc_domain_page(p)) == NULL) )
    3.21 +        if ( unlikely((page = alloc_domain_page(d)) == NULL) )
    3.22          {
    3.23              DPRINTK("Could not allocate a frame\n");
    3.24              break;
    3.25 @@ -49,9 +49,9 @@ static long alloc_dom_mem(struct domain 
    3.26      return i;
    3.27  }
    3.28      
    3.29 -static long free_dom_mem(struct domain *p, 
    3.30 -                         unsigned long      *pages, 
    3.31 -                         unsigned long       nr_pages)
    3.32 +static long free_dom_mem(struct domain *d, 
    3.33 +                         unsigned long *pages, 
    3.34 +                         unsigned long  nr_pages)
    3.35  {
    3.36      struct pfn_info *page;
    3.37      unsigned long    i, mpfn;
    3.38 @@ -65,15 +65,15 @@ static long free_dom_mem(struct domain *
    3.39          if ( unlikely(mpfn >= max_page) )
    3.40          {
    3.41              DPRINTK("Domain %u page number out of range (%08lx>=%08lx)\n", 
    3.42 -                    p->domain, mpfn, max_page);
    3.43 +                    d->domain, mpfn, max_page);
    3.44              rc = -EINVAL;
    3.45              break;
    3.46          }
    3.47  
    3.48          page = &frame_table[mpfn];
    3.49 -        if ( unlikely(!get_page(page, p)) )
    3.50 +        if ( unlikely(!get_page(page, d)) )
    3.51          {
    3.52 -            DPRINTK("Bad page free for domain %u\n", p->domain);
    3.53 +            DPRINTK("Bad page free for domain %u\n", d->domain);
    3.54              rc = -EINVAL;
    3.55              break;
    3.56          }
     4.1 --- a/xen/common/domain.c	Tue Jun 22 13:52:03 2004 +0000
     4.2 +++ b/xen/common/domain.c	Tue Jun 22 16:17:52 2004 +0000
     4.3 @@ -175,6 +175,7 @@ void domain_kill(struct domain *d)
     4.4      if ( !test_and_set_bit(DF_DYING, &d->flags) )
     4.5      {
     4.6          sched_rem_domain(d);
     4.7 +        domain_relinquish_memory(d);
     4.8          put_domain(d);
     4.9      }
    4.10  }
    4.11 @@ -215,7 +216,7 @@ void domain_shutdown(u8 reason)
    4.12      __enter_scheduler();
    4.13  }
    4.14  
    4.15 -struct pfn_info *alloc_domain_page(struct domain *p)
    4.16 +struct pfn_info *alloc_domain_page(struct domain *d)
    4.17  {
    4.18      struct pfn_info *page = NULL;
    4.19      unsigned long flags, mask, pfn_stamp, cpu_stamp;
    4.20 @@ -255,23 +256,24 @@ struct pfn_info *alloc_domain_page(struc
    4.21          }
    4.22      }
    4.23  
    4.24 -    page->u.domain = p;
    4.25 +    page->u.domain = d;
    4.26      page->type_and_flags = 0;
    4.27 -    if ( p != NULL )
    4.28 +    if ( d != NULL )
    4.29      {
    4.30          wmb(); /* Domain pointer must be visible before updating refcnt. */
    4.31 -        spin_lock(&p->page_list_lock);
    4.32 -        if ( unlikely(p->tot_pages >= p->max_pages) )
    4.33 +        spin_lock(&d->page_list_lock);
    4.34 +        if ( unlikely(d->tot_pages >= d->max_pages) )
    4.35          {
    4.36              DPRINTK("Over-allocation for domain %u: %u >= %u\n",
    4.37 -                    p->domain, p->tot_pages, p->max_pages);
    4.38 -            spin_unlock(&p->page_list_lock);
    4.39 +                    d->domain, d->tot_pages, d->max_pages);
    4.40 +            spin_unlock(&d->page_list_lock);
    4.41              goto free_and_exit;
    4.42          }
    4.43 -        list_add_tail(&page->list, &p->page_list);
    4.44 -        p->tot_pages++;
    4.45 +        list_add_tail(&page->list, &d->page_list);
    4.46          page->count_and_flags = PGC_allocated | 1;
    4.47 -        spin_unlock(&p->page_list_lock);
    4.48 +        if ( unlikely(d->tot_pages++ == 0) )
    4.49 +            get_domain(d);
    4.50 +        spin_unlock(&d->page_list_lock);
    4.51      }
    4.52  
    4.53      return page;
    4.54 @@ -287,27 +289,28 @@ struct pfn_info *alloc_domain_page(struc
    4.55  void free_domain_page(struct pfn_info *page)
    4.56  {
    4.57      unsigned long flags;
    4.58 -    struct domain *p = page->u.domain;
    4.59 +    struct domain *d = page->u.domain;
    4.60  
    4.61      ASSERT(!in_irq());
    4.62  
    4.63      if ( likely(!IS_XEN_HEAP_FRAME(page)) )
    4.64      {
    4.65 -        /*
    4.66 -         * No race with setting of zombie bit. If it wasn't set before the
    4.67 -         * last reference was dropped, then it can't be set now.
    4.68 -         */
    4.69          page->u.cpu_mask = 0;
    4.70 -        if ( !(page->count_and_flags & PGC_zombie) )
    4.71 +        page->tlbflush_timestamp = tlbflush_clock;
    4.72 +        if ( likely(d != NULL) )
    4.73          {
    4.74 -            page->tlbflush_timestamp = tlbflush_clock;
    4.75 -            if ( likely(p != NULL) )
    4.76 +            page->u.cpu_mask = 1 << d->processor;
    4.77 +            /* NB. May recursively lock from domain_relinquish_memory(). */
    4.78 +            spin_lock_recursive(&d->page_list_lock);
    4.79 +            list_del(&page->list);
    4.80 +            if ( unlikely(--d->tot_pages == 0) )
    4.81              {
    4.82 -                page->u.cpu_mask = 1 << p->processor;
    4.83 -                spin_lock(&p->page_list_lock);
    4.84 -                list_del(&page->list);
    4.85 -                p->tot_pages--;
    4.86 -                spin_unlock(&p->page_list_lock);
    4.87 +                spin_unlock_recursive(&d->page_list_lock);
    4.88 +                put_domain(d); /* Domain 'd' can disappear now. */
    4.89 +            }
    4.90 +            else
    4.91 +            {
    4.92 +                spin_unlock_recursive(&d->page_list_lock);
    4.93              }
    4.94          }
    4.95  
    4.96 @@ -332,13 +335,11 @@ void free_domain_page(struct pfn_info *p
    4.97  }
    4.98  
    4.99  
   4.100 -void free_all_dom_mem(struct domain *p)
   4.101 +void domain_relinquish_memory(struct domain *d)
   4.102  {
   4.103 -    struct list_head *ent, zombies;
   4.104 -    struct pfn_info *page;
   4.105 -    unsigned long x, y;
   4.106 -
   4.107 -    INIT_LIST_HEAD(&zombies);
   4.108 +    struct list_head *ent, *tmp;
   4.109 +    struct pfn_info  *page;
   4.110 +    unsigned long     x, y;
   4.111  
   4.112      /*
   4.113       * If we're executing the idle task then we may still be running over the 
   4.114 @@ -348,51 +349,20 @@ void free_all_dom_mem(struct domain *p)
   4.115          write_ptbase(&current->mm);
   4.116  
   4.117      /* Exit shadow mode before deconstructing final guest page table. */
   4.118 -    if ( p->mm.shadow_mode )
   4.119 -        shadow_mode_disable(p);
   4.120 +    if ( d->mm.shadow_mode )
   4.121 +        shadow_mode_disable(d);
   4.122  
   4.123 -    /* STEP 1. Drop the in-use reference to the page-table base. */
   4.124 -    put_page_and_type(&frame_table[pagetable_val(p->mm.pagetable) >>
   4.125 -                                  PAGE_SHIFT]);
   4.126 +    /* Drop the in-use reference to the page-table base. */
   4.127 +    if ( pagetable_val(d->mm.pagetable) != 0 )
   4.128 +        put_page_and_type(&frame_table[pagetable_val(d->mm.pagetable) >>
   4.129 +                                      PAGE_SHIFT]);
   4.130  
   4.131 -    /* STEP 2. Zombify all pages on the domain's allocation list. */
   4.132 -    spin_lock(&p->page_list_lock);
   4.133 -    while ( (ent = p->page_list.next) != &p->page_list )
   4.134 +    /* Relinquish all pages on the domain's allocation list. */
   4.135 +    spin_lock_recursive(&d->page_list_lock); /* may enter free_domain_page() */
   4.136 +    list_for_each_safe ( ent, tmp, &d->page_list )
   4.137      {
   4.138          page = list_entry(ent, struct pfn_info, list);
   4.139  
   4.140 -        if ( unlikely(!get_page(page, p)) )
   4.141 -        {
   4.142 -            /*
   4.143 -             * Another CPU has dropped the last reference and is responsible 
   4.144 -             * for removing the page from this list. Wait for them to do so.
   4.145 -             */
   4.146 -            spin_unlock(&p->page_list_lock);
   4.147 -            while ( p->page_list.next == ent )
   4.148 -                barrier();
   4.149 -            spin_lock(&p->page_list_lock);
   4.150 -            continue;
   4.151 -        }
   4.152 -
   4.153 -        set_bit(_PGC_zombie, &page->count_and_flags);
   4.154 -
   4.155 -        list_del(&page->list);
   4.156 -        p->tot_pages--;
   4.157 -
   4.158 -        list_add(&page->list, &zombies);
   4.159 -    }
   4.160 -    spin_unlock(&p->page_list_lock);
   4.161 -
   4.162 -    /*
   4.163 -     * STEP 3. With the domain's list lock now released, we examine each zombie
   4.164 -     * page and drop references for guest-allocated and/or type-pinned pages.
   4.165 -     */
   4.166 -    while ( (ent = zombies.next) != &zombies )
   4.167 -    {
   4.168 -        page = list_entry(ent, struct pfn_info, list);
   4.169 -
   4.170 -        list_del(&page->list);
   4.171 -        
   4.172          if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_and_flags) )
   4.173              put_page_and_type(page);
   4.174  
   4.175 @@ -416,28 +386,27 @@ void free_all_dom_mem(struct domain *p)
   4.176                  free_page_type(page, PGT_base_page_table);
   4.177          }
   4.178          while ( unlikely(y != x) );
   4.179 -
   4.180 -        put_page(page);
   4.181      }
   4.182 +    spin_unlock_recursive(&d->page_list_lock);
   4.183  }
   4.184  
   4.185  
   4.186 -unsigned int alloc_new_dom_mem(struct domain *p, unsigned int kbytes)
   4.187 +unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
   4.188  {
   4.189      unsigned int alloc_pfns, nr_pages;
   4.190      struct pfn_info *page;
   4.191  
   4.192      nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
   4.193 -    p->max_pages = nr_pages; /* this can now be controlled independently */
   4.194 +    d->max_pages = nr_pages; /* this can now be controlled independently */
   4.195  
   4.196 -    /* grow the allocation if necessary */
   4.197 -    for ( alloc_pfns = p->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
   4.198 +    /* Grow the allocation if necessary. */
   4.199 +    for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
   4.200      {
   4.201 -        if ( unlikely((page=alloc_domain_page(p)) == NULL) ||
   4.202 +        if ( unlikely((page=alloc_domain_page(d)) == NULL) ||
   4.203               unlikely(free_pfns < (SLACK_DOMAIN_MEM_KILOBYTES >> 
   4.204                                     (PAGE_SHIFT-10))) )
   4.205          {
   4.206 -            free_all_dom_mem(p);
   4.207 +            domain_relinquish_memory(d);
   4.208              return -ENOMEM;
   4.209          }
   4.210  
   4.211 @@ -447,55 +416,50 @@ unsigned int alloc_new_dom_mem(struct do
   4.212  #ifndef NDEBUG
   4.213          {
   4.214              /* Initialise with magic marker if in DEBUG mode. */
   4.215 -            void * a = map_domain_mem( (page-frame_table)<<PAGE_SHIFT );
   4.216 -            memset( a, 0x80 | (char) p->domain, PAGE_SIZE );
   4.217 -            unmap_domain_mem( a );
   4.218 +            void *a = map_domain_mem((page-frame_table)<<PAGE_SHIFT);
   4.219 +            memset(a, 0x80 | (char)d->domain, PAGE_SIZE);
   4.220 +            unmap_domain_mem(a);
   4.221          }
   4.222  #endif
   4.223 -
   4.224      }
   4.225  
   4.226 -    p->tot_pages = nr_pages;
   4.227 -
   4.228      return 0;
   4.229  }
   4.230   
   4.231  
   4.232  /* Release resources belonging to task @p. */
   4.233 -void domain_destruct(struct domain *p)
   4.234 +void domain_destruct(struct domain *d)
   4.235  {
   4.236 -    struct domain **pp;
   4.237 +    struct domain **pd;
   4.238      unsigned long flags;
   4.239  
   4.240 -    if ( !test_bit(DF_DYING, &p->flags) )
   4.241 +    if ( !test_bit(DF_DYING, &d->flags) )
   4.242          BUG();
   4.243  
   4.244      /* May be already destructed, or get_domain() can race us. */
   4.245 -    if ( cmpxchg(&p->refcnt.counter, 0, DOMAIN_DESTRUCTED) != 0 )
   4.246 +    if ( cmpxchg(&d->refcnt.counter, 0, DOMAIN_DESTRUCTED) != 0 )
   4.247          return;
   4.248  
   4.249 -    DPRINTK("Releasing task %u\n", p->domain);
   4.250 +    DPRINTK("Releasing task %u\n", d->domain);
   4.251  
   4.252      /* Delete from task list and task hashtable. */
   4.253      write_lock_irqsave(&tasklist_lock, flags);
   4.254 -    pp = &task_list;
   4.255 -    while ( *pp != p ) 
   4.256 -        pp = &(*pp)->next_list;
   4.257 -    *pp = p->next_list;
   4.258 -    pp = &task_hash[TASK_HASH(p->domain)];
   4.259 -    while ( *pp != p ) 
   4.260 -        pp = &(*pp)->next_hash;
   4.261 -    *pp = p->next_hash;
   4.262 +    pd = &task_list;
   4.263 +    while ( *pd != d ) 
   4.264 +        pd = &(*pd)->next_list;
   4.265 +    *pd = d->next_list;
   4.266 +    pd = &task_hash[TASK_HASH(d->domain)];
   4.267 +    while ( *pd != d ) 
   4.268 +        pd = &(*pd)->next_hash;
   4.269 +    *pd = d->next_hash;
   4.270      write_unlock_irqrestore(&tasklist_lock, flags);
   4.271  
   4.272 -    destroy_event_channels(p);
   4.273 +    destroy_event_channels(d);
   4.274  
   4.275 -    /* Free all memory associated with this domain. */
   4.276 -    free_page((unsigned long)p->mm.perdomain_pt);
   4.277 -    UNSHARE_PFN(virt_to_page(p->shared_info));
   4.278 -    free_all_dom_mem(p);
   4.279 +    free_page((unsigned long)d->mm.perdomain_pt);
   4.280 +    UNSHARE_PFN(virt_to_page(d->shared_info));
   4.281  
   4.282 -    free_domain_struct(p);
   4.283 +    free_domain_struct(d);
   4.284  }
   4.285  
   4.286  
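
The domain.c changes above replace the zombie-page scheme with a simpler rule: the first page allocated to a domain takes a reference on the domain (get_domain) and freeing the last page drops it (put_domain), with page_list_lock taken recursively so domain_relinquish_memory can re-enter free_domain_page. A compact sketch of the counting pattern, using stand-in types rather than Xen's struct domain:

    /* Sketch only: "first page pins the domain, last page unpins it".
     * dom_t, dom_get() and dom_put() are stand-ins, not Xen's definitions. */
    typedef struct dom {
        int           refcnt;
        unsigned long tot_pages;
    } dom_t;

    void dom_get(dom_t *d) { d->refcnt++; }
    void dom_put(dom_t *d) { d->refcnt--; /* real code may destruct at zero */ }

    void account_page_alloc(dom_t *d)
    {
        /* held under d->page_list_lock in the real code */
        if (d->tot_pages++ == 0)
            dom_get(d);              /* first page takes a domain reference */
    }

    void account_page_free(dom_t *d)
    {
        /* lock is taken recursively so relinquish can re-enter this path */
        if (--d->tot_pages == 0)
            dom_put(d);              /* domain may disappear after this call */
    }
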
     5.1 --- a/xen/common/event_channel.c	Tue Jun 22 13:52:03 2004 +0000
     5.2 +++ b/xen/common/event_channel.c	Tue Jun 22 16:17:52 2004 +0000
     5.3 @@ -91,7 +91,7 @@ static long evtchn_bind_interdomain(evtc
     5.4      }
     5.5  
     5.6      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
     5.7 -    if ( dom1 < dom2 )
     5.8 +    if ( d1 < d2 )
     5.9      {
    5.10          spin_lock(&d1->event_channel_lock);
    5.11          spin_lock(&d2->event_channel_lock);
    5.12 @@ -271,7 +271,7 @@ static long __evtchn_close(struct domain
    5.13                  goto out;
    5.14              }
    5.15  
    5.16 -            if ( d1->domain < d2->domain )
    5.17 +            if ( d1 < d2 )
    5.18              {
    5.19                  spin_lock(&d2->event_channel_lock);
    5.20              }
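
Both event_channel.c hunks switch the deadlock-avoidance ordering from domain ids to the domain pointers themselves: whichever lock has the lower address is always taken first. A small self-contained sketch of the pattern, using pthreads in place of Xen's spinlocks (types and names are illustrative):

    #include <pthread.h>

    typedef struct dom {
        pthread_mutex_t event_channel_lock;
    } dom_t;

    /* Take two domains' locks in a fixed (address) order so two CPUs working
     * on the same pair of domains can never deadlock against each other. */
    void lock_domain_pair(dom_t *d1, dom_t *d2)
    {
        if (d1 < d2) {
            pthread_mutex_lock(&d1->event_channel_lock);
            pthread_mutex_lock(&d2->event_channel_lock);
        } else if (d1 > d2) {
            pthread_mutex_lock(&d2->event_channel_lock);
            pthread_mutex_lock(&d1->event_channel_lock);
        } else {
            /* same domain on both ends: lock it only once */
            pthread_mutex_lock(&d1->event_channel_lock);
        }
    }
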
     6.1 --- a/xen/common/keyhandler.c	Tue Jun 22 13:52:03 2004 +0000
     6.2 +++ b/xen/common/keyhandler.c	Tue Jun 22 16:17:52 2004 +0000
     6.3 @@ -71,27 +71,28 @@ static void halt_machine(u_char key, voi
     6.4  
     6.5  void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs) 
     6.6  {
     6.7 -    unsigned long       flags;
     6.8 -    struct domain *p; 
     6.9 -    shared_info_t      *s; 
    6.10 -    s_time_t            now = NOW();
    6.11 +    unsigned long  flags;
    6.12 +    struct domain *d; 
    6.13 +    shared_info_t *s; 
    6.14 +    s_time_t       now = NOW();
    6.15  
    6.16      printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
    6.17             (u32)(now>>32), (u32)now); 
    6.18  
    6.19      read_lock_irqsave(&tasklist_lock, flags); 
    6.20  
    6.21 -    for_each_domain ( p )
    6.22 +    for_each_domain ( d )
    6.23      {
    6.24 -        printk("Xen: DOM %u, CPU %d [has=%c]\n",
    6.25 -               p->domain, p->processor, 
    6.26 -               test_bit(DF_RUNNING, &p->flags) ? 'T':'F'); 
    6.27 -        s = p->shared_info; 
    6.28 +        printk("Xen: DOM %u, CPU %d [has=%c] refcnt=%d nr_pages=%d\n",
    6.29 +               d->domain, d->processor, 
    6.30 +               test_bit(DF_RUNNING, &d->flags) ? 'T':'F',
    6.31 +               atomic_read(&d->refcnt), d->tot_pages);
    6.32 +        s = d->shared_info; 
    6.33          printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
    6.34                 s->vcpu_data[0].evtchn_upcall_pending, 
    6.35                 s->vcpu_data[0].evtchn_upcall_mask);
    6.36          printk("Notifying guest...\n"); 
    6.37 -        send_guest_virq(p, VIRQ_DEBUG);
    6.38 +        send_guest_virq(d, VIRQ_DEBUG);
    6.39      }
    6.40  
    6.41      read_unlock_irqrestore(&tasklist_lock, flags); 
     7.1 --- a/xen/common/memory.c	Tue Jun 22 13:52:03 2004 +0000
     7.2 +++ b/xen/common/memory.c	Tue Jun 22 16:17:52 2004 +0000
     7.3 @@ -151,10 +151,10 @@
     7.4  
     7.5  static int alloc_l2_table(struct pfn_info *page);
     7.6  static int alloc_l1_table(struct pfn_info *page);
     7.7 -static int get_page_from_pagenr(unsigned long page_nr, struct domain *p);
     7.8 +static int get_page_from_pagenr(unsigned long page_nr, struct domain *d);
     7.9  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
    7.10                                           u32 type,
    7.11 -                                         struct domain *p);
    7.12 +                                         struct domain *d);
    7.13  
    7.14  static void free_l2_table(struct pfn_info *page);
    7.15  static void free_l1_table(struct pfn_info *page);
    7.16 @@ -241,35 +241,35 @@ void add_to_domain_alloc_list(unsigned l
    7.17      spin_unlock_irqrestore(&free_list_lock, flags);
    7.18  }
    7.19  
    7.20 -static void __invalidate_shadow_ldt(struct domain *p)
    7.21 +static void __invalidate_shadow_ldt(struct domain *d)
    7.22  {
    7.23      int i;
    7.24      unsigned long pfn;
    7.25      struct pfn_info *page;
    7.26      
    7.27 -    p->mm.shadow_ldt_mapcnt = 0;
    7.28 +    d->mm.shadow_ldt_mapcnt = 0;
    7.29  
    7.30      for ( i = 16; i < 32; i++ )
    7.31      {
    7.32 -        pfn = l1_pgentry_to_pagenr(p->mm.perdomain_pt[i]);
    7.33 +        pfn = l1_pgentry_to_pagenr(d->mm.perdomain_pt[i]);
    7.34          if ( pfn == 0 ) continue;
    7.35 -        p->mm.perdomain_pt[i] = mk_l1_pgentry(0);
    7.36 -        page = frame_table + pfn;
    7.37 +        d->mm.perdomain_pt[i] = mk_l1_pgentry(0);
    7.38 +        page = &frame_table[pfn];
    7.39          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
    7.40 -        ASSERT_PAGE_IS_DOMAIN(page, p);
    7.41 +        ASSERT_PAGE_IS_DOMAIN(page, d);
    7.42          put_page_and_type(page);
    7.43      }
    7.44  
    7.45      /* Dispose of the (now possibly invalid) mappings from the TLB.  */
    7.46 -    percpu_info[p->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    7.47 +    percpu_info[d->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    7.48  }
    7.49  
    7.50  
    7.51  static inline void invalidate_shadow_ldt(void)
    7.52  {
    7.53 -    struct domain *p = current;
    7.54 -    if ( p->mm.shadow_ldt_mapcnt != 0 )
    7.55 -        __invalidate_shadow_ldt(p);
    7.56 +    struct domain *d = current;
    7.57 +    if ( d->mm.shadow_ldt_mapcnt != 0 )
    7.58 +        __invalidate_shadow_ldt(d);
    7.59  }
    7.60  
    7.61  
    7.62 @@ -294,28 +294,28 @@ int alloc_segdesc_page(struct pfn_info *
    7.63  /* Map shadow page at offset @off. */
    7.64  int map_ldt_shadow_page(unsigned int off)
    7.65  {
    7.66 -    struct domain *p = current;
    7.67 +    struct domain *d = current;
    7.68      unsigned long l1e;
    7.69  
    7.70      if ( unlikely(in_irq()) )
    7.71          BUG();
    7.72  
    7.73 -    __get_user(l1e, (unsigned long *)&linear_pg_table[(p->mm.ldt_base >> 
    7.74 +    __get_user(l1e, (unsigned long *)&linear_pg_table[(d->mm.ldt_base >> 
    7.75                                                         PAGE_SHIFT) + off]);
    7.76  
    7.77      if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
    7.78           unlikely(!get_page_and_type(&frame_table[l1e >> PAGE_SHIFT], 
    7.79 -                                     p, PGT_ldt_page)) )
    7.80 +                                     d, PGT_ldt_page)) )
    7.81          return 0;
    7.82  
    7.83 -    p->mm.perdomain_pt[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    7.84 -    p->mm.shadow_ldt_mapcnt++;
    7.85 +    d->mm.perdomain_pt[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    7.86 +    d->mm.shadow_ldt_mapcnt++;
    7.87  
    7.88      return 1;
    7.89  }
    7.90  
    7.91  
    7.92 -static int get_page_from_pagenr(unsigned long page_nr, struct domain *p)
    7.93 +static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
    7.94  {
    7.95      struct pfn_info *page = &frame_table[page_nr];
    7.96  
    7.97 @@ -325,7 +325,7 @@ static int get_page_from_pagenr(unsigned
    7.98          return 0;
    7.99      }
   7.100  
   7.101 -    if ( unlikely(!get_page(page, p)) )
   7.102 +    if ( unlikely(!get_page(page, d)) )
   7.103      {
   7.104          MEM_LOG("Could not get page ref for pfn %08lx", page_nr);
   7.105          return 0;
   7.106 @@ -337,11 +337,11 @@ static int get_page_from_pagenr(unsigned
   7.107  
   7.108  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
   7.109                                           u32 type,
   7.110 -                                         struct domain *p)
   7.111 +                                         struct domain *d)
   7.112  {
   7.113      struct pfn_info *page = &frame_table[page_nr];
   7.114  
   7.115 -    if ( unlikely(!get_page_from_pagenr(page_nr, p)) )
   7.116 +    if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
   7.117          return 0;
   7.118  
   7.119      if ( unlikely(!get_page_type(page, type)) )
   7.120 @@ -412,7 +412,7 @@ static int get_page_from_l1e(l1_pgentry_
   7.121  {
   7.122      unsigned long l1v = l1_pgentry_val(l1e);
   7.123      unsigned long pfn = l1_pgentry_to_pagenr(l1e);
   7.124 -    extern int domain_iomem_in_pfn(struct domain *p, unsigned long pfn);
   7.125 +    extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
   7.126  
   7.127      if ( !(l1v & _PAGE_PRESENT) )
   7.128          return 1;
   7.129 @@ -720,21 +720,11 @@ int alloc_page_type(struct pfn_info *pag
   7.130                                       &page->count_and_flags)) )
   7.131      {
   7.132          struct domain *p = page->u.domain;
   7.133 -        mb(); /* Check zombie status before using domain ptr. */
   7.134 -        /*
   7.135 -         * NB. 'p' may no longer be valid by time we dereference it, so
   7.136 -         * p->processor might be garbage. We clamp it, just in case.
   7.137 -         */
   7.138 -        if ( likely(!test_bit(_PGC_zombie, &page->count_and_flags)) )
   7.139 +        if ( unlikely(NEED_FLUSH(tlbflush_time[p->processor],
   7.140 +                                 page->tlbflush_timestamp)) )
   7.141          {
   7.142 -            unsigned int cpu = p->processor;
   7.143 -            if ( likely(cpu <= smp_num_cpus) &&
   7.144 -                 unlikely(NEED_FLUSH(tlbflush_time[cpu],
   7.145 -                                     page->tlbflush_timestamp)) )
   7.146 -            {
   7.147 -                perfc_incr(need_flush_tlb_flush);
   7.148 -                flush_tlb_cpu(cpu);
   7.149 -            }
   7.150 +            perfc_incr(need_flush_tlb_flush);
   7.151 +            flush_tlb_cpu(p->processor);
   7.152          }
   7.153      }
   7.154  
   7.155 @@ -803,7 +793,8 @@ static int do_extended_command(unsigned 
   7.156      unsigned long pfn = ptr >> PAGE_SHIFT;
   7.157      unsigned long old_base_pfn;
   7.158      struct pfn_info *page = &frame_table[pfn];
   7.159 -    struct domain *p = current, *q;
   7.160 +    struct domain *d = current, *nd, *e;
   7.161 +    u32 x, y;
   7.162      domid_t domid;
   7.163  
   7.164      switch ( cmd )
   7.165 @@ -853,18 +844,18 @@ static int do_extended_command(unsigned 
   7.166          break;
   7.167  
   7.168      case MMUEXT_NEW_BASEPTR:
   7.169 -        okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table, p);
   7.170 +        okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table, d);
   7.171          if ( likely(okay) )
   7.172          {
   7.173              invalidate_shadow_ldt();
   7.174  
   7.175              percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
   7.176 -            old_base_pfn = pagetable_val(p->mm.pagetable) >> PAGE_SHIFT;
   7.177 -            p->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   7.178 +            old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT;
   7.179 +            d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   7.180  
   7.181 -            shadow_mk_pagetable(&p->mm);
   7.182 +            shadow_mk_pagetable(&d->mm);
   7.183  
   7.184 -            write_ptbase(&p->mm);
   7.185 +            write_ptbase(&d->mm);
   7.186  
   7.187              put_page_and_type(&frame_table[old_base_pfn]);    
   7.188  
   7.189 @@ -900,13 +891,13 @@ static int do_extended_command(unsigned 
   7.190              okay = 0;
   7.191              MEM_LOG("Bad args to SET_LDT: ptr=%08lx, ents=%08lx", ptr, ents);
   7.192          }
   7.193 -        else if ( (p->mm.ldt_ents != ents) || 
   7.194 -                  (p->mm.ldt_base != ptr) )
   7.195 +        else if ( (d->mm.ldt_ents != ents) || 
   7.196 +                  (d->mm.ldt_base != ptr) )
   7.197          {
   7.198              invalidate_shadow_ldt();
   7.199 -            p->mm.ldt_base = ptr;
   7.200 -            p->mm.ldt_ents = ents;
   7.201 -            load_LDT(p);
   7.202 +            d->mm.ldt_base = ptr;
   7.203 +            d->mm.ldt_ents = ents;
   7.204 +            load_LDT(d);
   7.205              percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   7.206              if ( ents != 0 )
   7.207                  percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
   7.208 @@ -917,10 +908,10 @@ static int do_extended_command(unsigned 
   7.209      case MMUEXT_SET_SUBJECTDOM:
   7.210          domid = ((domid_t)((ptr&~0xFFFF)|(val>>16)));
   7.211  
   7.212 -        if ( !IS_PRIV(p) )
   7.213 +        if ( !IS_PRIV(d) )
   7.214          {
   7.215              MEM_LOG("Dom %u has no privilege to set subject domain",
   7.216 -                    p->domain);
   7.217 +                    d->domain);
   7.218              okay = 0;
   7.219          }
   7.220          else
   7.221 @@ -939,31 +930,89 @@ static int do_extended_command(unsigned 
   7.222          break;
   7.223  
   7.224      case MMUEXT_REASSIGN_PAGE:
   7.225 -        if ( unlikely(!IS_PRIV(p)) )
   7.226 +        if ( unlikely(!IS_PRIV(d)) )
   7.227          {
   7.228 -            MEM_LOG("Dom %u has no privilege to reassign page ownership",
   7.229 -                    p->domain);
   7.230 +            MEM_LOG("Dom %u has no reassignment priv", d->domain);
   7.231              okay = 0;
   7.232 +            break;
   7.233          }
   7.234 -        else if ( likely((q = percpu_info[cpu].gps) != NULL) &&
   7.235 -                  likely(test_bit(_PGC_allocated, &page->count_and_flags)) &&
   7.236 -                  likely(page->u.domain == p) ) /* won't be smp-guest safe */
   7.237 +
   7.238 +        if ( unlikely((e = percpu_info[cpu].gps) == NULL) )
   7.239          {
   7.240 -            spin_lock(&p->page_list_lock);
   7.241 -            p->tot_pages--;
   7.242 -            list_del(&page->list);
   7.243 -            spin_unlock(&p->page_list_lock);
   7.244 -            page->u.domain = q;
   7.245 -            spin_lock(&q->page_list_lock);
   7.246 -            q->tot_pages++;
   7.247 -            list_add_tail(&page->list, &q->page_list);
   7.248 -            spin_unlock(&q->page_list_lock);
   7.249 +            MEM_LOG("No GPS to reassign pfn %08lx to\n", pfn);
   7.250 +            okay = 0;
   7.251 +            break;
   7.252 +        }
   7.253 +
   7.254 +        /*
   7.255 +         * Grab both page_list locks, in order. This prevents the page from
   7.256 +         * disappearing elsewhere while we modify the owner, and we'll need
   7.257 +         * both locks if we're successful so that we can change lists.
   7.258 +         */
   7.259 +        if ( d < e )
   7.260 +        {
   7.261 +            spin_lock(&d->page_list_lock);
   7.262 +            spin_lock(&e->page_list_lock);
   7.263          }
   7.264          else
   7.265          {
   7.266 -            MEM_LOG("No GPS to reassign pfn %08lx to\n", pfn);
   7.267 +            spin_lock(&e->page_list_lock);
   7.268 +            spin_lock(&d->page_list_lock);
   7.269 +        }
   7.270 +
   7.271 +        /* A domain shouldn't have PGC_allocated pages when it is dying. */
   7.272 +        if ( unlikely(test_bit(DF_DYING, &e->flags)) )
   7.273 +        {
   7.274              okay = 0;
   7.275 +            goto reassign_fail;
   7.276          }
   7.277 +
   7.278 +        /*
   7.279 +         * The tricky bit: atomically change owner while there is just one
   7.280 +         * benign reference to the page (PGC_allocated). If that reference
   7.281 +         * disappears then the deallocation routine will safely spin.
   7.282 +         */
   7.283 +        nd = page->u.domain;
   7.284 +        y  = page->count_and_flags;
   7.285 +        do {
   7.286 +            x = y;
   7.287 +            if ( unlikely((x & (PGC_count_mask|PGC_allocated)) != 
   7.288 +                          (1|PGC_allocated)) ||
   7.289 +                 unlikely(nd != d) )
   7.290 +            {
   7.291 +                MEM_LOG("Bad page values %08lx: ed=%p(%u), sd=%p,"
   7.292 +                        " caf=%08x, taf=%08x\n", page_to_pfn(page),
   7.293 +                        d, d->domain, nd, x, page->type_and_flags);
   7.294 +                okay = 0;
   7.295 +                goto reassign_fail;
   7.296 +            }
   7.297 +            __asm__ __volatile__(
   7.298 +                LOCK_PREFIX "cmpxchg8b %3"
   7.299 +                : "=a" (nd), "=d" (y), "=b" (e),
   7.300 +                "=m" (*(volatile u64 *)(&page->u.domain))
   7.301 +                : "0" (d), "1" (x), "b" (e), "c" (x) );
   7.302 +        } 
   7.303 +        while ( unlikely(nd != d) || unlikely(y != x) );
   7.304 +        
   7.305 +        /*
   7.306 +         * Unlink from 'd'. We transferred at least one reference to 'e', so
   7.307 +         * noone else is spinning to try to delete this page from 'd'.
   7.308 +         */
   7.309 +        d->tot_pages--;
   7.310 +        list_del(&page->list);
   7.311 +        
   7.312 +        /*
   7.313 +         * Add the page to 'e'. Someone may already have removed the last
   7.314 +         * reference and want to remove the page from 'e'. However, we have
   7.315 +         * the lock so they'll spin waiting for us.
   7.316 +         */
   7.317 +        if ( unlikely(e->tot_pages++ == 0) )
   7.318 +            get_domain(e);
   7.319 +        list_add_tail(&page->list, &e->page_list);
   7.320 +
   7.321 +    reassign_fail:        
   7.322 +        spin_unlock(&d->page_list_lock);
   7.323 +        spin_unlock(&e->page_list_lock);
   7.324          break;
   7.325  
   7.326      case MMUEXT_RESET_SUBJECTDOM:
   7.327 @@ -1228,14 +1277,14 @@ int do_update_va_mapping_otherdomain(uns
   7.328                                       domid_t domid)
   7.329  {
   7.330      unsigned int cpu = smp_processor_id();
   7.331 -    struct domain *p;
   7.332 +    struct domain *d;
   7.333      int rc;
   7.334  
   7.335      if ( unlikely(!IS_PRIV(current)) )
   7.336          return -EPERM;
   7.337  
   7.338 -    percpu_info[cpu].gps = p = find_domain_by_id(domid);
   7.339 -    if ( unlikely(p == NULL) )
   7.340 +    percpu_info[cpu].gps = d = find_domain_by_id(domid);
   7.341 +    if ( unlikely(d == NULL) )
   7.342      {
   7.343          MEM_LOG("Unknown domain '%u'", domid);
   7.344          return -ESRCH;
   7.345 @@ -1243,7 +1292,7 @@ int do_update_va_mapping_otherdomain(uns
   7.346  
   7.347      rc = do_update_va_mapping(page_nr, val, flags);
   7.348  
   7.349 -    put_domain(p);
   7.350 +    put_domain(d);
   7.351      percpu_info[cpu].gps = NULL;
   7.352  
   7.353      return rc;
   7.354 @@ -1257,8 +1306,6 @@ int do_update_va_mapping_otherdomain(uns
   7.355   * audit_page():      in addition maintains a history of audited pages
   7.356   * reaudit_pages():   re-audit previously audited pages
   7.357   * audit_all_pages(): check the ref-count for all leaf pages
   7.358 - *                    also checks for zombie pages
   7.359 - * 
   7.360   * reaudit_page() and audit_all_pages() are designed to be
   7.361   * keyhandler functions so that they can be easily invoked from the console.
   7.362   */
   7.363 @@ -1285,8 +1332,6 @@ void __audit_page(unsigned long pfn) {
   7.364      {
   7.365          if ( (frame_table[i].count_and_flags & PGC_count_mask) == 0 )
   7.366              continue;
   7.367 -        if ( (frame_table[i].count_and_flags & PGC_zombie) != 0 )
   7.368 -            continue;
   7.369  
   7.370          /* check if entry is a page table (L1 page table) and in use */
   7.371          if ( ((frame_table[i].type_and_flags & PGT_type_mask) ==
   7.372 @@ -1359,7 +1404,6 @@ void reaudit_pages(u_char key, void *dev
   7.373  /*
   7.374   * do various checks on all pages.
   7.375   * Currently:
   7.376 - * - check for zombie pages
   7.377   * - check for pages with corrupt ref-count
   7.378   * Interrupts are diabled completely. use with care.
   7.379   */
   7.380 @@ -1376,16 +1420,6 @@ void audit_all_pages(u_char key, void *d
   7.381      /* walk the frame table */
   7.382      for ( i = 0; i < max_page; i++ )
   7.383      {
   7.384 -        /* check for zombies */
   7.385 -        if ( ((frame_table[i].count_and_flags & PGC_count_mask) != 0) &&
   7.386 -             ((frame_table[i].count_and_flags & PGC_zombie) != 0) )
   7.387 -        { 
   7.388 -            printk("zombie: pfn=%08lx cf=%08x tf=%08x dom=%08lx\n", 
   7.389 -                   i, frame_table[i].count_and_flags,
   7.390 -                   frame_table[i].type_and_flags,
   7.391 -                   (unsigned long)frame_table[i].u.domain);
   7.392 -        }
   7.393 -
   7.394          /* check ref count for leaf pages */
   7.395          if ( ((frame_table[i].type_and_flags & PGT_type_mask) ==
   7.396                PGT_writeable_page) )
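
The rewritten MMUEXT_REASSIGN_PAGE case above changes a page's owner with a cmpxchg8b over the adjacent u.domain and count_and_flags words, succeeding only while the page holds exactly its single PGC_allocated reference. The sketch below shows the same idea in portable C11, packing a 32-bit owner id and the count/flags word into one 64-bit value for a compare-and-swap loop; the packing and names are illustrative, not the Xen layout:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PGC_allocated  (1u << 29)
    #define PGC_count_mask ((1u << 29) - 1)

    static uint64_t pack(uint32_t owner, uint32_t caf)
    {
        return ((uint64_t)owner << 32) | caf;
    }

    /* Atomically move ownership from 'from' to 'to', but only while the page
     * has exactly one reference and that reference is PGC_allocated. */
    bool reassign_page(_Atomic uint64_t *word, uint32_t from, uint32_t to)
    {
        uint64_t old = atomic_load(word);
        for (;;) {
            uint32_t owner = (uint32_t)(old >> 32);
            uint32_t caf   = (uint32_t)old;
            if (owner != from ||
                (caf & (PGC_count_mask | PGC_allocated)) != (1 | PGC_allocated))
                return false;                    /* page busy or wrong owner */
            if (atomic_compare_exchange_weak(word, &old, pack(to, caf)))
                return true;                     /* owner switched atomically */
            /* 'old' now holds the current value; re-check and retry */
        }
    }
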
     8.1 --- a/xen/drivers/char/console.c	Tue Jun 22 13:52:03 2004 +0000
     8.2 +++ b/xen/drivers/char/console.c	Tue Jun 22 16:17:52 2004 +0000
     8.3 @@ -290,7 +290,7 @@ long do_console_io(int cmd, int count, c
     8.4      long  rc;
     8.5  
     8.6  #ifdef NDEBUG
     8.7 -    /* Only domain-0 may access the emrgency console. */
     8.8 +    /* Only domain-0 may access the emergency console. */
     8.9      if ( current->domain != 0 )
    8.10          return -EPERM;
    8.11  #endif
    8.12 @@ -445,7 +445,7 @@ long do_console_write(char *str, unsigne
    8.13  
    8.14      return 0;
    8.15  #else
    8.16 -    if ( !test_and_set_bit(DF_CONSOLEWRITEBUG, &current->flags) )
    8.17 +    if ( !test_and_set_bit(DF_CONWRITEBUG, &current->flags) )
    8.18      {
    8.19          printk("DOM%u is attempting to use the deprecated "
    8.20                 "HYPERVISOR_console_write() interface.\n", current->domain);
     9.1 --- a/xen/include/asm-x86/spinlock.h	Tue Jun 22 13:52:03 2004 +0000
     9.2 +++ b/xen/include/asm-x86/spinlock.h	Tue Jun 22 16:17:52 2004 +0000
     9.3 @@ -6,200 +6,123 @@
     9.4  #include <asm/atomic.h>
     9.5  #include <asm/rwlock.h>
     9.6  
     9.7 -#if 0
     9.8 -#define SPINLOCK_DEBUG	1
     9.9 -#else
    9.10 -#define SPINLOCK_DEBUG	0
    9.11 -#endif
    9.12 -
    9.13 -/*
    9.14 - * Your basic SMP spinlocks, allowing only a single CPU anywhere
    9.15 - */
    9.16 -
    9.17  typedef struct {
    9.18 -	volatile unsigned int lock;
    9.19 -#if SPINLOCK_DEBUG
    9.20 -	unsigned magic;
    9.21 -#endif
    9.22 +    volatile s16 lock;
    9.23 +    s8 recurse_cpu;
    9.24 +    u8 recurse_cnt;
    9.25  } spinlock_t;
    9.26  
    9.27 -#define SPINLOCK_MAGIC	0xdead4ead
    9.28 -
    9.29 -#if SPINLOCK_DEBUG
    9.30 -#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
    9.31 -#else
    9.32 -#define SPINLOCK_MAGIC_INIT	/* */
    9.33 -#endif
    9.34 -
    9.35 -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
    9.36 +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1, -1, 0 }
    9.37  
    9.38  #define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
    9.39 -
    9.40 -/*
    9.41 - * Simple spin lock operations.  There are two variants, one clears IRQ's
    9.42 - * on the local processor, one does not.
    9.43 - *
    9.44 - * We make no fairness assumptions. They have a cost.
    9.45 - */
    9.46 -
    9.47  #define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
    9.48 -#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
    9.49  
    9.50 -#define spin_lock_string \
    9.51 -	"\n1:\t" \
    9.52 -	"lock ; decb %0\n\t" \
    9.53 -	"js 2f\n" \
    9.54 -	".section .text.lock,\"ax\"\n" \
    9.55 -	"2:\t" \
    9.56 -	"cmpb $0,%0\n\t" \
    9.57 -	"rep;nop\n\t" \
    9.58 -	"jle 2b\n\t" \
    9.59 -	"jmp 1b\n" \
    9.60 -	".previous"
    9.61 -
    9.62 -/*
    9.63 - * This works. Despite all the confusion.
    9.64 - * (except on PPro SMP or if we are using OOSTORE)
    9.65 - * (PPro errata 66, 92)
    9.66 - */
    9.67 - 
    9.68 -#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
    9.69 -
    9.70 -#define spin_unlock_string \
    9.71 -	"movb $1,%0" \
    9.72 -		:"=m" (lock->lock) : : "memory"
    9.73 -
    9.74 +static inline void spin_lock(spinlock_t *lock)
    9.75 +{
    9.76 +    __asm__ __volatile__ (
    9.77 +        "1:  lock; decb %0         \n"
    9.78 +        "    js 2f                 \n"
    9.79 +        ".section .text.lock,\"ax\"\n"
    9.80 +        "2:  cmpb $0,%0            \n"
    9.81 +        "    rep; nop              \n"
    9.82 +        "    jle 2b                \n"
    9.83 +        "    jmp 1b                \n"
    9.84 +        ".previous"
    9.85 +        : "=m" (lock->lock) : : "memory" );
    9.86 +}
    9.87  
    9.88  static inline void spin_unlock(spinlock_t *lock)
    9.89  {
    9.90 -#if SPINLOCK_DEBUG
    9.91 -	if (lock->magic != SPINLOCK_MAGIC)
    9.92 -		BUG();
    9.93 -	if (!spin_is_locked(lock))
    9.94 -		BUG();
    9.95 -#endif
    9.96 -	__asm__ __volatile__(
    9.97 -		spin_unlock_string
    9.98 -	);
    9.99 -}
   9.100 -
   9.101 +#if !defined(CONFIG_X86_OOSTORE)
   9.102 +    ASSERT(spin_is_locked(lock));
   9.103 +    __asm__ __volatile__ (
   9.104 +	"movb $1,%0" 
   9.105 +        : "=m" (lock->lock) : : "memory" );
   9.106  #else
   9.107 -
   9.108 -#define spin_unlock_string \
   9.109 -	"xchgb %b0, %1" \
   9.110 -		:"=q" (oldval), "=m" (lock->lock) \
   9.111 -		:"0" (oldval) : "memory"
   9.112 -
   9.113 -static inline void spin_unlock(spinlock_t *lock)
   9.114 -{
   9.115 -	char oldval = 1;
   9.116 -#if SPINLOCK_DEBUG
   9.117 -	if (lock->magic != SPINLOCK_MAGIC)
   9.118 -		BUG();
   9.119 -	if (!spin_is_locked(lock))
   9.120 -		BUG();
   9.121 +    char oldval = 1;
   9.122 +    ASSERT(spin_is_locked(lock));
   9.123 +    __asm__ __volatile__ (
   9.124 +	"xchgb %b0, %1"
   9.125 +        : "=q" (oldval), "=m" (lock->lock) : "0" (oldval) : "memory" );
   9.126  #endif
   9.127 -	__asm__ __volatile__(
   9.128 -		spin_unlock_string
   9.129 -	);
   9.130  }
   9.131  
   9.132 -#endif
   9.133 -
   9.134  static inline int spin_trylock(spinlock_t *lock)
   9.135  {
   9.136 -	char oldval;
   9.137 -	__asm__ __volatile__(
   9.138 -		"xchgb %b0,%1"
   9.139 -		:"=q" (oldval), "=m" (lock->lock)
   9.140 -		:"0" (0) : "memory");
   9.141 -	return oldval > 0;
   9.142 +    char oldval;
   9.143 +    __asm__ __volatile__(
   9.144 +        "xchgb %b0,%1"
   9.145 +        :"=q" (oldval), "=m" (lock->lock)
   9.146 +        :"0" (0) : "memory");
   9.147 +    return oldval > 0;
   9.148  }
   9.149  
   9.150 -static inline void spin_lock(spinlock_t *lock)
   9.151 -{
   9.152 -#if SPINLOCK_DEBUG
   9.153 -	__label__ here;
   9.154 -here:
   9.155 -	if (lock->magic != SPINLOCK_MAGIC) {
   9.156 -printk("eip: %p\n", &&here);
   9.157 -		BUG();
   9.158 -	}
   9.159 -#endif
   9.160 -	__asm__ __volatile__(
   9.161 -		spin_lock_string
   9.162 -		:"=m" (lock->lock) : : "memory");
   9.163 -}
   9.164 -
   9.165 -
   9.166  /*
   9.167 - * Read-write spinlocks, allowing multiple readers
   9.168 - * but only one writer.
   9.169 - *
   9.170 - * NOTE! it is quite common to have readers in interrupts
   9.171 - * but no interrupt writers. For those circumstances we
   9.172 - * can "mix" irq-safe locks - any writer needs to get a
   9.173 - * irq-safe write-lock, but readers can get non-irqsafe
   9.174 - * read-locks.
   9.175 + * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
   9.176 + * reentered recursively on the same CPU. All critical regions that may form
   9.177 + * part of a recursively-nested set must be protected by these forms. If there
   9.178 + * are any critical regions that cannot form part of such a set, they can use
   9.179 + * standard spin_[un]lock().
   9.180   */
   9.181 +#define spin_lock_recursive(_lock)                 \
   9.182 +    do {                                           \
   9.183 +        int cpu = smp_processor_id();              \
   9.184 +        if ( likely((_lock)->recurse_cpu != cpu) ) \
   9.185 +        {                                          \
   9.186 +            spin_lock(_lock);                      \
   9.187 +            (_lock)->recurse_cpu = cpu;            \
   9.188 +        }                                          \
   9.189 +        (_lock)->recurse_cnt++;                    \
   9.190 +    } while ( 0 )
   9.191 +
   9.192 +#define spin_unlock_recursive(_lock)               \
   9.193 +    do {                                           \
   9.194 +        if ( likely(--(_lock)->recurse_cnt == 0) ) \
   9.195 +        {                                          \
   9.196 +            (_lock)->recurse_cpu = -1;             \
   9.197 +            spin_unlock(_lock);                    \
   9.198 +        }                                          \
   9.199 +    } while ( 0 )
   9.200 +
   9.201 +
   9.202  typedef struct {
   9.203 -	volatile unsigned int lock;
   9.204 -#if SPINLOCK_DEBUG
   9.205 -	unsigned magic;
   9.206 -#endif
   9.207 +    volatile unsigned int lock;
   9.208  } rwlock_t;
   9.209  
   9.210 -#define RWLOCK_MAGIC	0xdeaf1eed
   9.211 -
   9.212 -#if SPINLOCK_DEBUG
   9.213 -#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
   9.214 -#else
   9.215 -#define RWLOCK_MAGIC_INIT	/* */
   9.216 -#endif
   9.217 -
   9.218 -#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
   9.219 +#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS }
   9.220  
   9.221  #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
   9.222  
   9.223  /*
   9.224   * On x86, we implement read-write locks as a 32-bit counter
   9.225   * with the high bit (sign) being the "contended" bit.
   9.226 - *
   9.227 - * The inline assembly is non-obvious. Think about it.
   9.228 - *
   9.229 - * Changed to use the same technique as rw semaphores.  See
   9.230 - * semaphore.h for details.  -ben
   9.231   */
   9.232 -/* the spinlock helpers are in arch/x86/kernel/semaphore.c */
   9.233 -
   9.234  static inline void read_lock(rwlock_t *rw)
   9.235  {
   9.236 -#if SPINLOCK_DEBUG
   9.237 -	if (rw->magic != RWLOCK_MAGIC)
   9.238 -		BUG();
   9.239 -#endif
   9.240 -	__build_read_lock(rw, "__read_lock_failed");
   9.241 +    __build_read_lock(rw, "__read_lock_failed");
   9.242  }
   9.243  
   9.244  static inline void write_lock(rwlock_t *rw)
   9.245  {
   9.246 -#if SPINLOCK_DEBUG
   9.247 -	if (rw->magic != RWLOCK_MAGIC)
   9.248 -		BUG();
   9.249 -#endif
   9.250 -	__build_write_lock(rw, "__write_lock_failed");
   9.251 +    __build_write_lock(rw, "__write_lock_failed");
   9.252  }
   9.253  
   9.254 -#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
   9.255 -#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
   9.256 +#define read_unlock(rw)                            \
   9.257 +    __asm__ __volatile__ (                         \
   9.258 +        "lock ; incl %0" :                         \
   9.259 +        "=m" ((rw)->lock) : : "memory" )
   9.260 +#define write_unlock(rw)                           \
   9.261 +    __asm__ __volatile__ (                         \
   9.262 +        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
   9.263 +        "=m" ((rw)->lock) : : "memory" )
   9.264  
   9.265  static inline int write_trylock(rwlock_t *lock)
   9.266  {
   9.267 -	atomic_t *count = (atomic_t *)lock;
   9.268 -	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
   9.269 -		return 1;
   9.270 -	atomic_add(RW_LOCK_BIAS, count);
   9.271 -	return 0;
   9.272 +    atomic_t *count = (atomic_t *)lock;
   9.273 +    if ( atomic_sub_and_test(RW_LOCK_BIAS, count) )
   9.274 +        return 1;
   9.275 +    atomic_add(RW_LOCK_BIAS, count);
   9.276 +    return 0;
   9.277  }
   9.278  
   9.279  #endif /* __ASM_SPINLOCK_H */
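
The new spin_lock_recursive()/spin_unlock_recursive() pair records the owning CPU and a nesting count so that code already inside the critical region (domain_relinquish_memory calling free_domain_page) can take the same lock again. A portable sketch of the idea, using pthreads and the calling thread in place of the CPU id (names are illustrative):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_t       owner;       /* analogue of recurse_cpu */
        int             owned;       /* owner field valid? (recurse_cpu != -1) */
        unsigned int    depth;       /* analogue of recurse_cnt */
    } rlock_t;

    void rlock(rlock_t *l)
    {
        /* As in the Xen macro, the unlocked owner check is safe because a
         * non-owner can never observe its own identity in 'owner'. */
        if (!(l->owned && pthread_equal(l->owner, pthread_self()))) {
            pthread_mutex_lock(&l->lock);
            l->owner = pthread_self();
            l->owned = 1;
        }
        l->depth++;
    }

    void runlock(rlock_t *l)
    {
        if (--l->depth == 0) {       /* only the outermost unlock releases */
            l->owned = 0;
            pthread_mutex_unlock(&l->lock);
        }
    }
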
    10.1 --- a/xen/include/xen/mm.h	Tue Jun 22 13:52:03 2004 +0000
    10.2 +++ b/xen/include/xen/mm.h	Tue Jun 22 16:17:52 2004 +0000
    10.3 @@ -54,12 +54,10 @@ struct pfn_info
    10.4      struct list_head list;
    10.5      /* The following possible uses are context-dependent. */
    10.6      union {
    10.7 -        /* Page is in use and not a zombie: we keep a pointer to its owner. */
    10.8 +        /* Page is in use: we keep a pointer to its owner. */
    10.9          struct domain *domain;
   10.10          /* Page is not currently allocated: mask of possibly-tainted TLBs. */
   10.11          unsigned long cpu_mask;
   10.12 -        /* Page is a zombie: this word currently has no use. */
   10.13 -        unsigned long _unused;
   10.14      } u;
   10.15      /* Reference count and various PGC_xxx flags and fields. */
   10.16      u32 count_and_flags;
   10.17 @@ -85,20 +83,17 @@ struct pfn_info
   10.18   /* 28-bit count of uses of this frame as its current type. */
   10.19  #define PGT_count_mask      ((1<<28)-1)
   10.20  
   10.21 - /* The owner of this page is dead: 'u.domain' is no longer valid. */
   10.22 -#define _PGC_zombie                   31
   10.23 -#define PGC_zombie                    (1<<_PGC_zombie)
   10.24   /* For safety, force a TLB flush when this page's type changes. */
   10.25 -#define _PGC_tlb_flush_on_type_change 30
   10.26 +#define _PGC_tlb_flush_on_type_change 31
   10.27  #define PGC_tlb_flush_on_type_change  (1<<_PGC_tlb_flush_on_type_change)
   10.28   /* Owning guest has pinned this page to its current type? */
   10.29 -#define _PGC_guest_pinned             29
   10.30 +#define _PGC_guest_pinned             30
   10.31  #define PGC_guest_pinned              (1<<_PGC_guest_pinned)
   10.32   /* Cleared when the owning guest 'frees' this page. */
   10.33 -#define _PGC_allocated                28
   10.34 +#define _PGC_allocated                29
   10.35  #define PGC_allocated                 (1<<_PGC_allocated)
   10.36   /* 28-bit count of references to this frame. */
   10.37 -#define PGC_count_mask                ((1<<28)-1)
   10.38 +#define PGC_count_mask                ((1<<29)-1)
   10.39  
   10.40  
   10.41  /* We trust the slab allocator in slab.c, and our use of it. */
   10.42 @@ -160,12 +155,11 @@ static inline int get_page(struct pfn_in
   10.43          p  = np;
   10.44          if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
   10.45               unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
   10.46 -             unlikely(x & PGC_zombie) ||             /* Zombie? */
   10.47               unlikely(p != domain) )                 /* Wrong owner? */
   10.48          {
   10.49              DPRINTK("Error pfn %08lx: ed=%p(%u), sd=%p(%u),"
   10.50                      " caf=%08x, taf=%08x\n",
   10.51 -                    page_to_pfn(page), domain, (domain)?domain->domain:999, 
   10.52 +                    page_to_pfn(page), domain, domain->domain,
   10.53                      p, (p && !((x & PGC_count_mask) == 0))?p->domain:999, 
   10.54                      x, page->type_and_flags);
   10.55              return 0;
   10.56 @@ -173,7 +167,7 @@ static inline int get_page(struct pfn_in
   10.57          __asm__ __volatile__(
   10.58              LOCK_PREFIX "cmpxchg8b %3"
   10.59              : "=a" (np), "=d" (y), "=b" (p),
   10.60 -              "=m" (*(volatile unsigned long long *)(&page->u.domain))
   10.61 +              "=m" (*(volatile u64 *)(&page->u.domain))
   10.62              : "0" (p), "1" (x), "b" (p), "c" (nx) );
   10.63      }
   10.64      while ( unlikely(np != p) || unlikely(y != x) );
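
With PGC_zombie gone, the mm.h hunks above repack count_and_flags: three flag bits now sit above a 29-bit reference count. Summarising the new definitions:

    bit 31     PGC_tlb_flush_on_type_change
    bit 30     PGC_guest_pinned
    bit 29     PGC_allocated
    bits 28-0  reference count (PGC_count_mask == (1<<29)-1)
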
    11.1 --- a/xen/include/xen/sched.h	Tue Jun 22 13:52:03 2004 +0000
    11.2 +++ b/xen/include/xen/sched.h	Tue Jun 22 16:17:52 2004 +0000
    11.3 @@ -194,8 +194,8 @@ extern void domain_destruct(struct domai
    11.4  extern void domain_kill(struct domain *d);
    11.5  extern void domain_crash(void);
    11.6  extern void domain_shutdown(u8 reason);
    11.7 +extern void domain_relinquish_memory(struct domain *d);
    11.8  
    11.9 -/* arch/process.c */
   11.10  void new_thread(struct domain *d,
   11.11                  unsigned long start_pc,
   11.12                  unsigned long start_stack,
   11.13 @@ -206,9 +206,6 @@ extern unsigned long wait_init_idle;
   11.14  
   11.15  extern spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
   11.16  
   11.17 -/*
   11.18 - * Scheduler functions (in schedule.c)
   11.19 - */
   11.20  #define set_current_state(_s) do { current->state = (_s); } while (0)
   11.21  void scheduler_init(void);
   11.22  void schedulers_start(void);
   11.23 @@ -226,7 +223,6 @@ void __enter_scheduler(void);
   11.24  extern void switch_to(struct domain *prev, 
   11.25                        struct domain *next);
   11.26  
   11.27 -
   11.28  void domain_init(void);
   11.29  
   11.30  int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
   11.31 @@ -251,7 +247,7 @@ extern struct domain *task_list;
   11.32  #define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?         */
   11.33  #define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?       */
   11.34  #define DF_PRIVILEGED   5 /* Is this domain privileged?                     */
   11.35 -#define DF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console?  */
   11.36 +#define DF_CONWRITEBUG  6 /* Has this domain used the obsolete console?     */
   11.37  #define DF_PHYSDEV      7 /* May this domain do IO to physical devices?     */
   11.38  #define DF_BLOCKED      8 /* Domain is blocked waiting for an event.        */
   11.39  #define DF_CTRLPAUSE    9 /* Domain is paused by controller software.       */
    12.1 --- a/xen/include/xen/spinlock.h	Tue Jun 22 13:52:03 2004 +0000
    12.2 +++ b/xen/include/xen/spinlock.h	Tue Jun 22 16:17:52 2004 +0000
    12.3 @@ -1,132 +1,74 @@
    12.4 -#ifndef __LINUX_SPINLOCK_H
    12.5 -#define __LINUX_SPINLOCK_H
    12.6 +#ifndef __SPINLOCK_H__
    12.7 +#define __SPINLOCK_H__
    12.8  
    12.9  #include <xen/config.h>
   12.10  #include <asm/system.h>
   12.11  
   12.12 -/*
   12.13 - * These are the generic versions of the spinlocks and read-write
   12.14 - * locks..
   12.15 - */
   12.16 -#define spin_lock_irqsave(lock, flags)		do { local_irq_save(flags);       spin_lock(lock); } while (0)
   12.17 -#define spin_lock_irq(lock)			do { local_irq_disable();         spin_lock(lock); } while (0)
   12.18 +#define spin_lock_irqsave(lock, flags) \
   12.19 +    do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
   12.20 +#define spin_lock_irq(lock) \
   12.21 +    do { local_irq_disable(); spin_lock(lock); } while ( 0 )
   12.22  
   12.23 -#define read_lock_irqsave(lock, flags)		do { local_irq_save(flags);       read_lock(lock); } while (0)
   12.24 -#define read_lock_irq(lock)			do { local_irq_disable();         read_lock(lock); } while (0)
   12.25 +#define read_lock_irqsave(lock, flags) \
   12.26 +    do { local_irq_save(flags); read_lock(lock); } while ( 0 )
   12.27 +#define read_lock_irq(lock) \
   12.28 +    do { local_irq_disable(); read_lock(lock); } while ( 0 )
   12.29 +
   12.30 +#define write_lock_irqsave(lock, flags) \
   12.31 +    do { local_irq_save(flags); write_lock(lock); } while ( 0 )
   12.32 +#define write_lock_irq(lock) \
   12.33 +    do { local_irq_disable(); write_lock(lock); } while ( 0 )
   12.34  
   12.35 -#define write_lock_irqsave(lock, flags)		do { local_irq_save(flags);      write_lock(lock); } while (0)
   12.36 -#define write_lock_irq(lock)			do { local_irq_disable();        write_lock(lock); } while (0)
   12.37 -
   12.38 -#define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock);  local_irq_restore(flags); } while (0)
   12.39 -#define spin_unlock_irq(lock)			do { spin_unlock(lock);  local_irq_enable();       } while (0)
   12.40 +#define spin_unlock_irqrestore(lock, flags) \
   12.41 +    do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
   12.42 +#define spin_unlock_irq(lock) \
   12.43 +    do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
   12.44  
   12.45 -#define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock);  local_irq_restore(flags); } while (0)
   12.46 -#define read_unlock_irq(lock)			do { read_unlock(lock);  local_irq_enable();       } while (0)
   12.47 +#define read_unlock_irqrestore(lock, flags) \
   12.48 +    do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
   12.49 +#define read_unlock_irq(lock) \
   12.50 +    do { read_unlock(lock); local_irq_enable(); } while ( 0 )
   12.51  
   12.52 -#define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
   12.53 -#define write_unlock_irq(lock)			do { write_unlock(lock); local_irq_enable();       } while (0)
   12.54 +#define write_unlock_irqrestore(lock, flags) \
   12.55 +    do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
   12.56 +#define write_unlock_irq(lock) \
   12.57 +    do { write_unlock(lock); local_irq_enable(); } while ( 0 )
   12.58  
   12.59  #ifdef CONFIG_SMP
   12.60 +
   12.61  #include <asm/spinlock.h>
   12.62  
   12.63 -#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
   12.64 -                                  defined (e.g. by including asm/spinlock.h */
   12.65 -
   12.66 -#define DEBUG_SPINLOCKS	0	/* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
   12.67 -
   12.68 -#if (DEBUG_SPINLOCKS < 1)
   12.69 -
   12.70 -#define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
   12.71 -#define ATOMIC_DEC_AND_LOCK
   12.72 +#else
   12.73  
   12.74 -/*
   12.75 - * Your basic spinlocks, allowing only a single CPU anywhere
   12.76 - *
   12.77 - * Most gcc versions have a nasty bug with empty initializers.
   12.78 - */
   12.79  #if (__GNUC__ > 2)
   12.80 -  typedef struct { } spinlock_t;
   12.81 -  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
   12.82 +typedef struct { } spinlock_t;
   12.83 +#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
   12.84  #else
   12.85 -  typedef struct { int gcc_is_buggy; } spinlock_t;
   12.86 -  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
   12.87 +typedef struct { int gcc_is_buggy; } spinlock_t;
   12.88 +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
   12.89  #endif
   12.90  
   12.91 -#define spin_lock_init(lock)	do { } while(0)
   12.92 -#define spin_lock(lock)		(void)(lock) /* Not "unused variable". */
   12.93 -#define spin_is_locked(lock)	(0)
   12.94 -#define spin_trylock(lock)	({1; })
   12.95 -#define spin_unlock_wait(lock)	do { } while(0)
   12.96 -#define spin_unlock(lock)	do { } while(0)
   12.97 -
   12.98 -#elif (DEBUG_SPINLOCKS < 2)
   12.99 -
  12.100 -typedef struct {
  12.101 -	volatile unsigned long lock;
  12.102 -} spinlock_t;
  12.103 -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
  12.104 -
  12.105 -#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
  12.106 -#define spin_is_locked(lock)	(test_bit(0,(lock)))
  12.107 -#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
  12.108 -
  12.109 -#define spin_lock(x)		do { (x)->lock = 1; } while (0)
  12.110 -#define spin_unlock_wait(x)	do { } while (0)
  12.111 -#define spin_unlock(x)		do { (x)->lock = 0; } while (0)
  12.112 -
  12.113 -#else /* (DEBUG_SPINLOCKS >= 2) */
  12.114 -
  12.115 -typedef struct {
  12.116 -	volatile unsigned long lock;
  12.117 -	volatile unsigned int babble;
  12.118 -	const char *module;
  12.119 -} spinlock_t;
  12.120 -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
  12.121 +#define spin_lock_init(lock)    do { } while(0)
  12.122 +#define spin_lock(lock)         (void)(lock) /* Not "unused variable". */
  12.123 +#define spin_is_locked(lock)    (0)
  12.124 +#define spin_trylock(lock)      ({1; })
  12.125 +#define spin_unlock_wait(lock)  do { } while(0)
  12.126 +#define spin_unlock(lock)       do { } while(0)
  12.127  
  12.128 -/*#include <xen/kernel.h>*/
  12.129 -
  12.130 -#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
  12.131 -#define spin_is_locked(lock)	(test_bit(0,(lock)))
  12.132 -#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))
  12.133 -
  12.134 -#define spin_lock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
  12.135 -#define spin_unlock_wait(x)	do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
  12.136 -#define spin_unlock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
  12.137 -
  12.138 -#endif	/* DEBUG_SPINLOCKS */
  12.139 -
  12.140 -/*
  12.141 - * Read-write spinlocks, allowing multiple readers
  12.142 - * but only one writer.
  12.143 - *
  12.144 - * NOTE! it is quite common to have readers in interrupts
  12.145 - * but no interrupt writers. For those circumstances we
  12.146 - * can "mix" irq-safe locks - any writer needs to get a
  12.147 - * irq-safe write-lock, but readers can get non-irqsafe
  12.148 - * read-locks.
  12.149 - *
  12.150 - * Most gcc versions have a nasty bug with empty initializers.
  12.151 - */
  12.152  #if (__GNUC__ > 2)
  12.153 -  typedef struct { } rwlock_t;
  12.154 -  #define RW_LOCK_UNLOCKED (rwlock_t) { }
  12.155 +typedef struct { } rwlock_t;
  12.156 +#define RW_LOCK_UNLOCKED (rwlock_t) { }
  12.157  #else
  12.158 -  typedef struct { int gcc_is_buggy; } rwlock_t;
  12.159 -  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
  12.160 +typedef struct { int gcc_is_buggy; } rwlock_t;
  12.161 +#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
  12.162  #endif
  12.163  
  12.164 -#define rwlock_init(lock)	do { } while(0)
  12.165 -#define read_lock(lock)		(void)(lock) /* Not "unused variable". */
  12.166 -#define read_unlock(lock)	do { } while(0)
  12.167 -#define write_lock(lock)	(void)(lock) /* Not "unused variable". */
  12.168 -#define write_unlock(lock)	do { } while(0)
  12.169 +#define rwlock_init(lock)       do { } while(0)
  12.170 +#define read_lock(lock)         (void)(lock) /* Not "unused variable". */
  12.171 +#define read_unlock(lock)       do { } while(0)
  12.172 +#define write_lock(lock)        (void)(lock) /* Not "unused variable". */
  12.173 +#define write_unlock(lock)      do { } while(0)
  12.174  
  12.175 -#endif /* !SMP */
  12.176 -
  12.177 -/* "lock on reference count zero" */
  12.178 -#ifndef ATOMIC_DEC_AND_LOCK
  12.179 -#include <asm/atomic.h>
  12.180 -extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  12.181  #endif
  12.182  
  12.183 -#endif /* __LINUX_SPINLOCK_H */
  12.184 +#endif /* __SPINLOCK_H__ */
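
The irq-save variants reformatted above simply compose interrupt masking with the plain lock operation, preserving the ordering: disable interrupts, then take the lock; release the lock, then restore interrupts. A short, hedged usage sketch, in which 'my_lock', 'example_critical_section' and the body of the critical section are illustrative names rather than anything defined by this changeset:

    /* Sketch of typical use of the macros above for state shared with
     * interrupt context. */
    static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

    void example_critical_section(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);      /* local_irq_save + spin_lock */
        /* ... manipulate shared state here ... */
        spin_unlock_irqrestore(&my_lock, flags); /* spin_unlock + local_irq_restore */
    }

On a uniprocessor (!CONFIG_SMP) build the lock and unlock expand to no-ops, so only the interrupt masking remains, which is exactly the protection needed in that configuration.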