ia64/xen-unstable

changeset: 1554:da46c0d041f1

bitkeeper revision: 1.1007 (40d9a9b67QWaIdCVPMQU8ujlBGA1nQ)

Remove GFP_* flags from memory allocators.
Something similar may get added back to the buddy allocator if it
gets used for allocating domain memory on some architectures.
author: kaf24@scramble.cl.cam.ac.uk
date: Wed Jun 23 16:03:02 2004 +0000 (2004-06-23)
parents: d1bba652eb21
children: 3ec9f0898ed8
files: xen/arch/x86/acpi.c xen/arch/x86/apic.c xen/arch/x86/io_apic.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/mpparse.c xen/arch/x86/pci-pc.c xen/arch/x86/pdb-stub.c xen/arch/x86/smpboot.c xen/common/ac_timer.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/page_alloc.c xen/common/physdev.c xen/common/resource.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/schedule.c xen/common/shadow.c xen/common/slab.c xen/common/trace.c xen/drivers/char/console.c xen/drivers/pci/pci.c xen/drivers/pci/setup-res.c xen/include/asm-x86/page.h xen/include/asm-x86/x86_64/page.h xen/include/xen/mm.h xen/include/xen/shadow.h xen/include/xen/slab.h
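
The shape of the interface change described above is easiest to see side by side. A minimal sketch, not code from this changeset: the identifiers struct foo, order and foo_cachep are illustrative only, but the allocator signatures match those used throughout the diff below.

    /* Before this changeset: allocators took Linux-style GFP_* flags. */
    void *obj  = kmalloc(sizeof(struct foo), GFP_KERNEL);
    unsigned long page = __get_free_pages(GFP_KERNEL, order);
    void *cobj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

    /* After this changeset: the flags argument is gone entirely. */
    void *obj  = kmalloc(sizeof(struct foo));
    unsigned long page = __get_free_pages(order);
    void *cobj = kmem_cache_alloc(foo_cachep);
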
line diff
     1.1 --- a/xen/arch/x86/acpi.c	Wed Jun 23 16:02:02 2004 +0000
     1.2 +++ b/xen/arch/x86/acpi.c	Wed Jun 23 16:03:02 2004 +0000
     1.3 @@ -578,7 +578,7 @@ static void acpi_create_identity_pmd (vo
     1.4  	pgd_t *pgd;
     1.5  	int i;
     1.6  
     1.7 -	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
     1.8 +	ptep = (pte_t*)__get_free_page();
     1.9  
    1.10  	/* fill page with low mapping */
    1.11  	for (i = 0; i < PTRS_PER_PTE; i++)
     2.1 --- a/xen/arch/x86/apic.c	Wed Jun 23 16:02:02 2004 +0000
     2.2 +++ b/xen/arch/x86/apic.c	Wed Jun 23 16:03:02 2004 +0000
     2.3 @@ -445,7 +445,7 @@ void __init init_apic_mappings(void)
     2.4       * simulate the local APIC and another one for the IO-APIC.
     2.5       */
     2.6      if (!smp_found_config && detect_init_APIC()) {
     2.7 -        apic_phys = get_free_page(GFP_KERNEL);
     2.8 +        apic_phys = get_free_page();
     2.9          apic_phys = __pa(apic_phys);
    2.10      } else
    2.11          apic_phys = mp_lapic_addr;
     3.1 --- a/xen/arch/x86/io_apic.c	Wed Jun 23 16:02:02 2004 +0000
     3.2 +++ b/xen/arch/x86/io_apic.c	Wed Jun 23 16:03:02 2004 +0000
     3.3 @@ -688,7 +688,7 @@ static struct hw_interrupt_type ioapic_l
     3.4  void __init setup_IO_APIC_irqs(void)
     3.5  {
     3.6  	struct IO_APIC_route_entry entry;
     3.7 -	int apic, pin, idx, irq, first_notcon = 1, vector;
     3.8 +	int apic, pin, idx, irq, vector;
     3.9  	unsigned long flags;
    3.10  
    3.11  	printk(KERN_DEBUG "init IO_APIC IRQs\n");
    3.12 @@ -707,14 +707,8 @@ void __init setup_IO_APIC_irqs(void)
    3.13  		entry.dest.logical.logical_dest = target_cpus();
    3.14  
    3.15  		idx = find_irq_entry(apic,pin,mp_INT);
    3.16 -		if (idx == -1) {
    3.17 -			if (first_notcon) {
    3.18 -				printk(KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
    3.19 -				first_notcon = 0;
    3.20 -			} else
    3.21 -				printk(", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
    3.22 +		if (idx == -1)
    3.23  			continue;
    3.24 -		}
    3.25  
    3.26  		entry.trigger = irq_trigger(idx);
    3.27  		entry.polarity = irq_polarity(idx);
    3.28 @@ -758,9 +752,6 @@ void __init setup_IO_APIC_irqs(void)
    3.29  		spin_unlock_irqrestore(&ioapic_lock, flags);
    3.30  	}
    3.31  	}
    3.32 -
    3.33 -	if (!first_notcon)
    3.34 -		printk(" not connected.\n");
    3.35  }
    3.36  
    3.37  /*
     4.1 --- a/xen/arch/x86/irq.c	Wed Jun 23 16:02:02 2004 +0000
     4.2 +++ b/xen/arch/x86/irq.c	Wed Jun 23 16:03:02 2004 +0000
     4.3 @@ -254,7 +254,7 @@ int pirq_guest_bind(struct domain *p, in
     4.4              goto out;
     4.5          }
     4.6  
     4.7 -        action = kmalloc(sizeof(irq_guest_action_t), GFP_KERNEL);
     4.8 +        action = kmalloc(sizeof(irq_guest_action_t));
     4.9          if ( (desc->action = (struct irqaction *)action) == NULL )
    4.10          {
    4.11              DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
     5.1 --- a/xen/arch/x86/mm.c	Wed Jun 23 16:02:02 2004 +0000
     5.2 +++ b/xen/arch/x86/mm.c	Wed Jun 23 16:03:02 2004 +0000
     5.3 @@ -70,7 +70,7 @@ static void __init fixrange_init(unsigne
     5.4      {
     5.5          if ( !l2_pgentry_empty(*l2e) )
     5.6              continue;
     5.7 -        page = (unsigned long)get_free_page(GFP_KERNEL);
     5.8 +        page = (unsigned long)get_free_page();
     5.9          clear_page(page);
    5.10          *l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
    5.11          vaddr += 1 << L2_PAGETABLE_SHIFT;
    5.12 @@ -97,7 +97,7 @@ void __init paging_init(void)
    5.13      fixrange_init(addr, 0, idle_pg_table);
    5.14  
    5.15      /* Create page table for ioremap(). */
    5.16 -    ioremap_pt = (void *)get_free_page(GFP_KERNEL);
    5.17 +    ioremap_pt = (void *)get_free_page();
    5.18      clear_page(ioremap_pt);
    5.19      idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = 
    5.20          mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
    5.21 @@ -109,7 +109,7 @@ void __init paging_init(void)
    5.22                     (RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
    5.23  
    5.24      /* Set up mapping cache for domain pages. */
    5.25 -    mapcache = (unsigned long *)get_free_page(GFP_KERNEL);
    5.26 +    mapcache = (unsigned long *)get_free_page();
    5.27      clear_page(mapcache);
    5.28      idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.29          mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
     6.1 --- a/xen/arch/x86/mpparse.c	Wed Jun 23 16:02:02 2004 +0000
     6.2 +++ b/xen/arch/x86/mpparse.c	Wed Jun 23 16:03:02 2004 +0000
     6.3 @@ -509,7 +509,7 @@ static int __init smp_read_mpc(struct mp
     6.4  	
     6.5  	count = (max_mp_busses * sizeof(int)) * 4;
     6.6  	count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
     6.7 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
     6.8 +	bus_data = (void *)__get_free_pages(get_order(count));
     6.9  	if (!bus_data) {
    6.10  		printk(KERN_ERR "SMP mptable: out of memory!\n");
    6.11  		return 0;
    6.12 @@ -694,7 +694,7 @@ static inline void __init construct_defa
    6.13  		struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
    6.14  	} *bus_data;
    6.15  
    6.16 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(sizeof(*bus_data)));
    6.17 +	bus_data = (void *)__get_free_pages(get_order(sizeof(*bus_data)));
    6.18  	if (!bus_data)
    6.19  		panic("SMP mptable: out of memory!\n");
    6.20  	mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
    6.21 @@ -1171,7 +1171,7 @@ void __init mp_config_acpi_legacy_irqs (
    6.22  
    6.23  	count = (MAX_MP_BUSSES * sizeof(int)) * 4;
    6.24  	count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
    6.25 -	bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
    6.26 +	bus_data = (void *)__get_free_pages(get_order(count));
    6.27  	if (!bus_data) {
    6.28  		panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
    6.29  	}
     7.1 --- a/xen/arch/x86/pci-pc.c	Wed Jun 23 16:02:02 2004 +0000
     7.2 +++ b/xen/arch/x86/pci-pc.c	Wed Jun 23 16:03:02 2004 +0000
     7.3 @@ -1003,7 +1003,7 @@ struct irq_routing_table * __devinit pci
     7.4  
     7.5  	if (!pci_bios_present)
     7.6  		return NULL;
     7.7 -	page = __get_free_page(GFP_KERNEL);
     7.8 +	page = __get_free_page();
     7.9  	if (!page)
    7.10  		return NULL;
    7.11  	opt.table = (struct irq_info *) page;
    7.12 @@ -1030,7 +1030,7 @@ struct irq_routing_table * __devinit pci
    7.13  	if (ret & 0xff00)
    7.14  		printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
    7.15  	else if (opt.size) {
    7.16 -		rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
    7.17 +		rt = kmalloc(sizeof(struct irq_routing_table) + opt.size);
    7.18  		if (rt) {
    7.19  			memset(rt, 0, sizeof(struct irq_routing_table));
    7.20  			rt->size = opt.size + sizeof(struct irq_routing_table);
     8.1 --- a/xen/arch/x86/pdb-stub.c	Wed Jun 23 16:02:02 2004 +0000
     8.2 +++ b/xen/arch/x86/pdb-stub.c	Wed Jun 23 16:03:02 2004 +0000
     8.3 @@ -836,7 +836,7 @@ struct pdb_breakpoint breakpoints;
     8.4  
     8.5  void pdb_bkpt_add (unsigned long cr3, unsigned long address)
     8.6  {
     8.7 -    struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt), GFP_KERNEL);
     8.8 +    struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt));
     8.9      bkpt->cr3 = cr3;
    8.10      bkpt->address = address;
    8.11      list_add(&bkpt->list, &breakpoints.list);
     9.1 --- a/xen/arch/x86/smpboot.c	Wed Jun 23 16:02:02 2004 +0000
     9.2 +++ b/xen/arch/x86/smpboot.c	Wed Jun 23 16:03:02 2004 +0000
     9.3 @@ -406,7 +406,7 @@ void __init start_secondary(void)
     9.4       * At this point, boot CPU has fully initialised the IDT. It is
     9.5       * now safe to make ourselves a private copy.
     9.6       */
     9.7 -    idt_tables[cpu] = kmalloc(IDT_ENTRIES*8, GFP_KERNEL);
     9.8 +    idt_tables[cpu] = kmalloc(IDT_ENTRIES*8);
     9.9      memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
    9.10      *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
    9.11      *(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
    9.12 @@ -669,7 +669,7 @@ static void __init do_boot_cpu (int apic
    9.13      /* So we see what's up. */
    9.14      printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
    9.15  
    9.16 -    stack = __pa(__get_free_pages(GFP_KERNEL, 1));
    9.17 +    stack = __pa(__get_free_pages(1));
    9.18      stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;
    9.19  
    9.20      /* Debug build: detect stack overflow by setting up a guard page. */
    10.1 --- a/xen/common/ac_timer.c	Wed Jun 23 16:02:02 2004 +0000
    10.2 +++ b/xen/common/ac_timer.c	Wed Jun 23 16:03:02 2004 +0000
    10.3 @@ -130,10 +130,9 @@ static int add_entry(struct ac_timer **h
    10.4      if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    10.5      {
    10.6          int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
    10.7 -        struct ac_timer **new_heap = kmalloc(
    10.8 -            limit * sizeof(struct ac_timer *), GFP_KERNEL);
    10.9 +        struct ac_timer **new_heap = kmalloc(limit*sizeof(struct ac_timer *));
   10.10          if ( new_heap == NULL ) BUG();
   10.11 -        memcpy(new_heap, heap, (limit>>1) * sizeof(struct ac_timer *));
   10.12 +        memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
   10.13          for ( i = 0; i < smp_num_cpus; i++ )
   10.14              if ( ac_timers[i].heap == heap )
   10.15                  ac_timers[i].heap = new_heap;
   10.16 @@ -280,7 +279,7 @@ void __init ac_timer_init(void)
   10.17      for ( i = 0; i < smp_num_cpus; i++ )
   10.18      {
   10.19          ac_timers[i].heap = kmalloc(
   10.20 -            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *), GFP_KERNEL);
   10.21 +            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
   10.22          if ( ac_timers[i].heap == NULL ) BUG();
   10.23          SET_HEAP_SIZE(ac_timers[i].heap, 0);
   10.24          SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
    11.1 --- a/xen/common/dom0_ops.c	Wed Jun 23 16:02:02 2004 +0000
    11.2 +++ b/xen/common/dom0_ops.c	Wed Jun 23 16:03:02 2004 +0000
    11.3 @@ -318,7 +318,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    11.4  
    11.5          if ( op->u.getdomaininfo.ctxt != NULL )
    11.6          {
    11.7 -            if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
    11.8 +            if ( (c = kmalloc(sizeof(*c))) == NULL )
    11.9              {
   11.10                  ret = -ENOMEM;
   11.11                  put_domain(d);
    12.1 --- a/xen/common/domain.c	Wed Jun 23 16:02:02 2004 +0000
    12.2 +++ b/xen/common/domain.c	Wed Jun 23 16:03:02 2004 +0000
    12.3 @@ -84,13 +84,13 @@ struct domain *do_createdomain(domid_t d
    12.4          INIT_LIST_HEAD(&p->page_list);
    12.5          p->max_pages = p->tot_pages = 0;
    12.6  
    12.7 -        p->shared_info = (void *)get_free_page(GFP_KERNEL);
    12.8 +        p->shared_info = (void *)get_free_page();
    12.9          memset(p->shared_info, 0, PAGE_SIZE);
   12.10          SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p);
   12.11          machine_to_phys_mapping[virt_to_phys(p->shared_info) >> 
   12.12                                 PAGE_SHIFT] = 0x80000000UL;  /* debug */
   12.13  
   12.14 -        p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
   12.15 +        p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page();
   12.16          memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
   12.17          machine_to_phys_mapping[virt_to_phys(p->mm.perdomain_pt) >> 
   12.18                                 PAGE_SHIFT] = 0x0fffdeadUL;  /* debug */
   12.19 @@ -474,7 +474,7 @@ int final_setup_guestos(struct domain *p
   12.20      int i, rc = 0;
   12.21      full_execution_context_t *c;
   12.22  
   12.23 -    if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
   12.24 +    if ( (c = kmalloc(sizeof(*c))) == NULL )
   12.25          return -ENOMEM;
   12.26  
   12.27      if ( test_bit(DF_CONSTRUCTED, &p->flags) )
    13.1 --- a/xen/common/event_channel.c	Wed Jun 23 16:02:02 2004 +0000
    13.2 +++ b/xen/common/event_channel.c	Wed Jun 23 16:03:02 2004 +0000
    13.3 @@ -48,7 +48,7 @@ static int get_free_port(struct domain *
    13.4          
    13.5          max *= 2;
    13.6          
    13.7 -        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
    13.8 +        chn = kmalloc(max * sizeof(event_channel_t));
    13.9          if ( unlikely(chn == NULL) )
   13.10              return -ENOMEM;
   13.11  
   13.12 @@ -483,8 +483,7 @@ long do_event_channel_op(evtchn_op_t *uo
   13.13  int init_event_channels(struct domain *d)
   13.14  {
   13.15      spin_lock_init(&d->event_channel_lock);
   13.16 -    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
   13.17 -                               GFP_KERNEL);
   13.18 +    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
   13.19      if ( unlikely(d->event_channel == NULL) )
   13.20          return -ENOMEM;
   13.21      d->max_event_channel = INIT_EVENT_CHANNELS;
    14.1 --- a/xen/common/page_alloc.c	Wed Jun 23 16:02:02 2004 +0000
    14.2 +++ b/xen/common/page_alloc.c	Wed Jun 23 16:03:02 2004 +0000
    14.3 @@ -263,7 +263,7 @@ void __init init_page_allocator(unsigned
    14.4  
    14.5  
    14.6  /* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
    14.7 -unsigned long __get_free_pages(int mask, int order)
    14.8 +unsigned long __get_free_pages(int order)
    14.9  {
   14.10      int i, attempts = 0;
   14.11      chunk_head_t *alloc_ch, *spare_ch;
   14.12 @@ -321,7 +321,7 @@ retry:
   14.13          
   14.14      if ( attempts++ < 8 )
   14.15      {
   14.16 -        kmem_cache_reap(0);
   14.17 +        kmem_cache_reap();
   14.18          goto retry;
   14.19      }
   14.20  
    15.1 --- a/xen/common/physdev.c	Wed Jun 23 16:02:02 2004 +0000
    15.2 +++ b/xen/common/physdev.c	Wed Jun 23 16:03:02 2004 +0000
    15.3 @@ -98,7 +98,7 @@ static void add_dev_to_task(struct domai
    15.4          return;
    15.5      }
    15.6  
    15.7 -    if ( !(pdev = kmalloc(sizeof(phys_dev_t), GFP_KERNEL)) )
    15.8 +    if ( (pdev = kmalloc(sizeof(phys_dev_t))) == NULL )
    15.9      {
   15.10          INFO("Error allocating pdev structure.\n");
   15.11          return;
   15.12 @@ -171,8 +171,7 @@ int physdev_pci_access_modify(
   15.13  
   15.14      if ( p->io_bitmap == NULL )
   15.15      {
   15.16 -        p->io_bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
   15.17 -        if ( p->io_bitmap == NULL )
   15.18 +        if ( (p->io_bitmap = kmalloc(IO_BITMAP_BYTES)) == NULL )
   15.19          {
   15.20              rc = -ENOMEM;
   15.21              goto out;
   15.22 @@ -738,7 +737,7 @@ void physdev_init_dom0(struct domain *p)
   15.23          /* Skip bridges and other peculiarities for now. */
   15.24          if ( dev->hdr_type != PCI_HEADER_TYPE_NORMAL )
   15.25              continue;
   15.26 -        pdev = kmalloc(sizeof(phys_dev_t), GFP_KERNEL);
   15.27 +        pdev = kmalloc(sizeof(phys_dev_t));
   15.28          pdev->dev = dev;
   15.29          pdev->flags = ACC_WRITE;
   15.30          pdev->state = 0;
    16.1 --- a/xen/common/resource.c	Wed Jun 23 16:02:02 2004 +0000
    16.2 +++ b/xen/common/resource.c	Wed Jun 23 16:03:02 2004 +0000
    16.3 @@ -220,7 +220,7 @@ int allocate_resource(struct resource *r
    16.4   */
    16.5  struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
    16.6  {
    16.7 -	struct resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
    16.8 +	struct resource *res = kmalloc(sizeof(*res));
    16.9  
   16.10  	if (res) {
   16.11  		memset(res, 0, sizeof(*res));
    17.1 --- a/xen/common/sched_atropos.c	Wed Jun 23 16:02:02 2004 +0000
    17.2 +++ b/xen/common/sched_atropos.c	Wed Jun 23 16:03:02 2004 +0000
    17.3 @@ -528,8 +528,7 @@ static int at_init_scheduler()
    17.4      
    17.5      for ( i = 0; i < NR_CPUS; i++ )
    17.6      {
    17.7 -        schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info),
    17.8 -                                              GFP_KERNEL);
    17.9 +        schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info));
   17.10          if ( schedule_data[i].sched_priv == NULL )
   17.11              return -1;
   17.12          WAITQ(i)->next = WAITQ(i);
   17.13 @@ -592,7 +591,7 @@ static int at_alloc_task(struct domain *
   17.14  {
   17.15      ASSERT(p != NULL);
   17.16  
   17.17 -    p->sched_priv = kmem_cache_alloc(dom_info_cache, GFP_KERNEL);
   17.18 +    p->sched_priv = kmem_cache_alloc(dom_info_cache);
   17.19      if( p->sched_priv == NULL )
   17.20          return -1;
   17.21  
    18.1 --- a/xen/common/sched_bvt.c	Wed Jun 23 16:02:02 2004 +0000
    18.2 +++ b/xen/common/sched_bvt.c	Wed Jun 23 16:03:02 2004 +0000
    18.3 @@ -96,7 +96,7 @@ static void __calc_evt(struct bvt_dom_in
    18.4   */
    18.5  int bvt_alloc_task(struct domain *p)
    18.6  {
    18.7 -    if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache,GFP_KERNEL)) == NULL )
    18.8 +    if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache)) == NULL )
    18.9          return -1;
   18.10      
   18.11      return 0;
   18.12 @@ -410,8 +410,7 @@ int bvt_init_scheduler()
   18.13  
   18.14      for ( i = 0; i < NR_CPUS; i++ )
   18.15      {
   18.16 -        CPU_INFO(i) = kmalloc(sizeof(struct bvt_cpu_info), GFP_KERNEL);
   18.17 -
   18.18 +        CPU_INFO(i) = kmalloc(sizeof(struct bvt_cpu_info));
   18.19          if ( CPU_INFO(i) == NULL )
   18.20          {
   18.21              printk("Failed to allocate BVT scheduler per-CPU memory!\n");
    19.1 --- a/xen/common/schedule.c	Wed Jun 23 16:02:02 2004 +0000
    19.2 +++ b/xen/common/schedule.c	Wed Jun 23 16:03:02 2004 +0000
    19.3 @@ -110,7 +110,7 @@ struct domain *alloc_domain_struct(void)
    19.4  {
    19.5      struct domain *d;
    19.6  
    19.7 -    if ( (d = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
    19.8 +    if ( (d = kmem_cache_alloc(domain_struct_cachep)) == NULL )
    19.9          return NULL;
   19.10      
   19.11      memset(d, 0, sizeof(*d));
    20.1 --- a/xen/common/shadow.c	Wed Jun 23 16:02:02 2004 +0000
    20.2 +++ b/xen/common/shadow.c	Wed Jun 23 16:03:02 2004 +0000
    20.3 @@ -242,22 +242,18 @@ int shadow_mode_enable( struct domain *p
    20.4      m->shadow_mode = mode;
    20.5   
    20.6      // allocate hashtable
    20.7 -    m->shadow_ht = kmalloc( shadow_ht_buckets * 
    20.8 -                            sizeof(struct shadow_status), GFP_KERNEL );
    20.9 -    if( ! m->shadow_ht )
   20.10 +    m->shadow_ht = kmalloc(shadow_ht_buckets * 
   20.11 +                           sizeof(struct shadow_status));
   20.12 +    if( m->shadow_ht == NULL )
   20.13          goto nomem;
   20.14  
   20.15 -    memset( m->shadow_ht, 0, shadow_ht_buckets * 
   20.16 -            sizeof(struct shadow_status) );
   20.17 -
   20.18 +    memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
   20.19  
   20.20      // allocate space for first lot of extra nodes
   20.21 -    m->shadow_ht_extras = kmalloc( sizeof(void*) + 
   20.22 -								   (shadow_ht_extra_size * 
   20.23 -									sizeof(struct shadow_status)),
   20.24 -								   GFP_KERNEL );
   20.25 -
   20.26 -    if( ! m->shadow_ht_extras )
   20.27 +    m->shadow_ht_extras = kmalloc(sizeof(void*) + 
   20.28 +                                  (shadow_ht_extra_size * 
   20.29 +                                   sizeof(struct shadow_status)));
   20.30 +    if( m->shadow_ht_extras == NULL )
   20.31          goto nomem;
   20.32  
   20.33      memset( m->shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size * 
   20.34 @@ -280,9 +276,8 @@ int shadow_mode_enable( struct domain *p
   20.35      {
   20.36          m->shadow_dirty_bitmap_size = (p->max_pages+63)&(~63);
   20.37          m->shadow_dirty_bitmap = 
   20.38 -            kmalloc( m->shadow_dirty_bitmap_size/8, GFP_KERNEL );
   20.39 -
   20.40 -        if( !m->shadow_dirty_bitmap  )
   20.41 +            kmalloc( m->shadow_dirty_bitmap_size/8);
   20.42 +        if( m->shadow_dirty_bitmap == NULL )
   20.43          {
   20.44              m->shadow_dirty_bitmap_size = 0;
   20.45              goto nomem;
    21.1 --- a/xen/common/slab.c	Wed Jun 23 16:02:02 2004 +0000
    21.2 +++ b/xen/common/slab.c	Wed Jun 23 16:03:02 2004 +0000
    21.3 @@ -23,10 +23,6 @@
    21.4   * page long) and always contiguous), and each slab contains multiple
    21.5   * initialized objects.
    21.6   *
    21.7 - * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
    21.8 - * normal). If you need a special memory type, then must create a new
    21.9 - * cache for that memory type.
   21.10 - *
   21.11   * In order to reduce fragmentation, the slabs are sorted in 3 groups:
   21.12   *   full slabs with 0 free objects
   21.13   *   partial slabs
   21.14 @@ -51,32 +47,6 @@
   21.15   *	are accessed without any locking.
   21.16   *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
   21.17   *  The non-constant members are protected with a per-cache irq spinlock.
   21.18 - *
   21.19 - * Further notes from the original documentation:
   21.20 - *
   21.21 - * 11 April '97.  Started multi-threading - markhe
   21.22 - *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
   21.23 - *	The sem is only needed when accessing/extending the cache-chain, which
   21.24 - *	can never happen inside an interrupt (kmem_cache_create(),
   21.25 - *	kmem_cache_shrink() and kmem_cache_reap()).
   21.26 - *
   21.27 - *	To prevent kmem_cache_shrink() trying to shrink a 'growing' cache (which
   21.28 - *	maybe be sleeping and therefore not holding the semaphore/lock), the
   21.29 - *	growing field is used.  This also prevents reaping from a cache.
   21.30 - *
   21.31 - *	At present, each engine can be growing a cache.  This should be blocked.
   21.32 - *
   21.33 - */
   21.34 -
   21.35 -/*
   21.36 - * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
   21.37 - *		  SLAB_RED_ZONE & SLAB_POISON.
   21.38 - *		  0 for faster, smaller code (especially in the critical paths).
   21.39 - *
   21.40 - * STATS	- 1 to collect stats for /proc/slabinfo.
   21.41 - *		  0 for faster, smaller code (especially in the critical paths).
   21.42 - *
   21.43 - * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
   21.44   */
   21.45  
   21.46  #include <xen/config.h>
   21.47 @@ -90,7 +60,16 @@
   21.48  #include <xen/smp.h>
   21.49  #include <xen/sched.h>
   21.50  
   21.51 -
   21.52 +/*
   21.53 + * DEBUG  - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
   21.54 + *	    SLAB_RED_ZONE & SLAB_POISON.
   21.55 + *	    0 for faster, smaller code (especially in the critical paths).
   21.56 + *
   21.57 + * STATS  - 1 to collect stats for /proc/slabinfo.
   21.58 + *	    0 for faster, smaller code (especially in the critical paths).
   21.59 + *
   21.60 + * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
   21.61 + */
   21.62  #ifdef CONFIG_DEBUG_SLAB
   21.63  #define	DEBUG		1
   21.64  #define	STATS		1
   21.65 @@ -112,11 +91,11 @@
   21.66  
   21.67  /* Legal flag mask for kmem_cache_create(). */
   21.68  #if DEBUG
   21.69 -# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
   21.70 +#define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
   21.71  			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
   21.72 -			 SLAB_NO_REAP | SLAB_CACHE_DMA)
   21.73 +			 SLAB_NO_REAP)
   21.74  #else
   21.75 -# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | SLAB_CACHE_DMA)
   21.76 +#define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP)
   21.77  #endif
   21.78  
   21.79  /*
   21.80 @@ -155,11 +134,11 @@ static unsigned long offslab_limit;
   21.81   * Slabs are chained into three list: fully used, partial, fully free slabs.
   21.82   */
   21.83  typedef struct slab_s {
   21.84 -	struct list_head	list;
   21.85 -	unsigned long		colouroff;
   21.86 -	void			*s_mem;		/* including colour offset */
   21.87 -	unsigned int		inuse;		/* num of objs active in slab */
   21.88 -	kmem_bufctl_t		free;
   21.89 +    struct list_head list;
   21.90 +    unsigned long    colouroff;
   21.91 +    void            *s_mem;    /* including colour offset */
   21.92 +    unsigned int     inuse;    /* num of objs active in slab */
   21.93 +    kmem_bufctl_t    free;
   21.94  } slab_t;
   21.95  
   21.96  #define slab_bufctl(slabp) \
   21.97 @@ -173,8 +152,8 @@ typedef struct slab_s {
   21.98   * footprint.
   21.99   */
  21.100  typedef struct cpucache_s {
  21.101 -	unsigned int avail;
  21.102 -	unsigned int limit;
  21.103 +    unsigned int avail;
  21.104 +    unsigned int limit;
  21.105  } cpucache_t;
  21.106  
  21.107  #define cc_entry(cpucache) \
  21.108 @@ -191,59 +170,55 @@ typedef struct cpucache_s {
  21.109  
  21.110  struct kmem_cache_s {
  21.111  /* 1) each alloc & free */
  21.112 -	/* full, partial first, then free */
  21.113 -	struct list_head	slabs_full;
  21.114 -	struct list_head	slabs_partial;
  21.115 -	struct list_head	slabs_free;
  21.116 -	unsigned int		objsize;
  21.117 -	unsigned int	 	flags;	/* constant flags */
  21.118 -	unsigned int		num;	/* # of objs per slab */
  21.119 -	spinlock_t		spinlock;
  21.120 +    /* full, partial first, then free */
  21.121 +    struct list_head	slabs_full;
  21.122 +    struct list_head	slabs_partial;
  21.123 +    struct list_head	slabs_free;
  21.124 +    unsigned int		objsize;
  21.125 +    unsigned int	 	flags;	/* constant flags */
  21.126 +    unsigned int		num;	/* # of objs per slab */
  21.127 +    spinlock_t		spinlock;
  21.128  #ifdef CONFIG_SMP
  21.129 -	unsigned int		batchcount;
  21.130 +    unsigned int		batchcount;
  21.131  #endif
  21.132  
  21.133  /* 2) slab additions /removals */
  21.134 -	/* order of pgs per slab (2^n) */
  21.135 -	unsigned int		gfporder;
  21.136 -
  21.137 -	/* force GFP flags, e.g. GFP_DMA */
  21.138 -	unsigned int		gfpflags;
  21.139 +    /* order of pgs per slab (2^n) */
  21.140 +    unsigned int		gfporder;
  21.141 +    size_t			colour;		/* cache colouring range */
  21.142 +    unsigned int		colour_off;	/* colour offset */
  21.143 +    unsigned int		colour_next;	/* cache colouring */
  21.144 +    kmem_cache_t		*slabp_cache;
  21.145 +    unsigned int		growing;
  21.146 +    unsigned int		dflags;		/* dynamic flags */
  21.147  
  21.148 -	size_t			colour;		/* cache colouring range */
  21.149 -	unsigned int		colour_off;	/* colour offset */
  21.150 -	unsigned int		colour_next;	/* cache colouring */
  21.151 -	kmem_cache_t		*slabp_cache;
  21.152 -	unsigned int		growing;
  21.153 -	unsigned int		dflags;		/* dynamic flags */
  21.154 +    /* constructor func */
  21.155 +    void (*ctor)(void *, kmem_cache_t *, unsigned long);
  21.156  
  21.157 -	/* constructor func */
  21.158 -	void (*ctor)(void *, kmem_cache_t *, unsigned long);
  21.159 +    /* de-constructor func */
  21.160 +    void (*dtor)(void *, kmem_cache_t *, unsigned long);
  21.161  
  21.162 -	/* de-constructor func */
  21.163 -	void (*dtor)(void *, kmem_cache_t *, unsigned long);
  21.164 -
  21.165 -	unsigned long		failures;
  21.166 +    unsigned long		failures;
  21.167  
  21.168  /* 3) cache creation/removal */
  21.169 -	char			name[CACHE_NAMELEN];
  21.170 -	struct list_head	next;
  21.171 +    char			name[CACHE_NAMELEN];
  21.172 +    struct list_head	next;
  21.173  #ifdef CONFIG_SMP
  21.174  /* 4) per-cpu data */
  21.175 -	cpucache_t		*cpudata[NR_CPUS];
  21.176 +    cpucache_t		*cpudata[NR_CPUS];
  21.177  #endif
  21.178  #if STATS
  21.179 -	unsigned long		num_active;
  21.180 -	unsigned long		num_allocations;
  21.181 -	unsigned long		high_mark;
  21.182 -	unsigned long		grown;
  21.183 -	unsigned long		reaped;
  21.184 -	unsigned long 		errors;
  21.185 +    unsigned long		num_active;
  21.186 +    unsigned long		num_allocations;
  21.187 +    unsigned long		high_mark;
  21.188 +    unsigned long		grown;
  21.189 +    unsigned long		reaped;
  21.190 +    unsigned long 		errors;
  21.191  #ifdef CONFIG_SMP
  21.192 -	atomic_t		allochit;
  21.193 -	atomic_t		allocmiss;
  21.194 -	atomic_t		freehit;
  21.195 -	atomic_t		freemiss;
  21.196 +    atomic_t		allochit;
  21.197 +    atomic_t		allocmiss;
  21.198 +    atomic_t		freehit;
  21.199 +    atomic_t		freemiss;
  21.200  #endif
  21.201  #endif
  21.202  };
  21.203 @@ -331,40 +306,34 @@ static int slab_break_gfp_order = BREAK_
  21.204  
  21.205  /* Size description struct for general caches. */
  21.206  typedef struct cache_sizes {
  21.207 -	size_t		 cs_size;
  21.208 -	kmem_cache_t	*cs_cachep;
  21.209 -	kmem_cache_t	*cs_dmacachep;
  21.210 +    size_t		 cs_size;
  21.211 +    kmem_cache_t	*cs_cachep;
  21.212  } cache_sizes_t;
  21.213  
  21.214  static cache_sizes_t cache_sizes[] = {
  21.215 -#if PAGE_SIZE == 4096
  21.216 -	{    32,	NULL, NULL},
  21.217 -#endif
  21.218 -	{    64,	NULL, NULL},
  21.219 -	{   128,	NULL, NULL},
  21.220 -	{   256,	NULL, NULL},
  21.221 -	{   512,	NULL, NULL},
  21.222 -	{  1024,	NULL, NULL},
  21.223 -	{  2048,	NULL, NULL},
  21.224 -	{  4096,	NULL, NULL},
  21.225 -	{  8192,	NULL, NULL},
  21.226 -	{ 16384,	NULL, NULL},
  21.227 -	{ 32768,	NULL, NULL},
  21.228 -	{ 65536,	NULL, NULL},
  21.229 -	{131072,	NULL, NULL},
  21.230 -	{     0,	NULL, NULL}
  21.231 +    {    32,	NULL},
  21.232 +    {    64,	NULL},
  21.233 +    {   128,	NULL},
  21.234 +    {   256,	NULL},
  21.235 +    {   512,	NULL},
  21.236 +    {  1024,	NULL},
  21.237 +    {  2048,	NULL},
  21.238 +    {  4096,	NULL},
  21.239 +    {  8192,	NULL},
  21.240 +    { 16384,	NULL},
  21.241 +    {     0,	NULL}
  21.242  };
  21.243  
  21.244  /* internal cache of cache description objs */
  21.245  static kmem_cache_t cache_cache = {
  21.246 -	slabs_full:	LIST_HEAD_INIT(cache_cache.slabs_full),
  21.247 -	slabs_partial:	LIST_HEAD_INIT(cache_cache.slabs_partial),
  21.248 -	slabs_free:	LIST_HEAD_INIT(cache_cache.slabs_free),
  21.249 -	objsize:	sizeof(kmem_cache_t),
  21.250 -	flags:		SLAB_NO_REAP,
  21.251 -	spinlock:	SPIN_LOCK_UNLOCKED,
  21.252 -	colour_off:	L1_CACHE_BYTES,
  21.253 -	name:		"kmem_cache",
  21.254 +    slabs_full:    LIST_HEAD_INIT(cache_cache.slabs_full),
  21.255 +    slabs_partial: LIST_HEAD_INIT(cache_cache.slabs_partial),
  21.256 +    slabs_free:    LIST_HEAD_INIT(cache_cache.slabs_free),
  21.257 +    objsize:       sizeof(kmem_cache_t),
  21.258 +    flags:         SLAB_NO_REAP,
  21.259 +    spinlock:      SPIN_LOCK_UNLOCKED,
  21.260 +    colour_off:    L1_CACHE_BYTES,
  21.261 +    name:          "kmem_cache"
  21.262  };
  21.263  
  21.264  /* Guard access to the cache-chain. */
  21.265 @@ -392,47 +361,47 @@ static void enable_all_cpucaches (void);
  21.266  
  21.267  /* Cal the num objs, wastage, and bytes left over for a given slab size. */
  21.268  static void kmem_cache_estimate (unsigned long gfporder, size_t size,
  21.269 -		 int flags, size_t *left_over, unsigned int *num)
  21.270 +                                 int flags, size_t *left_over, unsigned int *num)
  21.271  {
  21.272 -	int i;
  21.273 -	size_t wastage = PAGE_SIZE<<gfporder;
  21.274 -	size_t extra = 0;
  21.275 -	size_t base = 0;
  21.276 +    int i;
  21.277 +    size_t wastage = PAGE_SIZE<<gfporder;
  21.278 +    size_t extra = 0;
  21.279 +    size_t base = 0;
  21.280  
  21.281 -	if (!(flags & CFLGS_OFF_SLAB)) {
  21.282 -		base = sizeof(slab_t);
  21.283 -		extra = sizeof(kmem_bufctl_t);
  21.284 -	}
  21.285 -	i = 0;
  21.286 -	while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
  21.287 -		i++;
  21.288 -	if (i > 0)
  21.289 -		i--;
  21.290 +    if (!(flags & CFLGS_OFF_SLAB)) {
  21.291 +        base = sizeof(slab_t);
  21.292 +        extra = sizeof(kmem_bufctl_t);
  21.293 +    }
  21.294 +    i = 0;
  21.295 +    while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
  21.296 +        i++;
  21.297 +    if (i > 0)
  21.298 +        i--;
  21.299  
  21.300 -	if (i > SLAB_LIMIT)
  21.301 -		i = SLAB_LIMIT;
  21.302 +    if (i > SLAB_LIMIT)
  21.303 +        i = SLAB_LIMIT;
  21.304  
  21.305 -	*num = i;
  21.306 -	wastage -= i*size;
  21.307 -	wastage -= L1_CACHE_ALIGN(base+i*extra);
  21.308 -	*left_over = wastage;
  21.309 +    *num = i;
  21.310 +    wastage -= i*size;
  21.311 +    wastage -= L1_CACHE_ALIGN(base+i*extra);
  21.312 +    *left_over = wastage;
  21.313  }
  21.314  
  21.315  /* Initialisation - setup the `cache' cache. */
  21.316  void __init kmem_cache_init(void)
  21.317  {
  21.318 -	size_t left_over;
  21.319 +    size_t left_over;
  21.320  
  21.321 -	init_MUTEX(&cache_chain_sem);
  21.322 -	INIT_LIST_HEAD(&cache_chain);
  21.323 +    init_MUTEX(&cache_chain_sem);
  21.324 +    INIT_LIST_HEAD(&cache_chain);
  21.325  
  21.326 -	kmem_cache_estimate(0, cache_cache.objsize, 0,
  21.327 +    kmem_cache_estimate(0, cache_cache.objsize, 0,
  21.328  			&left_over, &cache_cache.num);
  21.329 -	if (!cache_cache.num)
  21.330 -		BUG();
  21.331 +    if (!cache_cache.num)
  21.332 +        BUG();
  21.333  
  21.334 -	cache_cache.colour = left_over/cache_cache.colour_off;
  21.335 -	cache_cache.colour_next = 0;
  21.336 +    cache_cache.colour = left_over/cache_cache.colour_off;
  21.337 +    cache_cache.colour_next = 0;
  21.338  }
  21.339  
  21.340  
  21.341 @@ -441,117 +410,106 @@ void __init kmem_cache_init(void)
  21.342   */
  21.343  void __init kmem_cache_sizes_init(unsigned long num_physpages)
  21.344  {
  21.345 -	cache_sizes_t *sizes = cache_sizes;
  21.346 -	char name[20];
  21.347 -	/*
  21.348 -	 * Fragmentation resistance on low memory - only use bigger
  21.349 -	 * page orders on machines with more than 32MB of memory.
  21.350 -	 */
  21.351 -	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
  21.352 -		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
  21.353 -	do {
  21.354 -		/* For performance, all the general caches are L1 aligned.
  21.355 -		 * This should be particularly beneficial on SMP boxes, as it
  21.356 -		 * eliminates "false sharing".
  21.357 -		 * Note for systems short on memory removing the alignment will
  21.358 -		 * allow tighter packing of the smaller caches. */
  21.359 -		sprintf(name,"size-%Zd",sizes->cs_size);
  21.360 -		if (!(sizes->cs_cachep =
  21.361 -			kmem_cache_create(name, sizes->cs_size,
  21.362 -					0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
  21.363 -			BUG();
  21.364 -		}
  21.365 +    cache_sizes_t *sizes = cache_sizes;
  21.366 +    char name[20];
  21.367 +    /*
  21.368 +     * Fragmentation resistance on low memory - only use bigger
  21.369 +     * page orders on machines with more than 32MB of memory.
  21.370 +     */
  21.371 +    if (num_physpages > (32 << 20) >> PAGE_SHIFT)
  21.372 +        slab_break_gfp_order = BREAK_GFP_ORDER_HI;
  21.373 +    do {
  21.374 +        /* For performance, all the general caches are L1 aligned.
  21.375 +         * This should be particularly beneficial on SMP boxes, as it
  21.376 +         * eliminates "false sharing".
  21.377 +         * Note for systems short on memory removing the alignment will
  21.378 +         * allow tighter packing of the smaller caches. */
  21.379 +        sprintf(name,"size-%Zd",sizes->cs_size);
  21.380 +        if (!(sizes->cs_cachep =
  21.381 +              kmem_cache_create(name, sizes->cs_size,
  21.382 +                                0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
  21.383 +            BUG();
  21.384 +        }
  21.385  
  21.386 -		/* Inc off-slab bufctl limit until the ceiling is hit. */
  21.387 -		if (!(OFF_SLAB(sizes->cs_cachep))) {
  21.388 -			offslab_limit = sizes->cs_size-sizeof(slab_t);
  21.389 -			offslab_limit /= 2;
  21.390 -		}
  21.391 -		sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
  21.392 -		sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
  21.393 -			      SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
  21.394 -		if (!sizes->cs_dmacachep)
  21.395 -			BUG();
  21.396 -		sizes++;
  21.397 -	} while (sizes->cs_size);
  21.398 +        /* Inc off-slab bufctl limit until the ceiling is hit. */
  21.399 +        if (!(OFF_SLAB(sizes->cs_cachep))) {
  21.400 +            offslab_limit = sizes->cs_size-sizeof(slab_t);
  21.401 +            offslab_limit /= 2;
  21.402 +        }
  21.403 +        sizes++;
  21.404 +    } while (sizes->cs_size);
  21.405  }
  21.406  
  21.407  int __init kmem_cpucache_init(void)
  21.408  {
  21.409  #ifdef CONFIG_SMP
  21.410 -	g_cpucache_up = 1;
  21.411 -	enable_all_cpucaches();
  21.412 +    g_cpucache_up = 1;
  21.413 +    enable_all_cpucaches();
  21.414  #endif
  21.415 -	return 0;
  21.416 +    return 0;
  21.417  }
  21.418  
  21.419  /*__initcall(kmem_cpucache_init);*/
  21.420  
  21.421  /* Interface to system's page allocator. No need to hold the cache-lock.
  21.422   */
  21.423 -static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
  21.424 +static inline void *kmem_getpages(kmem_cache_t *cachep)
  21.425  {
  21.426 -	void	*addr;
  21.427 +    void *addr;
  21.428  
  21.429 -	/*
  21.430 -	 * If we requested dmaable memory, we will get it. Even if we
  21.431 -	 * did not request dmaable memory, we might get it, but that
  21.432 -	 * would be relatively rare and ignorable.
  21.433 -	 */
  21.434 -	flags |= cachep->gfpflags;
  21.435 -	addr = (void*) __get_free_pages(flags, cachep->gfporder);
  21.436 -	/* Assume that now we have the pages no one else can legally
  21.437 -	 * messes with the 'struct page's.
  21.438 -	 * However vm_scan() might try to test the structure to see if
  21.439 -	 * it is a named-page or buffer-page.  The members it tests are
  21.440 -	 * of no interest here.....
  21.441 -	 */
  21.442 -	return addr;
  21.443 +    addr = (void*) __get_free_pages(cachep->gfporder);
  21.444 +    /* Assume that now we have the pages no one else can legally
  21.445 +     * messes with the 'struct page's.
  21.446 +     * However vm_scan() might try to test the structure to see if
  21.447 +     * it is a named-page or buffer-page.  The members it tests are
  21.448 +     * of no interest here.....
  21.449 +     */
  21.450 +    return addr;
  21.451  }
  21.452  
  21.453  /* Interface to system's page release. */
  21.454  static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
  21.455  {
  21.456 -	unsigned long i = (1<<cachep->gfporder);
  21.457 -	struct pfn_info *page = virt_to_page(addr);
  21.458 +    unsigned long i = (1<<cachep->gfporder);
  21.459 +    struct pfn_info *page = virt_to_page(addr);
  21.460  
  21.461 -	/* free_pages() does not clear the type bit - we do that.
  21.462 -	 * The pages have been unlinked from their cache-slab,
  21.463 -	 * but their 'struct page's might be accessed in
  21.464 -	 * vm_scan(). Shouldn't be a worry.
  21.465 -	 */
  21.466 -	while (i--) {
  21.467 -		PageClearSlab(page);
  21.468 -		page++;
  21.469 -	}
  21.470 +    /* free_pages() does not clear the type bit - we do that.
  21.471 +     * The pages have been unlinked from their cache-slab,
  21.472 +     * but their 'struct page's might be accessed in
  21.473 +     * vm_scan(). Shouldn't be a worry.
  21.474 +     */
  21.475 +    while (i--) {
  21.476 +        PageClearSlab(page);
  21.477 +        page++;
  21.478 +    }
  21.479  
  21.480 -	free_pages((unsigned long)addr, cachep->gfporder);
  21.481 +    free_pages((unsigned long)addr, cachep->gfporder);
  21.482  }
  21.483  
  21.484  #if DEBUG
  21.485  static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
  21.486  {
  21.487 -	int size = cachep->objsize;
  21.488 -	if (cachep->flags & SLAB_RED_ZONE) {
  21.489 -		addr += BYTES_PER_WORD;
  21.490 -		size -= 2*BYTES_PER_WORD;
  21.491 -	}
  21.492 -	memset(addr, POISON_BYTE, size);
  21.493 -	*(unsigned char *)(addr+size-1) = POISON_END;
  21.494 +    int size = cachep->objsize;
  21.495 +    if (cachep->flags & SLAB_RED_ZONE) {
  21.496 +        addr += BYTES_PER_WORD;
  21.497 +        size -= 2*BYTES_PER_WORD;
  21.498 +    }
  21.499 +    memset(addr, POISON_BYTE, size);
  21.500 +    *(unsigned char *)(addr+size-1) = POISON_END;
  21.501  }
  21.502  
  21.503  static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
  21.504  {
  21.505 -	int size = cachep->objsize;
  21.506 -	void *end;
  21.507 -	if (cachep->flags & SLAB_RED_ZONE) {
  21.508 -		addr += BYTES_PER_WORD;
  21.509 -		size -= 2*BYTES_PER_WORD;
  21.510 -	}
  21.511 -	end = memchr(addr, POISON_END, size);
  21.512 -	if (end != (addr+size-1))
  21.513 -		return 1;
  21.514 -	return 0;
  21.515 +    int size = cachep->objsize;
  21.516 +    void *end;
  21.517 +    if (cachep->flags & SLAB_RED_ZONE) {
  21.518 +        addr += BYTES_PER_WORD;
  21.519 +        size -= 2*BYTES_PER_WORD;
  21.520 +    }
  21.521 +    end = memchr(addr, POISON_END, size);
  21.522 +    if (end != (addr+size-1))
  21.523 +        return 1;
  21.524 +    return 0;
  21.525  }
  21.526  #endif
  21.527  
  21.528 @@ -561,40 +519,40 @@ static inline int kmem_check_poison_obj 
  21.529   */
  21.530  static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
  21.531  {
  21.532 -	if (cachep->dtor
  21.533 +    if (cachep->dtor
  21.534  #if DEBUG
  21.535 -		|| cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
  21.536 +        || cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
  21.537  #endif
  21.538  	) {
  21.539 -		int i;
  21.540 -		for (i = 0; i < cachep->num; i++) {
  21.541 -			void* objp = slabp->s_mem+cachep->objsize*i;
  21.542 +        int i;
  21.543 +        for (i = 0; i < cachep->num; i++) {
  21.544 +            void* objp = slabp->s_mem+cachep->objsize*i;
  21.545  #if DEBUG
  21.546 -			if (cachep->flags & SLAB_RED_ZONE) {
  21.547 -				if (*((unsigned long*)(objp)) != RED_MAGIC1)
  21.548 -					BUG();
  21.549 -				if (*((unsigned long*)(objp + cachep->objsize
  21.550 -						-BYTES_PER_WORD)) != RED_MAGIC1)
  21.551 -					BUG();
  21.552 -				objp += BYTES_PER_WORD;
  21.553 -			}
  21.554 +            if (cachep->flags & SLAB_RED_ZONE) {
  21.555 +                if (*((unsigned long*)(objp)) != RED_MAGIC1)
  21.556 +                    BUG();
  21.557 +                if (*((unsigned long*)(objp + cachep->objsize
  21.558 +                                       -BYTES_PER_WORD)) != RED_MAGIC1)
  21.559 +                    BUG();
  21.560 +                objp += BYTES_PER_WORD;
  21.561 +            }
  21.562  #endif
  21.563 -			if (cachep->dtor)
  21.564 -				(cachep->dtor)(objp, cachep, 0);
  21.565 +            if (cachep->dtor)
  21.566 +                (cachep->dtor)(objp, cachep, 0);
  21.567  #if DEBUG
  21.568 -			if (cachep->flags & SLAB_RED_ZONE) {
  21.569 -				objp -= BYTES_PER_WORD;
  21.570 -			}	
  21.571 -			if ((cachep->flags & SLAB_POISON)  &&
  21.572 -				kmem_check_poison_obj(cachep, objp))
  21.573 -				BUG();
  21.574 +            if (cachep->flags & SLAB_RED_ZONE) {
  21.575 +                objp -= BYTES_PER_WORD;
  21.576 +            }	
  21.577 +            if ((cachep->flags & SLAB_POISON)  &&
  21.578 +                kmem_check_poison_obj(cachep, objp))
  21.579 +                BUG();
  21.580  #endif
  21.581 -		}
  21.582 -	}
  21.583 +        }
  21.584 +    }
  21.585  
  21.586 -	kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
  21.587 -	if (OFF_SLAB(cachep))
  21.588 -		kmem_cache_free(cachep->slabp_cache, slabp);
  21.589 +    kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
  21.590 +    if (OFF_SLAB(cachep))
  21.591 +        kmem_cache_free(cachep->slabp_cache, slabp);
  21.592  }
  21.593  
  21.594  /**
  21.595 @@ -627,210 +585,211 @@ static void kmem_slab_destroy (kmem_cach
  21.596   */
  21.597  kmem_cache_t *
  21.598  kmem_cache_create (const char *name, size_t size, size_t offset,
  21.599 -	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
  21.600 -	void (*dtor)(void*, kmem_cache_t *, unsigned long))
  21.601 +                   unsigned long flags,
  21.602 +                   void (*ctor)(void*, kmem_cache_t *, unsigned long),
  21.603 +                   void (*dtor)(void*, kmem_cache_t *, unsigned long))
  21.604  {
  21.605 -	const char *func_nm = KERN_ERR "kmem_create: ";
  21.606 -	size_t left_over, align, slab_size;
  21.607 -	kmem_cache_t *cachep = NULL;
  21.608 -        unsigned long spin_flags;
  21.609 +    const char *func_nm = KERN_ERR "kmem_create: ";
  21.610 +    size_t left_over, align, slab_size;
  21.611 +    kmem_cache_t *cachep = NULL;
  21.612 +    unsigned long spin_flags;
  21.613  
  21.614 -	/*
  21.615 -	 * Sanity checks... these are all serious usage bugs.
  21.616 -	 */
  21.617 -	if ((!name) ||
  21.618 -		((strlen(name) >= CACHE_NAMELEN - 1)) ||
  21.619 -		(size < BYTES_PER_WORD) ||
  21.620 -		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
  21.621 -		(dtor && !ctor) ||
  21.622 -		(offset < 0 || offset > size))
  21.623 -			BUG();
  21.624 +    /*
  21.625 +     * Sanity checks... these are all serious usage bugs.
  21.626 +     */
  21.627 +    if ((!name) ||
  21.628 +        ((strlen(name) >= CACHE_NAMELEN - 1)) ||
  21.629 +        (size < BYTES_PER_WORD) ||
  21.630 +        (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
  21.631 +        (dtor && !ctor) ||
  21.632 +        (offset < 0 || offset > size))
  21.633 +        BUG();
  21.634  
  21.635  #if DEBUG
  21.636 -	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
  21.637 -		/* No constructor, but inital state check requested */
  21.638 -		printk("%sNo con, but init state check requested - %s\n", func_nm, name);
  21.639 -		flags &= ~SLAB_DEBUG_INITIAL;
  21.640 -	}
  21.641 +    if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
  21.642 +        /* No constructor, but inital state check requested */
  21.643 +        printk("%sNo con, but init state check requested - %s\n",
  21.644 +               func_nm, name);
  21.645 +        flags &= ~SLAB_DEBUG_INITIAL;
  21.646 +    }
  21.647  
  21.648 -	if ((flags & SLAB_POISON) && ctor) {
  21.649 -		/* request for poisoning, but we can't do that with a constructor */
  21.650 -		printk("%sPoisoning requested, but con given - %s\n", func_nm, name);
  21.651 -		flags &= ~SLAB_POISON;
  21.652 -	}
  21.653 +    if ((flags & SLAB_POISON) && ctor) {
  21.654 +        /* request for poisoning, but we can't do that with a constructor */
  21.655 +        printk("%sPoisoning requested, but con given - %s\n",
  21.656 +               func_nm, name);
  21.657 +        flags &= ~SLAB_POISON;
  21.658 +    }
  21.659  #if FORCED_DEBUG
  21.660 -	if (size < (PAGE_SIZE>>3))
  21.661 -		/*
  21.662 -		 * do not red zone large object, causes severe
  21.663 -		 * fragmentation.
  21.664 -		 */
  21.665 -		flags |= SLAB_RED_ZONE;
  21.666 -	if (!ctor)
  21.667 -		flags |= SLAB_POISON;
  21.668 +    if (size < (PAGE_SIZE>>3))
  21.669 +        /*
  21.670 +         * do not red zone large object, causes severe
  21.671 +         * fragmentation.
  21.672 +         */
  21.673 +        flags |= SLAB_RED_ZONE;
  21.674 +    if (!ctor)
  21.675 +        flags |= SLAB_POISON;
  21.676  #endif
  21.677  #endif
  21.678  
  21.679 -	/*
  21.680 -	 * Always checks flags, a caller might be expecting debug
  21.681 -	 * support which isn't available.
  21.682 -	 */
  21.683 -	if (flags & ~CREATE_MASK)
  21.684 -		BUG();
  21.685 +    /*
  21.686 +     * Always checks flags, a caller might be expecting debug
  21.687 +     * support which isn't available.
  21.688 +     */
  21.689 +    if (flags & ~CREATE_MASK)
  21.690 +        BUG();
  21.691  
  21.692 -	/* Get cache's description obj. */
  21.693 -	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
  21.694 -	if (!cachep)
  21.695 -		goto opps;
  21.696 -	memset(cachep, 0, sizeof(kmem_cache_t));
  21.697 +    /* Get cache's description obj. */
  21.698 +    cachep = (kmem_cache_t *)kmem_cache_alloc(&cache_cache);
  21.699 +    if (!cachep)
  21.700 +        goto opps;
  21.701 +    memset(cachep, 0, sizeof(kmem_cache_t));
  21.702  
  21.703 -	/* Check that size is in terms of words.  This is needed to avoid
  21.704 -	 * unaligned accesses for some archs when redzoning is used, and makes
  21.705 -	 * sure any on-slab bufctl's are also correctly aligned.
  21.706 -	 */
  21.707 -	if (size & (BYTES_PER_WORD-1)) {
  21.708 -		size += (BYTES_PER_WORD-1);
  21.709 -		size &= ~(BYTES_PER_WORD-1);
  21.710 -		printk("%sForcing size word alignment - %s\n", func_nm, name);
  21.711 -	}
  21.712 +    /* Check that size is in terms of words.  This is needed to avoid
  21.713 +     * unaligned accesses for some archs when redzoning is used, and makes
  21.714 +     * sure any on-slab bufctl's are also correctly aligned.
  21.715 +     */
  21.716 +    if (size & (BYTES_PER_WORD-1)) {
  21.717 +        size += (BYTES_PER_WORD-1);
  21.718 +        size &= ~(BYTES_PER_WORD-1);
  21.719 +        printk("%sForcing size word alignment - %s\n", func_nm, name);
  21.720 +    }
  21.721  	
  21.722  #if DEBUG
  21.723 -	if (flags & SLAB_RED_ZONE) {
  21.724 -		/*
  21.725 -		 * There is no point trying to honour cache alignment
  21.726 -		 * when redzoning.
  21.727 -		 */
  21.728 -		flags &= ~SLAB_HWCACHE_ALIGN;
  21.729 -		size += 2*BYTES_PER_WORD;	/* words for redzone */
  21.730 -	}
  21.731 +    if (flags & SLAB_RED_ZONE) {
  21.732 +        /*
  21.733 +         * There is no point trying to honour cache alignment
  21.734 +         * when redzoning.
  21.735 +         */
  21.736 +        flags &= ~SLAB_HWCACHE_ALIGN;
  21.737 +        size += 2*BYTES_PER_WORD;	/* words for redzone */
  21.738 +    }
  21.739  #endif
  21.740 -	align = BYTES_PER_WORD;
  21.741 -	if (flags & SLAB_HWCACHE_ALIGN)
  21.742 -		align = L1_CACHE_BYTES;
  21.743 +    align = BYTES_PER_WORD;
  21.744 +    if (flags & SLAB_HWCACHE_ALIGN)
  21.745 +        align = L1_CACHE_BYTES;
  21.746  
  21.747 -	/* Determine if the slab management is 'on' or 'off' slab. */
  21.748 -	if (size >= (PAGE_SIZE>>3))
  21.749 -		/*
  21.750 -		 * Size is large, assume best to place the slab management obj
  21.751 -		 * off-slab (should allow better packing of objs).
  21.752 -		 */
  21.753 -		flags |= CFLGS_OFF_SLAB;
  21.754 +    /* Determine if the slab management is 'on' or 'off' slab. */
  21.755 +    if (size >= (PAGE_SIZE>>3))
  21.756 +        /*
  21.757 +         * Size is large, assume best to place the slab management obj
  21.758 +         * off-slab (should allow better packing of objs).
  21.759 +         */
  21.760 +        flags |= CFLGS_OFF_SLAB;
  21.761  
  21.762 -	if (flags & SLAB_HWCACHE_ALIGN) {
  21.763 -		/* Need to adjust size so that objs are cache aligned. */
  21.764 -		/* Small obj size, can get at least two per cache line. */
  21.765 -		/* FIXME: only power of 2 supported, was better */
  21.766 -		while (size < align/2)
  21.767 -			align /= 2;
  21.768 -		size = (size+align-1)&(~(align-1));
  21.769 -	}
  21.770 +    if (flags & SLAB_HWCACHE_ALIGN) {
  21.771 +        /* Need to adjust size so that objs are cache aligned. */
  21.772 +        /* Small obj size, can get at least two per cache line. */
  21.773 +        /* FIXME: only power of 2 supported, was better */
  21.774 +        while (size < align/2)
  21.775 +            align /= 2;
  21.776 +        size = (size+align-1)&(~(align-1));
  21.777 +    }
  21.778  
  21.779 -	/* Cal size (in pages) of slabs, and the num of objs per slab.
  21.780 -	 * This could be made much more intelligent.  For now, try to avoid
  21.781 -	 * using high page-orders for slabs.  When the gfp() funcs are more
  21.782 -	 * friendly towards high-order requests, this should be changed.
  21.783 -	 */
  21.784 -	do {
  21.785 -		unsigned int break_flag = 0;
  21.786 -cal_wastage:
  21.787 -		kmem_cache_estimate(cachep->gfporder, size, flags,
  21.788 -						&left_over, &cachep->num);
  21.789 -		if (break_flag)
  21.790 -			break;
  21.791 -		if (cachep->gfporder >= MAX_GFP_ORDER)
  21.792 -			break;
  21.793 -		if (!cachep->num)
  21.794 -			goto next;
  21.795 -		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
  21.796 -			/* Oops, this num of objs will cause problems. */
  21.797 -			cachep->gfporder--;
  21.798 -			break_flag++;
  21.799 -			goto cal_wastage;
  21.800 -		}
  21.801 +    /* Cal size (in pages) of slabs, and the num of objs per slab.
  21.802 +     * This could be made much more intelligent.  For now, try to avoid
  21.803 +     * using high page-orders for slabs.  When the gfp() funcs are more
  21.804 +     * friendly towards high-order requests, this should be changed.
  21.805 +     */
  21.806 +    do {
  21.807 +        unsigned int break_flag = 0;
  21.808 +    cal_wastage:
  21.809 +        kmem_cache_estimate(cachep->gfporder, size, flags,
  21.810 +                            &left_over, &cachep->num);
  21.811 +        if (break_flag)
  21.812 +            break;
  21.813 +        if (cachep->gfporder >= MAX_GFP_ORDER)
  21.814 +            break;
  21.815 +        if (!cachep->num)
  21.816 +            goto next;
  21.817 +        if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
  21.818 +            /* Oops, this num of objs will cause problems. */
  21.819 +            cachep->gfporder--;
  21.820 +            break_flag++;
  21.821 +            goto cal_wastage;
  21.822 +        }
  21.823  
  21.824 -		/*
  21.825 -		 * Large num of objs is good, but v. large slabs are currently
  21.826 -		 * bad for the gfp()s.
  21.827 -		 */
  21.828 -		if (cachep->gfporder >= slab_break_gfp_order)
  21.829 -			break;
  21.830 +        /*
  21.831 +         * Large num of objs is good, but v. large slabs are currently
  21.832 +         * bad for the gfp()s.
  21.833 +         */
  21.834 +        if (cachep->gfporder >= slab_break_gfp_order)
  21.835 +            break;
  21.836  
  21.837 -		if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
  21.838 -			break;	/* Acceptable internal fragmentation. */
  21.839 -next:
  21.840 -		cachep->gfporder++;
  21.841 -	} while (1);
  21.842 +        if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
  21.843 +            break;	/* Acceptable internal fragmentation. */
  21.844 +    next:
  21.845 +        cachep->gfporder++;
  21.846 +    } while (1);
  21.847  
  21.848 -	if (!cachep->num) {
  21.849 -		printk("kmem_cache_create: couldn't create cache %s.\n", name);
  21.850 -		kmem_cache_free(&cache_cache, cachep);
  21.851 -		cachep = NULL;
  21.852 -		goto opps;
  21.853 -	}
  21.854 -	slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t)+sizeof(slab_t));
  21.855 +    if (!cachep->num) {
  21.856 +        printk("kmem_cache_create: couldn't create cache %s.\n", name);
  21.857 +        kmem_cache_free(&cache_cache, cachep);
  21.858 +        cachep = NULL;
  21.859 +        goto opps;
  21.860 +    }
  21.861 +    slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t) + 
  21.862 +                               sizeof(slab_t));
  21.863  
  21.864 -	/*
  21.865 -	 * If the slab has been placed off-slab, and we have enough space then
  21.866 -	 * move it on-slab. This is at the expense of any extra colouring.
  21.867 -	 */
  21.868 -	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
  21.869 -		flags &= ~CFLGS_OFF_SLAB;
  21.870 -		left_over -= slab_size;
  21.871 -	}
  21.872 +    /*
  21.873 +     * If the slab has been placed off-slab, and we have enough space then
  21.874 +     * move it on-slab. This is at the expense of any extra colouring.
  21.875 +     */
  21.876 +    if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
  21.877 +        flags &= ~CFLGS_OFF_SLAB;
  21.878 +        left_over -= slab_size;
  21.879 +    }
  21.880  
  21.881 -	/* Offset must be a multiple of the alignment. */
  21.882 -	offset += (align-1);
  21.883 -	offset &= ~(align-1);
  21.884 -	if (!offset)
  21.885 -		offset = L1_CACHE_BYTES;
  21.886 -	cachep->colour_off = offset;
  21.887 -	cachep->colour = left_over/offset;
  21.888 +    /* Offset must be a multiple of the alignment. */
  21.889 +    offset += (align-1);
  21.890 +    offset &= ~(align-1);
  21.891 +    if (!offset)
  21.892 +        offset = L1_CACHE_BYTES;
  21.893 +    cachep->colour_off = offset;
  21.894 +    cachep->colour = left_over/offset;
  21.895  
  21.896 -	/* init remaining fields */
  21.897 -	if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
  21.898 -		flags |= CFLGS_OPTIMIZE;
  21.899 +    /* init remaining fields */
  21.900 +    if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
  21.901 +        flags |= CFLGS_OPTIMIZE;
  21.902  
  21.903 -	cachep->flags = flags;
  21.904 -	cachep->gfpflags = 0;
  21.905 -	if (flags & SLAB_CACHE_DMA)
  21.906 -		cachep->gfpflags |= GFP_DMA;
  21.907 -	spin_lock_init(&cachep->spinlock);
  21.908 -	cachep->objsize = size;
  21.909 -	INIT_LIST_HEAD(&cachep->slabs_full);
  21.910 -	INIT_LIST_HEAD(&cachep->slabs_partial);
  21.911 -	INIT_LIST_HEAD(&cachep->slabs_free);
  21.912 +    cachep->flags = flags;
  21.913 +    spin_lock_init(&cachep->spinlock);
  21.914 +    cachep->objsize = size;
  21.915 +    INIT_LIST_HEAD(&cachep->slabs_full);
  21.916 +    INIT_LIST_HEAD(&cachep->slabs_partial);
  21.917 +    INIT_LIST_HEAD(&cachep->slabs_free);
  21.918  
  21.919 -	if (flags & CFLGS_OFF_SLAB)
  21.920 -		cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
  21.921 -	cachep->ctor = ctor;
  21.922 -	cachep->dtor = dtor;
  21.923 -	/* Copy name over so we don't have problems with unloaded modules */
  21.924 -	strcpy(cachep->name, name);
  21.925 +    if (flags & CFLGS_OFF_SLAB)
  21.926 +        cachep->slabp_cache = kmem_find_general_cachep(slab_size);
  21.927 +    cachep->ctor = ctor;
  21.928 +    cachep->dtor = dtor;
  21.929 +    /* Copy name over so we don't have problems with unloaded modules */
  21.930 +    strcpy(cachep->name, name);
  21.931  
  21.932  #ifdef CONFIG_SMP
  21.933 -	if (g_cpucache_up)
  21.934 -		enable_cpucache(cachep);
  21.935 +    if (g_cpucache_up)
  21.936 +        enable_cpucache(cachep);
  21.937  #endif
  21.938 -	/* Need the semaphore to access the chain. */
  21.939 -	down(&cache_chain_sem);
  21.940 -	{
  21.941 -		struct list_head *p;
  21.942 +    /* Need the semaphore to access the chain. */
  21.943 +    down(&cache_chain_sem);
  21.944 +    {
  21.945 +        struct list_head *p;
  21.946  
  21.947 -		list_for_each(p, &cache_chain) {
  21.948 -			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
  21.949 +        list_for_each(p, &cache_chain) {
  21.950 +            kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
  21.951  
  21.952 -			/* The name field is constant - no lock needed. */
  21.953 -			if (!strcmp(pc->name, name))
  21.954 -				BUG();
  21.955 -		}
  21.956 -	}
  21.957 +            /* The name field is constant - no lock needed. */
  21.958 +            if (!strcmp(pc->name, name))
  21.959 +                BUG();
  21.960 +        }
  21.961 +    }
  21.962  
  21.963 -	/* There is no reason to lock our new cache before we
  21.964 -	 * link it in - no one knows about it yet...
  21.965 -	 */
  21.966 -	list_add(&cachep->next, &cache_chain);
  21.967 -	up(&cache_chain_sem);
  21.968 -opps:
  21.969 -	return cachep;
  21.970 +    /* There is no reason to lock our new cache before we
  21.971 +     * link it in - no one knows about it yet...
  21.972 +     */
  21.973 +    list_add(&cachep->next, &cache_chain);
  21.974 +    up(&cache_chain_sem);
  21.975 + opps:
  21.976 +    return cachep;
  21.977  }
  21.978  
  21.979  
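[Editor's note] The order-selection loop above is the heart of kmem_cache_create(): it keeps raising gfporder until at least one object fits in the slab and the unused tail is no more than one eighth of the slab's size. A minimal user-space sketch of that heuristic follows; the helper names and the management-area layout are hypothetical stand-ins for kmem_cache_estimate(), and the off-slab, colouring and slab_break_gfp_order cases are omitted.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE      4096UL
    #define MAX_GFP_ORDER  5
    #define L1_ALIGN(x)    (((x) + 31) & ~31UL)   /* assume 32-byte cache lines */

    /* Rough stand-in for kmem_cache_estimate(), on-slab management only. */
    static void estimate(unsigned int order, size_t objsize,
                         unsigned int *num, size_t *left_over)
    {
        size_t slab_bytes = PAGE_SIZE << order;
        size_t mgmt = sizeof(void *);             /* stand-in for slab_t header */
        unsigned int n = 0;

        /* Each object also costs one bufctl index in the management area. */
        while (L1_ALIGN(mgmt + (n + 1) * sizeof(unsigned int)) +
               (n + 1) * objsize <= slab_bytes)
            n++;

        *num = n;
        *left_over = slab_bytes -
            (L1_ALIGN(mgmt + n * sizeof(unsigned int)) + n * objsize);
    }

    /* Pick the smallest order whose waste is <= 1/8 of the slab. */
    static unsigned int pick_order(size_t objsize)
    {
        unsigned int order, num;
        size_t left;

        for (order = 0; order < MAX_GFP_ORDER; order++) {
            estimate(order, objsize, &num, &left);
            if (num && left * 8 <= (PAGE_SIZE << order))
                break;
        }
        return order;
    }

    int main(void)
    {
        size_t sizes[] = { 32, 100, 1500, 5000 };
        for (unsigned int i = 0; i < 4; i++)
            printf("objsize %zu -> order %u\n", sizes[i], pick_order(sizes[i]));
        return 0;
    }

The `left * 8 <= slab bytes` test is exactly the acceptance condition in the loop above; everything else here is simplified.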
  21.980 @@ -841,21 +800,21 @@ opps:
  21.981   */
  21.982  static int is_chained_kmem_cache(kmem_cache_t * cachep)
  21.983  {
  21.984 -	struct list_head *p;
  21.985 -	int ret = 0;
  21.986 -        unsigned long spin_flags;
  21.987 +    struct list_head *p;
  21.988 +    int ret = 0;
  21.989 +    unsigned long spin_flags;
  21.990  
  21.991 -	/* Find the cache in the chain of caches. */
  21.992 -	down(&cache_chain_sem);
  21.993 -	list_for_each(p, &cache_chain) {
  21.994 -		if (p == &cachep->next) {
  21.995 -			ret = 1;
  21.996 -			break;
  21.997 -		}
  21.998 -	}
  21.999 -	up(&cache_chain_sem);
 21.1000 +    /* Find the cache in the chain of caches. */
 21.1001 +    down(&cache_chain_sem);
 21.1002 +    list_for_each(p, &cache_chain) {
 21.1003 +        if (p == &cachep->next) {
 21.1004 +            ret = 1;
 21.1005 +            break;
 21.1006 +        }
 21.1007 +    }
 21.1008 +    up(&cache_chain_sem);
 21.1009  
 21.1010 -	return ret;
 21.1011 +    return ret;
 21.1012  }
 21.1013  #else
 21.1014  #define is_chained_kmem_cache(x) 1
 21.1015 @@ -867,54 +826,54 @@ static int is_chained_kmem_cache(kmem_ca
 21.1016   */
 21.1017  static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 21.1018  {
 21.1019 -	local_irq_disable();
 21.1020 -	func(arg);
 21.1021 -	local_irq_enable();
 21.1022 +    local_irq_disable();
 21.1023 +    func(arg);
 21.1024 +    local_irq_enable();
 21.1025  
 21.1026 -	if (smp_call_function(func, arg, 1, 1))
 21.1027 -		BUG();
 21.1028 +    if (smp_call_function(func, arg, 1, 1))
 21.1029 +        BUG();
 21.1030  }
 21.1031  typedef struct ccupdate_struct_s
 21.1032  {
 21.1033 -	kmem_cache_t *cachep;
 21.1034 -	cpucache_t *new[NR_CPUS];
 21.1035 +    kmem_cache_t *cachep;
 21.1036 +    cpucache_t *new[NR_CPUS];
 21.1037  } ccupdate_struct_t;
 21.1038  
 21.1039  static void do_ccupdate_local(void *info)
 21.1040  {
 21.1041 -	ccupdate_struct_t *new = (ccupdate_struct_t *)info;
 21.1042 -	cpucache_t *old = cc_data(new->cachep);
 21.1043 +    ccupdate_struct_t *new = (ccupdate_struct_t *)info;
 21.1044 +    cpucache_t *old = cc_data(new->cachep);
 21.1045  	
 21.1046 -	cc_data(new->cachep) = new->new[smp_processor_id()];
 21.1047 -	new->new[smp_processor_id()] = old;
 21.1048 +    cc_data(new->cachep) = new->new[smp_processor_id()];
 21.1049 +    new->new[smp_processor_id()] = old;
 21.1050  }
 21.1051  
 21.1052  static void free_block (kmem_cache_t* cachep, void** objpp, int len);
 21.1053  
 21.1054  static void drain_cpu_caches(kmem_cache_t *cachep)
 21.1055  {
 21.1056 -	ccupdate_struct_t new;
 21.1057 -	int i;
 21.1058 -        unsigned long spin_flags;
 21.1059 +    ccupdate_struct_t new;
 21.1060 +    int i;
 21.1061 +    unsigned long spin_flags;
 21.1062  
 21.1063 -	memset(&new.new,0,sizeof(new.new));
 21.1064 +    memset(&new.new,0,sizeof(new.new));
 21.1065  
 21.1066 -	new.cachep = cachep;
 21.1067 +    new.cachep = cachep;
 21.1068  
 21.1069 -	down(&cache_chain_sem);
 21.1070 -	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.1071 +    down(&cache_chain_sem);
 21.1072 +    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.1073  
 21.1074 -	for (i = 0; i < smp_num_cpus; i++) {
 21.1075 -		cpucache_t* ccold = new.new[cpu_logical_map(i)];
 21.1076 -		if (!ccold || (ccold->avail == 0))
 21.1077 -			continue;
 21.1078 -		local_irq_disable();
 21.1079 -		free_block(cachep, cc_entry(ccold), ccold->avail);
 21.1080 -		local_irq_enable();
 21.1081 -		ccold->avail = 0;
 21.1082 -	}
 21.1083 -	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.1084 -	up(&cache_chain_sem);
 21.1085 +    for (i = 0; i < smp_num_cpus; i++) {
 21.1086 +        cpucache_t* ccold = new.new[cpu_logical_map(i)];
 21.1087 +        if (!ccold || (ccold->avail == 0))
 21.1088 +            continue;
 21.1089 +        local_irq_disable();
 21.1090 +        free_block(cachep, cc_entry(ccold), ccold->avail);
 21.1091 +        local_irq_enable();
 21.1092 +        ccold->avail = 0;
 21.1093 +    }
 21.1094 +    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.1095 +    up(&cache_chain_sem);
 21.1096  }
 21.1097  
 21.1098  #else
 21.1099 @@ -923,35 +882,36 @@ static void drain_cpu_caches(kmem_cache_
 21.1100  
 21.1101  static int __kmem_cache_shrink(kmem_cache_t *cachep)
 21.1102  {
 21.1103 -	slab_t *slabp;
 21.1104 -	int ret;
 21.1105 +    slab_t *slabp;
 21.1106 +    int ret;
 21.1107  
 21.1108 -	drain_cpu_caches(cachep);
 21.1109 +    drain_cpu_caches(cachep);
 21.1110  
 21.1111 -	spin_lock_irq(&cachep->spinlock);
 21.1112 +    spin_lock_irq(&cachep->spinlock);
 21.1113  
 21.1114 -	/* If the cache is growing, stop shrinking. */
 21.1115 -	while (!cachep->growing) {
 21.1116 -		struct list_head *p;
 21.1117 +    /* If the cache is growing, stop shrinking. */
 21.1118 +    while (!cachep->growing) {
 21.1119 +        struct list_head *p;
 21.1120  
 21.1121 -		p = cachep->slabs_free.prev;
 21.1122 -		if (p == &cachep->slabs_free)
 21.1123 -			break;
 21.1124 +        p = cachep->slabs_free.prev;
 21.1125 +        if (p == &cachep->slabs_free)
 21.1126 +            break;
 21.1127  
 21.1128 -		slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
 21.1129 +        slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
 21.1130  #if DEBUG
 21.1131 -		if (slabp->inuse)
 21.1132 -			BUG();
 21.1133 +        if (slabp->inuse)
 21.1134 +            BUG();
 21.1135  #endif
 21.1136 -		list_del(&slabp->list);
 21.1137 +        list_del(&slabp->list);
 21.1138  
 21.1139 -		spin_unlock_irq(&cachep->spinlock);
 21.1140 -		kmem_slab_destroy(cachep, slabp);
 21.1141 -		spin_lock_irq(&cachep->spinlock);
 21.1142 -	}
 21.1143 -	ret = !list_empty(&cachep->slabs_full) || !list_empty(&cachep->slabs_partial);
 21.1144 -	spin_unlock_irq(&cachep->spinlock);
 21.1145 -	return ret;
 21.1146 +        spin_unlock_irq(&cachep->spinlock);
 21.1147 +        kmem_slab_destroy(cachep, slabp);
 21.1148 +        spin_lock_irq(&cachep->spinlock);
 21.1149 +    }
 21.1150 +    ret = (!list_empty(&cachep->slabs_full) || 
 21.1151 +           !list_empty(&cachep->slabs_partial));
 21.1152 +    spin_unlock_irq(&cachep->spinlock);
 21.1153 +    return ret;
 21.1154  }
 21.1155  
 21.1156  /**
 21.1157 @@ -963,10 +923,10 @@ static int __kmem_cache_shrink(kmem_cach
 21.1158   */
 21.1159  int kmem_cache_shrink(kmem_cache_t *cachep)
 21.1160  {
 21.1161 -	if (!cachep || !is_chained_kmem_cache(cachep))
 21.1162 -		BUG();
 21.1163 +    if (!cachep || !is_chained_kmem_cache(cachep))
 21.1164 +        BUG();
 21.1165  
 21.1166 -	return __kmem_cache_shrink(cachep);
 21.1167 +    return __kmem_cache_shrink(cachep);
 21.1168  }
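[Editor's note] __kmem_cache_shrink() above only ever releases completely empty slabs, backs off as soon as the cache starts growing, and reports whether any full or partial slabs are still outstanding. A deliberately tiny sketch of that contract, with the slab lists collapsed to plain counters (all names hypothetical):

    #include <stdio.h>

    /* Hypothetical, much-simplified cache state: just slab counts. */
    struct mini_cache {
        int growing;        /* non-zero while a new slab is being added */
        int slabs_free;     /* completely empty slabs                   */
        int slabs_partial;  /* slabs with some objects in use           */
        int slabs_full;     /* slabs with every object in use           */
    };

    /* Release empty slabs unless the cache is growing; report busyness. */
    static int mini_cache_shrink(struct mini_cache *c)
    {
        while (!c->growing && c->slabs_free > 0)
            c->slabs_free--;             /* kmem_slab_destroy() stand-in */
        return c->slabs_full > 0 || c->slabs_partial > 0;
    }

    int main(void)
    {
        struct mini_cache c = { 0, 3, 1, 0 };
        printf("busy after shrink: %d (free slabs left: %d)\n",
               mini_cache_shrink(&c), c.slabs_free);
        return 0;
    }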
 21.1169  
 21.1170  /**
 21.1171 @@ -986,201 +946,187 @@ int kmem_cache_shrink(kmem_cache_t *cach
 21.1172   */
 21.1173  int kmem_cache_destroy (kmem_cache_t * cachep)
 21.1174  {
 21.1175 -        unsigned long spin_flags;
 21.1176 +    unsigned long spin_flags;
 21.1177  
 21.1178 -	if (!cachep || cachep->growing)
 21.1179 -		BUG();
 21.1180 +    if (!cachep || cachep->growing)
 21.1181 +        BUG();
 21.1182  
 21.1183 -	/* Find the cache in the chain of caches. */
 21.1184 -	down(&cache_chain_sem);
 21.1185 -	/* the chain is never empty, cache_cache is never destroyed */
 21.1186 -	if (clock_searchp == cachep)
 21.1187 -		clock_searchp = list_entry(cachep->next.next,
 21.1188 -						kmem_cache_t, next);
 21.1189 -	list_del(&cachep->next);
 21.1190 -	up(&cache_chain_sem);
 21.1191 +    /* Find the cache in the chain of caches. */
 21.1192 +    down(&cache_chain_sem);
 21.1193 +    /* the chain is never empty, cache_cache is never destroyed */
 21.1194 +    if (clock_searchp == cachep)
 21.1195 +        clock_searchp = list_entry(cachep->next.next,
 21.1196 +                                   kmem_cache_t, next);
 21.1197 +    list_del(&cachep->next);
 21.1198 +    up(&cache_chain_sem);
 21.1199  
 21.1200 -	if (__kmem_cache_shrink(cachep)) {
 21.1201 -		printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
 21.1202 -		       cachep);
 21.1203 -		down(&cache_chain_sem);
 21.1204 -		list_add(&cachep->next,&cache_chain);
 21.1205 -		up(&cache_chain_sem);
 21.1206 -		return 1;
 21.1207 -	}
 21.1208 +    if (__kmem_cache_shrink(cachep)) {
 21.1209 +        printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
 21.1210 +               cachep);
 21.1211 +        down(&cache_chain_sem);
 21.1212 +        list_add(&cachep->next,&cache_chain);
 21.1213 +        up(&cache_chain_sem);
 21.1214 +        return 1;
 21.1215 +    }
 21.1216  #ifdef CONFIG_SMP
 21.1217 -	{
 21.1218 -		int i;
 21.1219 -		for (i = 0; i < NR_CPUS; i++)
 21.1220 -			kfree(cachep->cpudata[i]);
 21.1221 -	}
 21.1222 +    {
 21.1223 +        int i;
 21.1224 +        for (i = 0; i < NR_CPUS; i++)
 21.1225 +            kfree(cachep->cpudata[i]);
 21.1226 +    }
 21.1227  #endif
 21.1228 -	kmem_cache_free(&cache_cache, cachep);
 21.1229 +    kmem_cache_free(&cache_cache, cachep);
 21.1230  
 21.1231 -	return 0;
 21.1232 +    return 0;
 21.1233  }
 21.1234  
 21.1235  /* Get the memory for a slab management obj. */
 21.1236 -static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
 21.1237 -			void *objp, int colour_off, int local_flags)
 21.1238 +static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
 21.1239 +                                          void *objp, int colour_off, 
 21.1240 +                                          int local_flags)
 21.1241  {
 21.1242 -	slab_t *slabp;
 21.1243 +    slab_t *slabp;
 21.1244  	
 21.1245 -	if (OFF_SLAB(cachep)) {
 21.1246 -		/* Slab management obj is off-slab. */
 21.1247 -		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
 21.1248 -		if (!slabp)
 21.1249 -			return NULL;
 21.1250 -	} else {
 21.1251 -		/* FIXME: change to
 21.1252 -			slabp = objp
 21.1253 -		 * if you enable OPTIMIZE
 21.1254 -		 */
 21.1255 -		slabp = objp+colour_off;
 21.1256 -		colour_off += L1_CACHE_ALIGN(cachep->num *
 21.1257 -				sizeof(kmem_bufctl_t) + sizeof(slab_t));
 21.1258 -	}
 21.1259 -	slabp->inuse = 0;
 21.1260 -	slabp->colouroff = colour_off;
 21.1261 -	slabp->s_mem = objp+colour_off;
 21.1262 +    if (OFF_SLAB(cachep)) {
 21.1263 +        /* Slab management obj is off-slab. */
 21.1264 +        slabp = kmem_cache_alloc(cachep->slabp_cache);
 21.1265 +        if (!slabp)
 21.1266 +            return NULL;
 21.1267 +    } else {
 21.1268 +        /* FIXME: change to
 21.1269 +           slabp = objp
 21.1270 +           * if you enable OPTIMIZE
 21.1271 +           */
 21.1272 +        slabp = objp+colour_off;
 21.1273 +        colour_off += L1_CACHE_ALIGN(cachep->num *
 21.1274 +                                     sizeof(kmem_bufctl_t) + sizeof(slab_t));
 21.1275 +    }
 21.1276 +    slabp->inuse = 0;
 21.1277 +    slabp->colouroff = colour_off;
 21.1278 +    slabp->s_mem = objp+colour_off;
 21.1279  
 21.1280 -	return slabp;
 21.1281 +    return slabp;
 21.1282  }
 21.1283  
 21.1284 -static inline void kmem_cache_init_objs (kmem_cache_t * cachep,
 21.1285 -			slab_t * slabp, unsigned long ctor_flags)
 21.1286 +static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
 21.1287 +                                         slab_t *slabp,
 21.1288 +                                        unsigned long ctor_flags)
 21.1289  {
 21.1290 -	int i;
 21.1291 +    int i;
 21.1292  
 21.1293 -	for (i = 0; i < cachep->num; i++) {
 21.1294 -		void* objp = slabp->s_mem+cachep->objsize*i;
 21.1295 +    for (i = 0; i < cachep->num; i++) {
 21.1296 +        void* objp = slabp->s_mem+cachep->objsize*i;
 21.1297  #if DEBUG
 21.1298 -		if (cachep->flags & SLAB_RED_ZONE) {
 21.1299 -			*((unsigned long*)(objp)) = RED_MAGIC1;
 21.1300 -			*((unsigned long*)(objp + cachep->objsize -
 21.1301 -					BYTES_PER_WORD)) = RED_MAGIC1;
 21.1302 -			objp += BYTES_PER_WORD;
 21.1303 -		}
 21.1304 +        if (cachep->flags & SLAB_RED_ZONE) {
 21.1305 +            *((unsigned long*)(objp)) = RED_MAGIC1;
 21.1306 +            *((unsigned long*)(objp + cachep->objsize -
 21.1307 +                               BYTES_PER_WORD)) = RED_MAGIC1;
 21.1308 +            objp += BYTES_PER_WORD;
 21.1309 +        }
 21.1310  #endif
 21.1311  
 21.1312 -		/*
 21.1313 -		 * Constructors are not allowed to allocate memory from
 21.1314 -		 * the same cache which they are a constructor for.
 21.1315 -		 * Otherwise, deadlock. They must also be threaded.
 21.1316 -		 */
 21.1317 -		if (cachep->ctor)
 21.1318 -			cachep->ctor(objp, cachep, ctor_flags);
 21.1319 +        /*
 21.1320 +         * Constructors are not allowed to allocate memory from
 21.1321 +         * the same cache which they are a constructor for.
 21.1322 +         * Otherwise, deadlock. They must also be threaded.
 21.1323 +         */
 21.1324 +        if (cachep->ctor)
 21.1325 +            cachep->ctor(objp, cachep, ctor_flags);
 21.1326  #if DEBUG
 21.1327 -		if (cachep->flags & SLAB_RED_ZONE)
 21.1328 -			objp -= BYTES_PER_WORD;
 21.1329 -		if (cachep->flags & SLAB_POISON)
 21.1330 -			/* need to poison the objs */
 21.1331 -			kmem_poison_obj(cachep, objp);
 21.1332 -		if (cachep->flags & SLAB_RED_ZONE) {
 21.1333 -			if (*((unsigned long*)(objp)) != RED_MAGIC1)
 21.1334 -				BUG();
 21.1335 -			if (*((unsigned long*)(objp + cachep->objsize -
 21.1336 -					BYTES_PER_WORD)) != RED_MAGIC1)
 21.1337 -				BUG();
 21.1338 -		}
 21.1339 +        if (cachep->flags & SLAB_RED_ZONE)
 21.1340 +            objp -= BYTES_PER_WORD;
 21.1341 +        if (cachep->flags & SLAB_POISON)
 21.1342 +            /* need to poison the objs */
 21.1343 +            kmem_poison_obj(cachep, objp);
 21.1344 +        if (cachep->flags & SLAB_RED_ZONE) {
 21.1345 +            if (*((unsigned long*)(objp)) != RED_MAGIC1)
 21.1346 +                BUG();
 21.1347 +            if (*((unsigned long*)(objp + cachep->objsize -
 21.1348 +                                   BYTES_PER_WORD)) != RED_MAGIC1)
 21.1349 +                BUG();
 21.1350 +        }
 21.1351  #endif
 21.1352 -		slab_bufctl(slabp)[i] = i+1;
 21.1353 -	}
 21.1354 -	slab_bufctl(slabp)[i-1] = BUFCTL_END;
 21.1355 -	slabp->free = 0;
 21.1356 +        slab_bufctl(slabp)[i] = i+1;
 21.1357 +    }
 21.1358 +    slab_bufctl(slabp)[i-1] = BUFCTL_END;
 21.1359 +    slabp->free = 0;
 21.1360  }
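[Editor's note] kmem_cache_init_objs() above threads a new slab's objects onto an index-based free list: slab_bufctl(slabp)[i] = i+1, terminated by BUFCTL_END, with slabp->free naming the first free index. Allocation pops the head index and freeing pushes an index back, so no per-object pointers are needed. A small user-space sketch of that bufctl scheme (hypothetical names, one fixed-size slab):

    #include <stdio.h>

    #define NUM_OBJS    8
    #define BUFCTL_END  0xffffffffU

    static unsigned int bufctl[NUM_OBJS];   /* next-free index per object */
    static unsigned int free_head;          /* equivalent of slabp->free  */
    static unsigned int inuse;

    static void slab_init(void)
    {
        unsigned int i;
        for (i = 0; i < NUM_OBJS; i++)      /* chain 0 -> 1 -> ... -> END */
            bufctl[i] = i + 1;
        bufctl[NUM_OBJS - 1] = BUFCTL_END;
        free_head = 0;
        inuse = 0;
    }

    /* Returns the object index, or BUFCTL_END when the slab is full. */
    static unsigned int slab_alloc(void)
    {
        unsigned int obj = free_head;
        if (obj == BUFCTL_END)
            return BUFCTL_END;
        free_head = bufctl[obj];            /* pop the head of the list   */
        inuse++;
        return obj;
    }

    static void slab_free(unsigned int obj)
    {
        bufctl[obj] = free_head;            /* push the index back        */
        free_head = obj;
        inuse--;
    }

    int main(void)
    {
        unsigned int a, b;
        slab_init();
        a = slab_alloc();
        b = slab_alloc();
        printf("allocated %u and %u, inuse=%u\n", a, b, inuse);
        slab_free(a);
        printf("after free, next alloc = %u\n", slab_alloc());
        return 0;
    }

The same pop/push pair is what kmem_cache_alloc_one_tail() and kmem_cache_free_one() do below, just with real object addresses derived from the index.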
 21.1361  
 21.1362  /*
 21.1363   * Grow (by 1) the number of slabs within a cache.  This is called by
 21.1364   * kmem_cache_alloc() when there are no active objs left in a cache.
 21.1365   */
 21.1366 -static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
 21.1367 +static int kmem_cache_grow(kmem_cache_t * cachep)
 21.1368  {
 21.1369 -	slab_t	*slabp;
 21.1370 -	struct pfn_info	*page; unsigned int i;
 21.1371 -	void		*objp;
 21.1372 -	size_t		 offset;
 21.1373 -	unsigned int	 local_flags;
 21.1374 -	unsigned long	 ctor_flags;
 21.1375 -	unsigned long	 save_flags;
 21.1376 +    slab_t	*slabp;
 21.1377 +    struct pfn_info	*page; unsigned int i;
 21.1378 +    void		*objp;
 21.1379 +    size_t		 offset;
 21.1380 +    unsigned long	 ctor_flags;
 21.1381 +    unsigned long	 save_flags;
 21.1382  
 21.1383 -	/* Be lazy and only check for valid flags here,
 21.1384 - 	 * keeping it out of the critical path in kmem_cache_alloc().
 21.1385 -	 */
 21.1386 -	if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
 21.1387 -		BUG();
 21.1388 -	if (flags & SLAB_NO_GROW)
 21.1389 -		return 0;
 21.1390 +    ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 21.1391 +
 21.1392 +    /* About to mess with non-constant members - lock. */
 21.1393 +    spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1394  
 21.1395 -	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 21.1396 -	local_flags = (flags & SLAB_LEVEL_MASK);
 21.1397 -	if (local_flags == SLAB_ATOMIC)
 21.1398 -		/*
 21.1399 -		 * Not allowed to sleep.  Need to tell a constructor about
 21.1400 -		 * this - it might need to know...
 21.1401 -		 */
 21.1402 -		ctor_flags |= SLAB_CTOR_ATOMIC;
 21.1403 +    /* Get colour for the slab, and cal the next value. */
 21.1404 +    offset = cachep->colour_next;
 21.1405 +    cachep->colour_next++;
 21.1406 +    if (cachep->colour_next >= cachep->colour)
 21.1407 +        cachep->colour_next = 0;
 21.1408 +    offset *= cachep->colour_off;
 21.1409 +    cachep->dflags |= DFLGS_GROWN;
 21.1410  
 21.1411 -	/* About to mess with non-constant members - lock. */
 21.1412 -	spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1413 +    cachep->growing++;
 21.1414 +    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1415  
 21.1416 -	/* Get colour for the slab, and cal the next value. */
 21.1417 -	offset = cachep->colour_next;
 21.1418 -	cachep->colour_next++;
 21.1419 -	if (cachep->colour_next >= cachep->colour)
 21.1420 -		cachep->colour_next = 0;
 21.1421 -	offset *= cachep->colour_off;
 21.1422 -	cachep->dflags |= DFLGS_GROWN;
 21.1423 -
 21.1424 -	cachep->growing++;
 21.1425 -	spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1426 +    /* A series of memory allocations for a new slab.
 21.1427 +     * Neither the cache-chain semaphore, or cache-lock, are
 21.1428 +     * held, but the incrementing c_growing prevents this
 21.1429 +     * cache from being reaped or shrunk.
 21.1430 +     * Note: The cache could be selected in for reaping in
 21.1431 +     * kmem_cache_reap(), but when the final test is made the
 21.1432 +     * growing value will be seen.
 21.1433 +     */
 21.1434  
 21.1435 -	/* A series of memory allocations for a new slab.
 21.1436 -	 * Neither the cache-chain semaphore, or cache-lock, are
 21.1437 -	 * held, but the incrementing c_growing prevents this
 21.1438 -	 * cache from being reaped or shrunk.
 21.1439 -	 * Note: The cache could be selected in for reaping in
 21.1440 -	 * kmem_cache_reap(), but when the final test is made the
 21.1441 -	 * growing value will be seen.
 21.1442 -	 */
 21.1443 +    /* Get mem for the objs. */
 21.1444 +    if (!(objp = kmem_getpages(cachep)))
 21.1445 +        goto failed;
 21.1446 +
 21.1447 +    /* Get slab management. */
 21.1448 +    if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, 0)))
 21.1449 +        goto opps1;
 21.1450  
 21.1451 -	/* Get mem for the objs. */
 21.1452 -	if (!(objp = kmem_getpages(cachep, flags)))
 21.1453 -		goto failed;
 21.1454 -
 21.1455 -	/* Get slab management. */
 21.1456 -	if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, local_flags)))
 21.1457 -		goto opps1;
 21.1458 +    /* Nasty!!!!!! I hope this is OK. */
 21.1459 +    i = 1 << cachep->gfporder;
 21.1460 +    page = virt_to_page(objp);
 21.1461 +    do {
 21.1462 +        SET_PAGE_CACHE(page, cachep);
 21.1463 +        SET_PAGE_SLAB(page, slabp);
 21.1464 +        PageSetSlab(page);
 21.1465 +        page++;
 21.1466 +    } while (--i);
 21.1467  
 21.1468 -	/* Nasty!!!!!! I hope this is OK. */
 21.1469 -	i = 1 << cachep->gfporder;
 21.1470 -	page = virt_to_page(objp);
 21.1471 -	do {
 21.1472 -		SET_PAGE_CACHE(page, cachep);
 21.1473 -		SET_PAGE_SLAB(page, slabp);
 21.1474 -		PageSetSlab(page);
 21.1475 -		page++;
 21.1476 -	} while (--i);
 21.1477 +    kmem_cache_init_objs(cachep, slabp, ctor_flags);
 21.1478 +
 21.1479 +    spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1480 +    cachep->growing--;
 21.1481  
 21.1482 -	kmem_cache_init_objs(cachep, slabp, ctor_flags);
 21.1483 -
 21.1484 -	spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1485 -	cachep->growing--;
 21.1486 +    /* Make slab active. */
 21.1487 +    list_add_tail(&slabp->list, &cachep->slabs_free);
 21.1488 +    STATS_INC_GROWN(cachep);
 21.1489 +    cachep->failures = 0;
 21.1490  
 21.1491 -	/* Make slab active. */
 21.1492 -	list_add_tail(&slabp->list, &cachep->slabs_free);
 21.1493 -	STATS_INC_GROWN(cachep);
 21.1494 -	cachep->failures = 0;
 21.1495 -
 21.1496 -	spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1497 -	return 1;
 21.1498 -opps1:
 21.1499 -	kmem_freepages(cachep, objp);
 21.1500 -failed:
 21.1501 -	spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1502 -	cachep->growing--;
 21.1503 -	spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1504 -	return 0;
 21.1505 +    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1506 +    return 1;
 21.1507 + opps1:
 21.1508 +    kmem_freepages(cachep, objp);
 21.1509 + failed:
 21.1510 +    spin_lock_irqsave(&cachep->spinlock, save_flags);
 21.1511 +    cachep->growing--;
 21.1512 +    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
 21.1513 +    return 0;
 21.1514  }
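[Editor's note] kmem_cache_grow() above offsets each new slab's first object by colour_next * colour_off, cycling colour_next modulo cachep->colour, so objects in successive slabs start on different cache lines and spread the cache's footprint across the L1. A tiny sketch of how those offsets cycle (the colour and colour_off values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int colour      = 4;    /* left_over / colour_off, say */
        unsigned int colour_next = 0;
        unsigned int colour_off  = 32;   /* one L1 cache line           */
        unsigned int slab;

        /* Reproduce the colour selection done for each new slab. */
        for (slab = 0; slab < 6; slab++) {
            unsigned int offset = colour_next * colour_off;
            colour_next++;
            if (colour_next >= colour)
                colour_next = 0;
            printf("slab %u: first object at byte offset %u\n", slab, offset);
        }
        return 0;
    }

With these numbers the offsets run 0, 32, 64, 96 and then wrap back to 0, which is the behaviour the colour_next bookkeeping above implements.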
 21.1515  
 21.1516  /*
 21.1517 @@ -1192,70 +1138,59 @@ failed:
 21.1518  
 21.1519  #if DEBUG
 21.1520  static int kmem_extra_free_checks (kmem_cache_t * cachep,
 21.1521 -			slab_t *slabp, void * objp)
 21.1522 +                                   slab_t *slabp, void * objp)
 21.1523  {
 21.1524 -	int i;
 21.1525 -	unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
 21.1526 +    int i;
 21.1527 +    unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
 21.1528  
 21.1529 -	if (objnr >= cachep->num)
 21.1530 -		BUG();
 21.1531 -	if (objp != slabp->s_mem + objnr*cachep->objsize)
 21.1532 -		BUG();
 21.1533 +    if (objnr >= cachep->num)
 21.1534 +        BUG();
 21.1535 +    if (objp != slabp->s_mem + objnr*cachep->objsize)
 21.1536 +        BUG();
 21.1537  
 21.1538 -	/* Check slab's freelist to see if this obj is there. */
 21.1539 -	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
 21.1540 -		if (i == objnr)
 21.1541 -			BUG();
 21.1542 -	}
 21.1543 -	return 0;
 21.1544 +    /* Check slab's freelist to see if this obj is there. */
 21.1545 +    for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
 21.1546 +        if (i == objnr)
 21.1547 +            BUG();
 21.1548 +    }
 21.1549 +    return 0;
 21.1550  }
 21.1551  #endif
 21.1552  
 21.1553 -static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
 21.1554 -{
 21.1555 -	if (flags & SLAB_DMA) {
 21.1556 -		if (!(cachep->gfpflags & GFP_DMA))
 21.1557 -			BUG();
 21.1558 -	} else {
 21.1559 -		if (cachep->gfpflags & GFP_DMA)
 21.1560 -			BUG();
 21.1561 -	}
 21.1562 -}
 21.1563 -
 21.1564  static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
 21.1565  						slab_t *slabp)
 21.1566  {
 21.1567 -	void *objp;
 21.1568 +    void *objp;
 21.1569  
 21.1570 -	STATS_INC_ALLOCED(cachep);
 21.1571 -	STATS_INC_ACTIVE(cachep);
 21.1572 -	STATS_SET_HIGH(cachep);
 21.1573 +    STATS_INC_ALLOCED(cachep);
 21.1574 +    STATS_INC_ACTIVE(cachep);
 21.1575 +    STATS_SET_HIGH(cachep);
 21.1576  
 21.1577 -	/* get obj pointer */
 21.1578 -	slabp->inuse++;
 21.1579 -	objp = slabp->s_mem + slabp->free*cachep->objsize;
 21.1580 -	slabp->free=slab_bufctl(slabp)[slabp->free];
 21.1581 +    /* get obj pointer */
 21.1582 +    slabp->inuse++;
 21.1583 +    objp = slabp->s_mem + slabp->free*cachep->objsize;
 21.1584 +    slabp->free=slab_bufctl(slabp)[slabp->free];
 21.1585  
 21.1586 -	if (unlikely(slabp->free == BUFCTL_END)) {
 21.1587 -		list_del(&slabp->list);
 21.1588 -		list_add(&slabp->list, &cachep->slabs_full);
 21.1589 -	}
 21.1590 +    if (unlikely(slabp->free == BUFCTL_END)) {
 21.1591 +        list_del(&slabp->list);
 21.1592 +        list_add(&slabp->list, &cachep->slabs_full);
 21.1593 +    }
 21.1594  #if DEBUG
 21.1595 -	if (cachep->flags & SLAB_POISON)
 21.1596 -		if (kmem_check_poison_obj(cachep, objp))
 21.1597 -			BUG();
 21.1598 -	if (cachep->flags & SLAB_RED_ZONE) {
 21.1599 -		/* Set alloc red-zone, and check old one. */
 21.1600 -		if (xchg((unsigned long *)objp, RED_MAGIC2) !=
 21.1601 -							 RED_MAGIC1)
 21.1602 -			BUG();
 21.1603 -		if (xchg((unsigned long *)(objp+cachep->objsize -
 21.1604 -			  BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
 21.1605 -			BUG();
 21.1606 -		objp += BYTES_PER_WORD;
 21.1607 -	}
 21.1608 +    if (cachep->flags & SLAB_POISON)
 21.1609 +        if (kmem_check_poison_obj(cachep, objp))
 21.1610 +            BUG();
 21.1611 +    if (cachep->flags & SLAB_RED_ZONE) {
 21.1612 +        /* Set alloc red-zone, and check old one. */
 21.1613 +        if (xchg((unsigned long *)objp, RED_MAGIC2) !=
 21.1614 +            RED_MAGIC1)
 21.1615 +            BUG();
 21.1616 +        if (xchg((unsigned long *)(objp+cachep->objsize -
 21.1617 +                                   BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
 21.1618 +            BUG();
 21.1619 +        objp += BYTES_PER_WORD;
 21.1620 +    }
 21.1621  #endif
 21.1622 -	return objp;
 21.1623 +    return objp;
 21.1624  }
 21.1625  
 21.1626  /*
 21.1627 @@ -1285,85 +1220,84 @@ static inline void * kmem_cache_alloc_on
 21.1628  })
 21.1629  
 21.1630  #ifdef CONFIG_SMP
 21.1631 -void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 21.1632 +void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
 21.1633  {
 21.1634 -	int batchcount = cachep->batchcount;
 21.1635 -	cpucache_t* cc = cc_data(cachep);
 21.1636 +    int batchcount = cachep->batchcount;
 21.1637 +    cpucache_t* cc = cc_data(cachep);
 21.1638  
 21.1639 -	spin_lock(&cachep->spinlock);
 21.1640 -	while (batchcount--) {
 21.1641 -		struct list_head * slabs_partial, * entry;
 21.1642 -		slab_t *slabp;
 21.1643 -		/* Get slab alloc is to come from. */
 21.1644 -		slabs_partial = &(cachep)->slabs_partial;
 21.1645 -		entry = slabs_partial->next;
 21.1646 -		if (unlikely(entry == slabs_partial)) {
 21.1647 -			struct list_head * slabs_free;
 21.1648 -			slabs_free = &(cachep)->slabs_free;
 21.1649 -			entry = slabs_free->next;
 21.1650 -			if (unlikely(entry == slabs_free))
 21.1651 -				break;
 21.1652 -			list_del(entry);
 21.1653 -			list_add(entry, slabs_partial);
 21.1654 -		}
 21.1655 +    spin_lock(&cachep->spinlock);
 21.1656 +    while (batchcount--) {
 21.1657 +        struct list_head * slabs_partial, * entry;
 21.1658 +        slab_t *slabp;
 21.1659 +        /* Get slab alloc is to come from. */
 21.1660 +        slabs_partial = &(cachep)->slabs_partial;
 21.1661 +        entry = slabs_partial->next;
 21.1662 +        if (unlikely(entry == slabs_partial)) {
 21.1663 +            struct list_head * slabs_free;
 21.1664 +            slabs_free = &(cachep)->slabs_free;
 21.1665 +            entry = slabs_free->next;
 21.1666 +            if (unlikely(entry == slabs_free))
 21.1667 +                break;
 21.1668 +            list_del(entry);
 21.1669 +            list_add(entry, slabs_partial);
 21.1670 +        }
 21.1671  
 21.1672 -		slabp = list_entry(entry, slab_t, list);
 21.1673 -		cc_entry(cc)[cc->avail++] =
 21.1674 -				kmem_cache_alloc_one_tail(cachep, slabp);
 21.1675 -	}
 21.1676 -	spin_unlock(&cachep->spinlock);
 21.1677 +        slabp = list_entry(entry, slab_t, list);
 21.1678 +        cc_entry(cc)[cc->avail++] =
 21.1679 +            kmem_cache_alloc_one_tail(cachep, slabp);
 21.1680 +    }
 21.1681 +    spin_unlock(&cachep->spinlock);
 21.1682  
 21.1683 -	if (cc->avail)
 21.1684 -		return cc_entry(cc)[--cc->avail];
 21.1685 -	return NULL;
 21.1686 +    if (cc->avail)
 21.1687 +        return cc_entry(cc)[--cc->avail];
 21.1688 +    return NULL;
 21.1689  }
 21.1690  #endif
 21.1691  
 21.1692 -static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 21.1693 +static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
 21.1694  {
 21.1695 -	unsigned long save_flags;
 21.1696 -	void* objp;
 21.1697 +    unsigned long flags;
 21.1698 +    void* objp;
 21.1699  
 21.1700 -	kmem_cache_alloc_head(cachep, flags);
 21.1701 -try_again:
 21.1702 -	local_irq_save(save_flags);
 21.1703 + try_again:
 21.1704 +    local_irq_save(flags);
 21.1705  #ifdef CONFIG_SMP
 21.1706 -	{
 21.1707 -		cpucache_t *cc = cc_data(cachep);
 21.1708 +    {
 21.1709 +        cpucache_t *cc = cc_data(cachep);
 21.1710  
 21.1711 -		if (cc) {
 21.1712 -			if (cc->avail) {
 21.1713 -				STATS_INC_ALLOCHIT(cachep);
 21.1714 -				objp = cc_entry(cc)[--cc->avail];
 21.1715 -			} else {
 21.1716 -				STATS_INC_ALLOCMISS(cachep);
 21.1717 -				objp = kmem_cache_alloc_batch(cachep,flags);
 21.1718 -				if (!objp)
 21.1719 -					goto alloc_new_slab_nolock;
 21.1720 -			}
 21.1721 -		} else {
 21.1722 -			spin_lock(&cachep->spinlock);
 21.1723 -			objp = kmem_cache_alloc_one(cachep);
 21.1724 -			spin_unlock(&cachep->spinlock);
 21.1725 -		}
 21.1726 -	}
 21.1727 +        if (cc) {
 21.1728 +            if (cc->avail) {
 21.1729 +                STATS_INC_ALLOCHIT(cachep);
 21.1730 +                objp = cc_entry(cc)[--cc->avail];
 21.1731 +            } else {
 21.1732 +                STATS_INC_ALLOCMISS(cachep);
 21.1733 +                objp = kmem_cache_alloc_batch(cachep);
 21.1734 +                if (!objp)
 21.1735 +                    goto alloc_new_slab_nolock;
 21.1736 +            }
 21.1737 +        } else {
 21.1738 +            spin_lock(&cachep->spinlock);
 21.1739 +            objp = kmem_cache_alloc_one(cachep);
 21.1740 +            spin_unlock(&cachep->spinlock);
 21.1741 +        }
 21.1742 +    }
 21.1743  #else
 21.1744 -	objp = kmem_cache_alloc_one(cachep);
 21.1745 +    objp = kmem_cache_alloc_one(cachep);
 21.1746  #endif
 21.1747 -	local_irq_restore(save_flags);
 21.1748 -	return objp;
 21.1749 -alloc_new_slab:
 21.1750 +    local_irq_restore(flags);
 21.1751 +    return objp;
 21.1752 + alloc_new_slab:
 21.1753  #ifdef CONFIG_SMP
 21.1754 -	spin_unlock(&cachep->spinlock);
 21.1755 -alloc_new_slab_nolock:
 21.1756 +    spin_unlock(&cachep->spinlock);
 21.1757 + alloc_new_slab_nolock:
 21.1758  #endif
 21.1759 -	local_irq_restore(save_flags);
 21.1760 -	if (kmem_cache_grow(cachep, flags))
 21.1761 -		/* Someone may have stolen our objs.  Doesn't matter, we'll
 21.1762 -		 * just come back here again.
 21.1763 -		 */
 21.1764 -		goto try_again;
 21.1765 -	return NULL;
 21.1766 +    local_irq_restore(flags);
 21.1767 +    if (kmem_cache_grow(cachep))
 21.1768 +        /* Someone may have stolen our objs.  Doesn't matter, we'll
 21.1769 +         * just come back here again.
 21.1770 +         */
 21.1771 +        goto try_again;
 21.1772 +    return NULL;
 21.1773  }
 21.1774  
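[Editor's note] On SMP, the alloc and free paths above serve most requests from a per-CPU array of object pointers (cc->avail is the depth of a small stack), refilling from or flushing to the shared slab lists in batches of cachep->batchcount so the cache spinlock is taken only once per batch. A single-CPU user-space sketch of that hit/miss logic; the names are hypothetical and the "slab layer" is just malloc/free here.

    #include <stdio.h>
    #include <stdlib.h>

    #define LIMIT       8                /* cc->limit          */
    #define BATCHCOUNT  4                /* cachep->batchcount */

    static void *entries[LIMIT];         /* cc_entry(cc)       */
    static int   avail;                  /* cc->avail          */
    static int   hits, misses;

    /* Stand-ins for the slow path that really touches the slab lists. */
    static void *slab_layer_alloc(void) { return malloc(32); }
    static void  slab_layer_free(void *p) { free(p); }

    static void *cache_alloc(void)
    {
        if (avail) {                     /* fast path: pop from the stack   */
            hits++;
            return entries[--avail];
        }
        misses++;                        /* miss: refill a batch, then pop  */
        for (int i = 0; i < BATCHCOUNT; i++)
            entries[avail++] = slab_layer_alloc();
        return entries[--avail];
    }

    static void cache_free(void *obj)
    {
        if (avail == LIMIT) {            /* full: flush one batch first     */
            for (int i = 0; i < BATCHCOUNT; i++)
                slab_layer_free(entries[--avail]);
        }
        entries[avail++] = obj;          /* fast path: push onto the stack  */
    }

    int main(void)
    {
        void *objs[16];
        for (int i = 0; i < 16; i++)
            objs[i] = cache_alloc();
        for (int i = 0; i < 16; i++)
            cache_free(objs[i]);
        printf("hits=%d misses=%d cached=%d\n", hits, misses, avail);
        while (avail)                    /* drain what is still cached      */
            slab_layer_free(entries[--avail]);
        return 0;
    }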
 21.1775  /*
 21.1776 @@ -1397,76 +1331,76 @@ alloc_new_slab_nolock:
 21.1777  
 21.1778  static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 21.1779  {
 21.1780 -	slab_t* slabp;
 21.1781 +    slab_t* slabp;
 21.1782  
 21.1783 -	CHECK_PAGE(virt_to_page(objp));
 21.1784 -	/* reduces memory footprint
 21.1785 -	 *
 21.1786 -	if (OPTIMIZE(cachep))
 21.1787 -		slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
 21.1788 -	 else
 21.1789 -	 */
 21.1790 -	slabp = GET_PAGE_SLAB(virt_to_page(objp));
 21.1791 +    CHECK_PAGE(virt_to_page(objp));
 21.1792 +    /* reduces memory footprint
 21.1793 +     *
 21.1794 +     if (OPTIMIZE(cachep))
 21.1795 +     slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
 21.1796 +     else
 21.1797 +    */
 21.1798 +    slabp = GET_PAGE_SLAB(virt_to_page(objp));
 21.1799  
 21.1800  #if DEBUG
 21.1801 -	if (cachep->flags & SLAB_DEBUG_INITIAL)
 21.1802 -		/* Need to call the slab's constructor so the
 21.1803 -		 * caller can perform a verify of its state (debugging).
 21.1804 -		 * Called without the cache-lock held.
 21.1805 -		 */
 21.1806 -		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
 21.1807 +    if (cachep->flags & SLAB_DEBUG_INITIAL)
 21.1808 +        /* Need to call the slab's constructor so the
 21.1809 +         * caller can perform a verify of its state (debugging).
 21.1810 +         * Called without the cache-lock held.
 21.1811 +         */
 21.1812 +        cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
 21.1813  
 21.1814 -	if (cachep->flags & SLAB_RED_ZONE) {
 21.1815 -		objp -= BYTES_PER_WORD;
 21.1816 -		if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
 21.1817 -			/* Either write before start, or a double free. */
 21.1818 -			BUG();
 21.1819 -		if (xchg((unsigned long *)(objp+cachep->objsize -
 21.1820 -				BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
 21.1821 -			/* Either write past end, or a double free. */
 21.1822 -			BUG();
 21.1823 -	}
 21.1824 -	if (cachep->flags & SLAB_POISON)
 21.1825 -		kmem_poison_obj(cachep, objp);
 21.1826 -	if (kmem_extra_free_checks(cachep, slabp, objp))
 21.1827 -		return;
 21.1828 +    if (cachep->flags & SLAB_RED_ZONE) {
 21.1829 +        objp -= BYTES_PER_WORD;
 21.1830 +        if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
 21.1831 +            /* Either write before start, or a double free. */
 21.1832 +            BUG();
 21.1833 +        if (xchg((unsigned long *)(objp+cachep->objsize -
 21.1834 +                                   BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
 21.1835 +            /* Either write past end, or a double free. */
 21.1836 +            BUG();
 21.1837 +    }
 21.1838 +    if (cachep->flags & SLAB_POISON)
 21.1839 +        kmem_poison_obj(cachep, objp);
 21.1840 +    if (kmem_extra_free_checks(cachep, slabp, objp))
 21.1841 +        return;
 21.1842  #endif
 21.1843 -	{
 21.1844 -		unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
 21.1845 +    {
 21.1846 +        unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
 21.1847  
 21.1848 -		slab_bufctl(slabp)[objnr] = slabp->free;
 21.1849 -		slabp->free = objnr;
 21.1850 -	}
 21.1851 -	STATS_DEC_ACTIVE(cachep);
 21.1852 +        slab_bufctl(slabp)[objnr] = slabp->free;
 21.1853 +        slabp->free = objnr;
 21.1854 +    }
 21.1855 +    STATS_DEC_ACTIVE(cachep);
 21.1856  	
 21.1857 -	/* fixup slab chains */
 21.1858 -	{
 21.1859 -		int inuse = slabp->inuse;
 21.1860 -		if (unlikely(!--slabp->inuse)) {
 21.1861 -			/* Was partial or full, now empty. */
 21.1862 -			list_del(&slabp->list);
 21.1863 -			list_add(&slabp->list, &cachep->slabs_free);
 21.1864 -		} else if (unlikely(inuse == cachep->num)) {
 21.1865 -			/* Was full. */
 21.1866 -			list_del(&slabp->list);
 21.1867 -			list_add(&slabp->list, &cachep->slabs_partial);
 21.1868 -		}
 21.1869 -	}
 21.1870 +    /* fixup slab chains */
 21.1871 +    {
 21.1872 +        int inuse = slabp->inuse;
 21.1873 +        if (unlikely(!--slabp->inuse)) {
 21.1874 +            /* Was partial or full, now empty. */
 21.1875 +            list_del(&slabp->list);
 21.1876 +            list_add(&slabp->list, &cachep->slabs_free);
 21.1877 +        } else if (unlikely(inuse == cachep->num)) {
 21.1878 +            /* Was full. */
 21.1879 +            list_del(&slabp->list);
 21.1880 +            list_add(&slabp->list, &cachep->slabs_partial);
 21.1881 +        }
 21.1882 +    }
 21.1883  }
 21.1884  
 21.1885  #ifdef CONFIG_SMP
 21.1886  static inline void __free_block (kmem_cache_t* cachep,
 21.1887 -							void** objpp, int len)
 21.1888 +                                 void** objpp, int len)
 21.1889  {
 21.1890 -	for ( ; len > 0; len--, objpp++)
 21.1891 -		kmem_cache_free_one(cachep, *objpp);
 21.1892 +    for ( ; len > 0; len--, objpp++)
 21.1893 +        kmem_cache_free_one(cachep, *objpp);
 21.1894  }
 21.1895  
 21.1896  static void free_block (kmem_cache_t* cachep, void** objpp, int len)
 21.1897  {
 21.1898 -	spin_lock(&cachep->spinlock);
 21.1899 -	__free_block(cachep, objpp, len);
 21.1900 -	spin_unlock(&cachep->spinlock);
 21.1901 +    spin_lock(&cachep->spinlock);
 21.1902 +    __free_block(cachep, objpp, len);
 21.1903 +    spin_unlock(&cachep->spinlock);
 21.1904  }
 21.1905  #endif
 21.1906  
 21.1907 @@ -1477,76 +1411,57 @@ static void free_block (kmem_cache_t* ca
 21.1908  static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
 21.1909  {
 21.1910  #ifdef CONFIG_SMP
 21.1911 -	cpucache_t *cc = cc_data(cachep);
 21.1912 +    cpucache_t *cc = cc_data(cachep);
 21.1913  
 21.1914 -	CHECK_PAGE(virt_to_page(objp));
 21.1915 -	if (cc) {
 21.1916 -		int batchcount;
 21.1917 -		if (cc->avail < cc->limit) {
 21.1918 -			STATS_INC_FREEHIT(cachep);
 21.1919 -			cc_entry(cc)[cc->avail++] = objp;
 21.1920 -			return;
 21.1921 -		}
 21.1922 -		STATS_INC_FREEMISS(cachep);
 21.1923 -		batchcount = cachep->batchcount;
 21.1924 -		cc->avail -= batchcount;
 21.1925 -		free_block(cachep,
 21.1926 -					&cc_entry(cc)[cc->avail],batchcount);
 21.1927 -		cc_entry(cc)[cc->avail++] = objp;
 21.1928 -		return;
 21.1929 -	} else {
 21.1930 -		free_block(cachep, &objp, 1);
 21.1931 -	}
 21.1932 +    CHECK_PAGE(virt_to_page(objp));
 21.1933 +    if (cc) {
 21.1934 +        int batchcount;
 21.1935 +        if (cc->avail < cc->limit) {
 21.1936 +            STATS_INC_FREEHIT(cachep);
 21.1937 +            cc_entry(cc)[cc->avail++] = objp;
 21.1938 +            return;
 21.1939 +        }
 21.1940 +        STATS_INC_FREEMISS(cachep);
 21.1941 +        batchcount = cachep->batchcount;
 21.1942 +        cc->avail -= batchcount;
 21.1943 +        free_block(cachep,
 21.1944 +                   &cc_entry(cc)[cc->avail],batchcount);
 21.1945 +        cc_entry(cc)[cc->avail++] = objp;
 21.1946 +        return;
 21.1947 +    } else {
 21.1948 +        free_block(cachep, &objp, 1);
 21.1949 +    }
 21.1950  #else
 21.1951 -	kmem_cache_free_one(cachep, objp);
 21.1952 +    kmem_cache_free_one(cachep, objp);
 21.1953  #endif
 21.1954  }
 21.1955  
 21.1956  /**
 21.1957   * kmem_cache_alloc - Allocate an object
 21.1958   * @cachep: The cache to allocate from.
 21.1959 - * @flags: See kmalloc().
 21.1960   *
 21.1961   * Allocate an object from this cache.  The flags are only relevant
 21.1962   * if the cache has no available objects.
 21.1963   */
 21.1964 -void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 21.1965 +void *kmem_cache_alloc(kmem_cache_t *cachep)
 21.1966  {
 21.1967 -	return __kmem_cache_alloc(cachep, flags);
 21.1968 +    return __kmem_cache_alloc(cachep);
 21.1969  }
 21.1970  
 21.1971  /**
 21.1972   * kmalloc - allocate memory
 21.1973   * @size: how many bytes of memory are required.
 21.1974 - * @flags: the type of memory to allocate.
 21.1975 - *
 21.1976 - * kmalloc is the normal method of allocating memory
 21.1977 - * in the kernel.
 21.1978 - *
 21.1979 - * The @flags argument may be one of:
 21.1980 - *
 21.1981 - * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 21.1982 - *
 21.1983 - * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 21.1984 - *
 21.1985 - * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
 21.1986 - *
 21.1987 - * Additionally, the %GFP_DMA flag may be set to indicate the memory
 21.1988 - * must be suitable for DMA.  This can mean different things on different
 21.1989 - * platforms.  For example, on i386, it means that the memory must come
 21.1990 - * from the first 16MB.
 21.1991   */
 21.1992 -void * kmalloc (size_t size, int flags)
 21.1993 +void *kmalloc(size_t size)
 21.1994  {
 21.1995 -	cache_sizes_t *csizep = cache_sizes;
 21.1996 +    cache_sizes_t *csizep = cache_sizes;
 21.1997  
 21.1998 -	for (; csizep->cs_size; csizep++) {
 21.1999 -		if (size > csizep->cs_size)
 21.2000 -			continue;
 21.2001 -		return __kmem_cache_alloc(flags & GFP_DMA ?
 21.2002 -			 csizep->cs_dmacachep : csizep->cs_cachep, flags);
 21.2003 -	}
 21.2004 -	return NULL;
 21.2005 +    for (; csizep->cs_size; csizep++) {
 21.2006 +        if (size > csizep->cs_size)
 21.2007 +            continue;
 21.2008 +        return __kmem_cache_alloc(csizep->cs_cachep);
 21.2009 +    }
 21.2010 +    return NULL;
 21.2011  }
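[Editor's note] With the GFP flags gone, kmalloc() above reduces to a walk of the cache_sizes table: the first general cache whose cs_size is at least the requested size serves the allocation, and a request larger than the biggest class returns NULL. A small sketch of that size-class lookup; the class sizes here are hypothetical (the real table is set up by kmem_cache_sizes_init()).

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical power-of-two size classes, ending with a 0 sentinel. */
    static const size_t class_sizes[] = { 32, 64, 128, 256, 512, 1024,
                                          2048, 4096, 0 };

    /* Return the class that would back a kmalloc(size), or 0 if too big. */
    static size_t pick_class(size_t size)
    {
        for (const size_t *csp = class_sizes; *csp; csp++) {
            if (size > *csp)
                continue;       /* class too small, try the next one */
            return *csp;
        }
        return 0;               /* larger than the biggest class: NULL */
    }

    int main(void)
    {
        size_t wanted[] = { 1, 33, 500, 4096, 9000 };
        for (unsigned int i = 0; i < sizeof(wanted)/sizeof(wanted[0]); i++)
            printf("kmalloc(%zu) -> class %zu\n",
                   wanted[i], pick_class(wanted[i]));
        return 0;
    }

kmem_find_general_cachep() below performs the same table walk, only returning the cache pointer instead of allocating from it.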
 21.2012  
 21.2013  /**
 21.2014 @@ -1559,16 +1474,16 @@ void * kmalloc (size_t size, int flags)
 21.2015   */
 21.2016  void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 21.2017  {
 21.2018 -	unsigned long flags;
 21.2019 +    unsigned long flags;
 21.2020  #if DEBUG
 21.2021 -	CHECK_PAGE(virt_to_page(objp));
 21.2022 -	if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
 21.2023 -		BUG();
 21.2024 +    CHECK_PAGE(virt_to_page(objp));
 21.2025 +    if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
 21.2026 +        BUG();
 21.2027  #endif
 21.2028  
 21.2029 -	local_irq_save(flags);
 21.2030 -	__kmem_cache_free(cachep, objp);
 21.2031 -	local_irq_restore(flags);
 21.2032 +    local_irq_save(flags);
 21.2033 +    __kmem_cache_free(cachep, objp);
 21.2034 +    local_irq_restore(flags);
 21.2035  }
 21.2036  
 21.2037  /**
 21.2038 @@ -1580,32 +1495,32 @@ void kmem_cache_free (kmem_cache_t *cach
 21.2039   */
 21.2040  void kfree (const void *objp)
 21.2041  {
 21.2042 -	kmem_cache_t *c;
 21.2043 -	unsigned long flags;
 21.2044 +    kmem_cache_t *c;
 21.2045 +    unsigned long flags;
 21.2046  
 21.2047 -	if (!objp)
 21.2048 -		return;
 21.2049 -	local_irq_save(flags);
 21.2050 -	CHECK_PAGE(virt_to_page(objp));
 21.2051 -	c = GET_PAGE_CACHE(virt_to_page(objp));
 21.2052 -	__kmem_cache_free(c, (void*)objp);
 21.2053 -	local_irq_restore(flags);
 21.2054 +    if (!objp)
 21.2055 +        return;
 21.2056 +    local_irq_save(flags);
 21.2057 +    CHECK_PAGE(virt_to_page(objp));
 21.2058 +    c = GET_PAGE_CACHE(virt_to_page(objp));
 21.2059 +    __kmem_cache_free(c, (void*)objp);
 21.2060 +    local_irq_restore(flags);
 21.2061  }
 21.2062  
 21.2063 -kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
 21.2064 +kmem_cache_t *kmem_find_general_cachep(size_t size)
 21.2065  {
 21.2066 -	cache_sizes_t *csizep = cache_sizes;
 21.2067 +    cache_sizes_t *csizep = cache_sizes;
 21.2068  
 21.2069 -	/* This function could be moved to the header file, and
 21.2070 -	 * made inline so consumers can quickly determine what
 21.2071 -	 * cache pointer they require.
 21.2072 -	 */
 21.2073 -	for ( ; csizep->cs_size; csizep++) {
 21.2074 -		if (size > csizep->cs_size)
 21.2075 -			continue;
 21.2076 -		break;
 21.2077 -	}
 21.2078 -	return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
 21.2079 +    /* This function could be moved to the header file, and
 21.2080 +     * made inline so consumers can quickly determine what
 21.2081 +     * cache pointer they require.
 21.2082 +     */
 21.2083 +    for ( ; csizep->cs_size; csizep++) {
 21.2084 +        if (size > csizep->cs_size)
 21.2085 +            continue;
 21.2086 +        break;
 21.2087 +    }
 21.2088 +    return csizep->cs_cachep;
 21.2089  }
 21.2090  
 21.2091  #ifdef CONFIG_SMP
 21.2092 @@ -1613,328 +1528,321 @@ kmem_cache_t * kmem_find_general_cachep 
 21.2093  /* called with cache_chain_sem acquired.  */
 21.2094  static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
 21.2095  {
 21.2096 -	ccupdate_struct_t new;
 21.2097 -	int i;
 21.2098 +    ccupdate_struct_t new;
 21.2099 +    int i;
 21.2100  
 21.2101 -	/*
 21.2102 -	 * These are admin-provided, so we are more graceful.
 21.2103 -	 */
 21.2104 -	if (limit < 0)
 21.2105 -		return -EINVAL;
 21.2106 -	if (batchcount < 0)
 21.2107 -		return -EINVAL;
 21.2108 -	if (batchcount > limit)
 21.2109 -		return -EINVAL;
 21.2110 -	if (limit != 0 && !batchcount)
 21.2111 -		return -EINVAL;
 21.2112 +    /*
 21.2113 +     * These are admin-provided, so we are more graceful.
 21.2114 +     */
 21.2115 +    if (limit < 0)
 21.2116 +        return -EINVAL;
 21.2117 +    if (batchcount < 0)
 21.2118 +        return -EINVAL;
 21.2119 +    if (batchcount > limit)
 21.2120 +        return -EINVAL;
 21.2121 +    if (limit != 0 && !batchcount)
 21.2122 +        return -EINVAL;
 21.2123  
 21.2124 -	memset(&new.new,0,sizeof(new.new));
 21.2125 -	if (limit) {
 21.2126 -		for (i = 0; i< smp_num_cpus; i++) {
 21.2127 -			cpucache_t* ccnew;
 21.2128 +    memset(&new.new,0,sizeof(new.new));
 21.2129 +    if (limit) {
 21.2130 +        for (i = 0; i< smp_num_cpus; i++) {
 21.2131 +            cpucache_t* ccnew;
 21.2132  
 21.2133 -			ccnew = kmalloc(sizeof(void*)*limit+
 21.2134 -					sizeof(cpucache_t), GFP_KERNEL);
 21.2135 -			if (!ccnew)
 21.2136 -				goto oom;
 21.2137 -			ccnew->limit = limit;
 21.2138 -			ccnew->avail = 0;
 21.2139 -			new.new[cpu_logical_map(i)] = ccnew;
 21.2140 -		}
 21.2141 -	}
 21.2142 -	new.cachep = cachep;
 21.2143 -	spin_lock_irq(&cachep->spinlock);
 21.2144 -	cachep->batchcount = batchcount;
 21.2145 -	spin_unlock_irq(&cachep->spinlock);
 21.2146 +            ccnew = kmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
 21.2147 +            if (!ccnew)
 21.2148 +                goto oom;
 21.2149 +            ccnew->limit = limit;
 21.2150 +            ccnew->avail = 0;
 21.2151 +            new.new[cpu_logical_map(i)] = ccnew;
 21.2152 +        }
 21.2153 +    }
 21.2154 +    new.cachep = cachep;
 21.2155 +    spin_lock_irq(&cachep->spinlock);
 21.2156 +    cachep->batchcount = batchcount;
 21.2157 +    spin_unlock_irq(&cachep->spinlock);
 21.2158 +
 21.2159 +    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.2160  
 21.2161 -	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
 21.2162 -
 21.2163 -	for (i = 0; i < smp_num_cpus; i++) {
 21.2164 -		cpucache_t* ccold = new.new[cpu_logical_map(i)];
 21.2165 -		if (!ccold)
 21.2166 -			continue;
 21.2167 -		local_irq_disable();
 21.2168 -		free_block(cachep, cc_entry(ccold), ccold->avail);
 21.2169 -		local_irq_enable();
 21.2170 -		kfree(ccold);
 21.2171 -	}
 21.2172 -	return 0;
 21.2173 -oom:
 21.2174 -	for (i--; i >= 0; i--)
 21.2175 -		kfree(new.new[cpu_logical_map(i)]);
 21.2176 -	return -ENOMEM;
 21.2177 +    for (i = 0; i < smp_num_cpus; i++) {
 21.2178 +        cpucache_t* ccold = new.new[cpu_logical_map(i)];
 21.2179 +        if (!ccold)
 21.2180 +            continue;
 21.2181 +        local_irq_disable();
 21.2182 +        free_block(cachep, cc_entry(ccold), ccold->avail);
 21.2183 +        local_irq_enable();
 21.2184 +        kfree(ccold);
 21.2185 +    }
 21.2186 +    return 0;
 21.2187 + oom:
 21.2188 +    for (i--; i >= 0; i--)
 21.2189 +        kfree(new.new[cpu_logical_map(i)]);
 21.2190 +    return -ENOMEM;
 21.2191  }
 21.2192  
 21.2193  static void enable_cpucache (kmem_cache_t *cachep)
 21.2194  {
 21.2195 -	int err;
 21.2196 -	int limit;
 21.2197 +    int err;
 21.2198 +    int limit;
 21.2199  
 21.2200 -	/* FIXME: optimize */
 21.2201 -	if (cachep->objsize > PAGE_SIZE)
 21.2202 -		return;
 21.2203 -	if (cachep->objsize > 1024)
 21.2204 -		limit = 60;
 21.2205 -	else if (cachep->objsize > 256)
 21.2206 -		limit = 124;
 21.2207 -	else
 21.2208 -		limit = 252;
 21.2209 +    /* FIXME: optimize */
 21.2210 +    if (cachep->objsize > PAGE_SIZE)
 21.2211 +        return;
 21.2212 +    if (cachep->objsize > 1024)
 21.2213 +        limit = 60;
 21.2214 +    else if (cachep->objsize > 256)
 21.2215 +        limit = 124;
 21.2216 +    else
 21.2217 +        limit = 252;
 21.2218  
 21.2219 -	err = kmem_tune_cpucache(cachep, limit, limit/2);
 21.2220 -	if (err)
 21.2221 -		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 21.2222 -					cachep->name, -err);
 21.2223 +    err = kmem_tune_cpucache(cachep, limit, limit/2);
 21.2224 +    if (err)
 21.2225 +        printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 21.2226 +               cachep->name, -err);
 21.2227  }
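[Editor's note] enable_cpucache() above sizes the per-CPU arrays purely from the object size (bigger objects get smaller per-CPU stashes) and always passes half the limit as the batch transfer size to kmem_tune_cpucache(). A sketch of that heuristic with the thresholds copied from the code above; objects larger than a page get no per-CPU cache at all.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Mirror of enable_cpucache()'s limit heuristic. */
    static int pick_limit(size_t objsize)
    {
        if (objsize > PAGE_SIZE)
            return 0;           /* no per-CPU cache for huge objects */
        if (objsize > 1024)
            return 60;
        if (objsize > 256)
            return 124;
        return 252;
    }

    int main(void)
    {
        size_t sizes[] = { 64, 512, 2048, 8192 };
        for (unsigned int i = 0; i < 4; i++) {
            int limit = pick_limit(sizes[i]);
            printf("objsize %5zu: limit=%3d batchcount=%3d\n",
                   sizes[i], limit, limit / 2);
        }
        return 0;
    }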
 21.2228  
 21.2229  static void enable_all_cpucaches (void)
 21.2230  {
 21.2231 -	struct list_head* p;
 21.2232 -        unsigned long spin_flags;
 21.2233 +    struct list_head* p;
 21.2234 +    unsigned long spin_flags;
 21.2235  
 21.2236 -	down(&cache_chain_sem);
 21.2237 +    down(&cache_chain_sem);
 21.2238  
 21.2239 -	p = &cache_cache.next;
 21.2240 -	do {
 21.2241 -		kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
 21.2242 +    p = &cache_cache.next;
 21.2243 +    do {
 21.2244 +        kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
 21.2245  
 21.2246 -		enable_cpucache(cachep);
 21.2247 -		p = cachep->next.next;
 21.2248 -	} while (p != &cache_cache.next);
 21.2249 +        enable_cpucache(cachep);
 21.2250 +        p = cachep->next.next;
 21.2251 +    } while (p != &cache_cache.next);
 21.2252  
 21.2253 -	up(&cache_chain_sem);
 21.2254 +    up(&cache_chain_sem);
 21.2255  }
 21.2256  #endif
 21.2257  
 21.2258  /**
 21.2259   * kmem_cache_reap - Reclaim memory from caches.
 21.2260 - * @gfp_mask: the type of memory required.
 21.2261 - *
 21.2262 - * Called from do_try_to_free_pages() and __alloc_pages()
 21.2263   */
 21.2264 -int kmem_cache_reap (int gfp_mask)
 21.2265 +int kmem_cache_reap(void)
 21.2266  {
 21.2267 -	slab_t *slabp;
 21.2268 -	kmem_cache_t *searchp;
 21.2269 -	kmem_cache_t *best_cachep;
 21.2270 -	unsigned int best_pages;
 21.2271 -	unsigned int best_len;
 21.2272 -	unsigned int scan;
 21.2273 -	int ret = 0;
 21.2274 -        unsigned long spin_flags;
 21.2275 +    slab_t *slabp;
 21.2276 +    kmem_cache_t *searchp;
 21.2277 +    kmem_cache_t *best_cachep;
 21.2278 +    unsigned int best_pages;
 21.2279 +    unsigned int best_len;
 21.2280 +    unsigned int scan;
 21.2281 +    int ret = 0;
 21.2282 +    unsigned long spin_flags;
 21.2283  
 21.2284 -        down(&cache_chain_sem);
 21.2285 +    down(&cache_chain_sem);
 21.2286  
 21.2287 -	scan = REAP_SCANLEN;
 21.2288 -	best_len = 0;
 21.2289 -	best_pages = 0;
 21.2290 -	best_cachep = NULL;
 21.2291 -	searchp = clock_searchp;
 21.2292 -	do {
 21.2293 -		unsigned int pages;
 21.2294 -		struct list_head* p;
 21.2295 -		unsigned int full_free;
 21.2296 +    scan = REAP_SCANLEN;
 21.2297 +    best_len = 0;
 21.2298 +    best_pages = 0;
 21.2299 +    best_cachep = NULL;
 21.2300 +    searchp = clock_searchp;
 21.2301 +    do {
 21.2302 +        unsigned int pages;
 21.2303 +        struct list_head* p;
 21.2304 +        unsigned int full_free;
 21.2305  
 21.2306 -		/* It's safe to test this without holding the cache-lock. */
 21.2307 -		if (searchp->flags & SLAB_NO_REAP)
 21.2308 -			goto next;
 21.2309 -		spin_lock_irq(&searchp->spinlock);
 21.2310 -		if (searchp->growing)
 21.2311 -			goto next_unlock;
 21.2312 -		if (searchp->dflags & DFLGS_GROWN) {
 21.2313 -			searchp->dflags &= ~DFLGS_GROWN;
 21.2314 -			goto next_unlock;
 21.2315 -		}
 21.2316 +        /* It's safe to test this without holding the cache-lock. */
 21.2317 +        if (searchp->flags & SLAB_NO_REAP)
 21.2318 +            goto next;
 21.2319 +        spin_lock_irq(&searchp->spinlock);
 21.2320 +        if (searchp->growing)
 21.2321 +            goto next_unlock;
 21.2322 +        if (searchp->dflags & DFLGS_GROWN) {
 21.2323 +            searchp->dflags &= ~DFLGS_GROWN;
 21.2324 +            goto next_unlock;
 21.2325 +        }
 21.2326  #ifdef CONFIG_SMP
 21.2327 -		{
 21.2328 -			cpucache_t *cc = cc_data(searchp);
 21.2329 -			if (cc && cc->avail) {
 21.2330 -				__free_block(searchp, cc_entry(cc), cc->avail);
 21.2331 -				cc->avail = 0;
 21.2332 -			}
 21.2333 -		}
 21.2334 +        {
 21.2335 +            cpucache_t *cc = cc_data(searchp);
 21.2336 +            if (cc && cc->avail) {
 21.2337 +                __free_block(searchp, cc_entry(cc), cc->avail);
 21.2338 +                cc->avail = 0;
 21.2339 +            }
 21.2340 +        }
 21.2341  #endif
 21.2342  
 21.2343 -		full_free = 0;
 21.2344 -		p = searchp->slabs_free.next;
 21.2345 -		while (p != &searchp->slabs_free) {
 21.2346 -			slabp = list_entry(p, slab_t, list);
 21.2347 +        full_free = 0;
 21.2348 +        p = searchp->slabs_free.next;
 21.2349 +        while (p != &searchp->slabs_free) {
 21.2350 +            slabp = list_entry(p, slab_t, list);
 21.2351  #if DEBUG
 21.2352 -			if (slabp->inuse)
 21.2353 -				BUG();
 21.2354 +            if (slabp->inuse)
 21.2355 +                BUG();
 21.2356  #endif
 21.2357 -			full_free++;
 21.2358 -			p = p->next;
 21.2359 -		}
 21.2360 +            full_free++;
 21.2361 +            p = p->next;
 21.2362 +        }
 21.2363  
 21.2364 -		/*
 21.2365 -		 * Try to avoid slabs with constructors and/or
 21.2366 -		 * more than one page per slab (as it can be difficult
 21.2367 -		 * to get high orders from gfp()).
 21.2368 -		 */
 21.2369 -		pages = full_free * (1<<searchp->gfporder);
 21.2370 -		if (searchp->ctor)
 21.2371 -			pages = (pages*4+1)/5;
 21.2372 -		if (searchp->gfporder)
 21.2373 -			pages = (pages*4+1)/5;
 21.2374 -		if (pages > best_pages) {
 21.2375 -			best_cachep = searchp;
 21.2376 -			best_len = full_free;
 21.2377 -			best_pages = pages;
 21.2378 -			if (pages >= REAP_PERFECT) {
 21.2379 -				clock_searchp = list_entry(searchp->next.next,
 21.2380 -							kmem_cache_t,next);
 21.2381 -				goto perfect;
 21.2382 -			}
 21.2383 -		}
 21.2384 -next_unlock:
 21.2385 -		spin_unlock_irq(&searchp->spinlock);
 21.2386 -next:
 21.2387 -		searchp = list_entry(searchp->next.next,kmem_cache_t,next);
 21.2388 -	} while (--scan && searchp != clock_searchp);
 21.2389 +        /*
 21.2390 +         * Try to avoid slabs with constructors and/or
 21.2391 +         * more than one page per slab (as it can be difficult
 21.2392 +         * to get high orders from gfp()).
 21.2393 +         */
 21.2394 +        pages = full_free * (1<<searchp->gfporder);
 21.2395 +        if (searchp->ctor)
 21.2396 +            pages = (pages*4+1)/5;
 21.2397 +        if (searchp->gfporder)
 21.2398 +            pages = (pages*4+1)/5;
 21.2399 +        if (pages > best_pages) {
 21.2400 +            best_cachep = searchp;
 21.2401 +            best_len = full_free;
 21.2402 +            best_pages = pages;
 21.2403 +            if (pages >= REAP_PERFECT) {
 21.2404 +                clock_searchp = list_entry(searchp->next.next,
 21.2405 +                                           kmem_cache_t,next);
 21.2406 +                goto perfect;
 21.2407 +            }
 21.2408 +        }
 21.2409 +    next_unlock:
 21.2410 +        spin_unlock_irq(&searchp->spinlock);
 21.2411 +    next:
 21.2412 +        searchp = list_entry(searchp->next.next,kmem_cache_t,next);
 21.2413 +    } while (--scan && searchp != clock_searchp);
 21.2414  
 21.2415 -	clock_searchp = searchp;
 21.2416 +    clock_searchp = searchp;
 21.2417  
 21.2418 -	if (!best_cachep)
 21.2419 -		/* couldn't find anything to reap */
 21.2420 -		goto out;
 21.2421 +    if (!best_cachep)
 21.2422 +        /* couldn't find anything to reap */
 21.2423 +        goto out;
 21.2424  
 21.2425 -	spin_lock_irq(&best_cachep->spinlock);
 21.2426 -perfect:
 21.2427 -	/* free only 50% of the free slabs */
 21.2428 -	best_len = (best_len + 1)/2;
 21.2429 -	for (scan = 0; scan < best_len; scan++) {
 21.2430 -		struct list_head *p;
 21.2431 +    spin_lock_irq(&best_cachep->spinlock);
 21.2432 + perfect:
 21.2433 +    /* free only 50% of the free slabs */
 21.2434 +    best_len = (best_len + 1)/2;
 21.2435 +    for (scan = 0; scan < best_len; scan++) {
 21.2436 +        struct list_head *p;
 21.2437  
 21.2438 -		if (best_cachep->growing)
 21.2439 -			break;
 21.2440 -		p = best_cachep->slabs_free.prev;
 21.2441 -		if (p == &best_cachep->slabs_free)
 21.2442 -			break;
 21.2443 -		slabp = list_entry(p,slab_t,list);
 21.2444 +        if (best_cachep->growing)
 21.2445 +            break;
 21.2446 +        p = best_cachep->slabs_free.prev;
 21.2447 +        if (p == &best_cachep->slabs_free)
 21.2448 +            break;
 21.2449 +        slabp = list_entry(p,slab_t,list);
 21.2450  #if DEBUG
 21.2451 -		if (slabp->inuse)
 21.2452 -			BUG();
 21.2453 +        if (slabp->inuse)
 21.2454 +            BUG();
 21.2455  #endif
 21.2456 -		list_del(&slabp->list);
 21.2457 -		STATS_INC_REAPED(best_cachep);
 21.2458 +        list_del(&slabp->list);
 21.2459 +        STATS_INC_REAPED(best_cachep);
 21.2460  
 21.2461 -		/* Safe to drop the lock. The slab is no longer linked to the
 21.2462 -		 * cache.
 21.2463 -		 */
 21.2464 -		spin_unlock_irq(&best_cachep->spinlock);
 21.2465 -		kmem_slab_destroy(best_cachep, slabp);
 21.2466 -		spin_lock_irq(&best_cachep->spinlock);
 21.2467 -	}
 21.2468 -	spin_unlock_irq(&best_cachep->spinlock);
 21.2469 -	ret = scan * (1 << best_cachep->gfporder);
 21.2470 -out:
 21.2471 -	up(&cache_chain_sem);
 21.2472 -	return ret;
 21.2473 +        /* Safe to drop the lock. The slab is no longer linked to the
 21.2474 +         * cache.
 21.2475 +         */
 21.2476 +        spin_unlock_irq(&best_cachep->spinlock);
 21.2477 +        kmem_slab_destroy(best_cachep, slabp);
 21.2478 +        spin_lock_irq(&best_cachep->spinlock);
 21.2479 +    }
 21.2480 +    spin_unlock_irq(&best_cachep->spinlock);
 21.2481 +    ret = scan * (1 << best_cachep->gfporder);
 21.2482 + out:
 21.2483 +    up(&cache_chain_sem);
 21.2484 +    return ret;
 21.2485  }
 21.2486  
 21.2487  void dump_slabinfo()
 21.2488  {
 21.2489 -	struct list_head *p;
 21.2490 -        unsigned long spin_flags;
 21.2491 +    struct list_head *p;
 21.2492 +    unsigned long spin_flags;
 21.2493  
 21.2494 -	/* Output format version, so at least we can change it without _too_
 21.2495 -	 * many complaints.
 21.2496 -	 */
 21.2497 -	printk( "slabinfo - version: 1.1"
 21.2498 +    /* Output format version, so at least we can change it without _too_
 21.2499 +     * many complaints.
 21.2500 +     */
 21.2501 +    printk( "slabinfo - version: 1.1"
 21.2502  #if STATS
 21.2503 -				" (statistics)"
 21.2504 +            " (statistics)"
 21.2505  #endif
 21.2506  #ifdef CONFIG_SMP
 21.2507 -				" (SMP)"
 21.2508 +            " (SMP)"
 21.2509  #endif
 21.2510 -				"\n");
 21.2511 -	down(&cache_chain_sem);
 21.2512 -	p = &cache_cache.next;
 21.2513 -	do {
 21.2514 -		kmem_cache_t	*cachep;
 21.2515 -		struct list_head *q;
 21.2516 -		slab_t		*slabp;
 21.2517 -		unsigned long	active_objs;
 21.2518 -		unsigned long	num_objs;
 21.2519 -		unsigned long	active_slabs = 0;
 21.2520 -		unsigned long	num_slabs;
 21.2521 -		cachep = list_entry(p, kmem_cache_t, next);
 21.2522 +            "\n");
 21.2523 +    down(&cache_chain_sem);
 21.2524 +    p = &cache_cache.next;
 21.2525 +    do {
 21.2526 +        kmem_cache_t	*cachep;
 21.2527 +        struct list_head *q;
 21.2528 +        slab_t		*slabp;
 21.2529 +        unsigned long	active_objs;
 21.2530 +        unsigned long	num_objs;
 21.2531 +        unsigned long	active_slabs = 0;
 21.2532 +        unsigned long	num_slabs;
 21.2533 +        cachep = list_entry(p, kmem_cache_t, next);
 21.2534  
 21.2535 -		spin_lock_irq(&cachep->spinlock);
 21.2536 -		active_objs = 0;
 21.2537 -		num_slabs = 0;
 21.2538 -		list_for_each(q,&cachep->slabs_full) {
 21.2539 -			slabp = list_entry(q, slab_t, list);
 21.2540 -			if (slabp->inuse != cachep->num)
 21.2541 -				BUG();
 21.2542 -			active_objs += cachep->num;
 21.2543 -			active_slabs++;
 21.2544 -		}
 21.2545 -		list_for_each(q,&cachep->slabs_partial) {
 21.2546 -			slabp = list_entry(q, slab_t, list);
 21.2547 -			if (slabp->inuse == cachep->num || !slabp->inuse)
 21.2548 -				BUG();
 21.2549 -			active_objs += slabp->inuse;
 21.2550 -			active_slabs++;
 21.2551 -		}
 21.2552 -		list_for_each(q,&cachep->slabs_free) {
 21.2553 -			slabp = list_entry(q, slab_t, list);
 21.2554 -			if (slabp->inuse)
 21.2555 -				BUG();
 21.2556 -			num_slabs++;
 21.2557 -		}
 21.2558 -		num_slabs+=active_slabs;
 21.2559 -		num_objs = num_slabs*cachep->num;
 21.2560 +        spin_lock_irq(&cachep->spinlock);
 21.2561 +        active_objs = 0;
 21.2562 +        num_slabs = 0;
 21.2563 +        list_for_each(q,&cachep->slabs_full) {
 21.2564 +            slabp = list_entry(q, slab_t, list);
 21.2565 +            if (slabp->inuse != cachep->num)
 21.2566 +                BUG();
 21.2567 +            active_objs += cachep->num;
 21.2568 +            active_slabs++;
 21.2569 +        }
 21.2570 +        list_for_each(q,&cachep->slabs_partial) {
 21.2571 +            slabp = list_entry(q, slab_t, list);
 21.2572 +            if (slabp->inuse == cachep->num || !slabp->inuse)
 21.2573 +                BUG();
 21.2574 +            active_objs += slabp->inuse;
 21.2575 +            active_slabs++;
 21.2576 +        }
 21.2577 +        list_for_each(q,&cachep->slabs_free) {
 21.2578 +            slabp = list_entry(q, slab_t, list);
 21.2579 +            if (slabp->inuse)
 21.2580 +                BUG();
 21.2581 +            num_slabs++;
 21.2582 +        }
 21.2583 +        num_slabs+=active_slabs;
 21.2584 +        num_objs = num_slabs*cachep->num;
 21.2585  
 21.2586 -		printk("%-17s %6lu %6lu %6u %4lu %4lu %4u",
 21.2587 -			cachep->name, active_objs, num_objs, cachep->objsize,
 21.2588 -			active_slabs, num_slabs, (1<<cachep->gfporder));
 21.2589 +        printk("%-17s %6lu %6lu %6u %4lu %4lu %4u",
 21.2590 +               cachep->name, active_objs, num_objs, cachep->objsize,
 21.2591 +               active_slabs, num_slabs, (1<<cachep->gfporder));
 21.2592  
 21.2593  #if STATS
 21.2594 -		{
 21.2595 -			unsigned long errors = cachep->errors;
 21.2596 -			unsigned long high = cachep->high_mark;
 21.2597 -			unsigned long grown = cachep->grown;
 21.2598 -			unsigned long reaped = cachep->reaped;
 21.2599 -			unsigned long allocs = cachep->num_allocations;
 21.2600 +        {
 21.2601 +            unsigned long errors = cachep->errors;
 21.2602 +            unsigned long high = cachep->high_mark;
 21.2603 +            unsigned long grown = cachep->grown;
 21.2604 +            unsigned long reaped = cachep->reaped;
 21.2605 +            unsigned long allocs = cachep->num_allocations;
 21.2606  
 21.2607 -			printk(" : %6lu %7lu %5lu %4lu %4lu",
 21.2608 -					high, allocs, grown, reaped, errors);
 21.2609 -		}
 21.2610 +            printk(" : %6lu %7lu %5lu %4lu %4lu",
 21.2611 +                   high, allocs, grown, reaped, errors);
 21.2612 +        }
 21.2613  #endif
 21.2614  #ifdef CONFIG_SMP
 21.2615 -		{
 21.2616 -			unsigned int batchcount = cachep->batchcount;
 21.2617 -			unsigned int limit;
 21.2618 +        {
 21.2619 +            unsigned int batchcount = cachep->batchcount;
 21.2620 +            unsigned int limit;
 21.2621  
 21.2622 -			if (cc_data(cachep))
 21.2623 -				limit = cc_data(cachep)->limit;
 21.2624 -			 else
 21.2625 -				limit = 0;
 21.2626 -			printk(" : %4u %4u",
 21.2627 -					limit, batchcount);
 21.2628 -		}
 21.2629 +            if (cc_data(cachep))
 21.2630 +                limit = cc_data(cachep)->limit;
 21.2631 +            else
 21.2632 +                limit = 0;
 21.2633 +            printk(" : %4u %4u",
 21.2634 +                   limit, batchcount);
 21.2635 +        }
 21.2636  #endif
 21.2637  #if STATS && defined(CONFIG_SMP)
 21.2638 -		{
 21.2639 -			unsigned long allochit = atomic_read(&cachep->allochit);
 21.2640 -			unsigned long allocmiss = atomic_read(&cachep->allocmiss);
 21.2641 -			unsigned long freehit = atomic_read(&cachep->freehit);
 21.2642 -			unsigned long freemiss = atomic_read(&cachep->freemiss);
 21.2643 -			printk(" : %6lu %6lu %6lu %6lu",
 21.2644 -					allochit, allocmiss, freehit, freemiss);
 21.2645 -		}
 21.2646 +        {
 21.2647 +            unsigned long allochit = atomic_read(&cachep->allochit);
 21.2648 +            unsigned long allocmiss = atomic_read(&cachep->allocmiss);
 21.2649 +            unsigned long freehit = atomic_read(&cachep->freehit);
 21.2650 +            unsigned long freemiss = atomic_read(&cachep->freemiss);
 21.2651 +            printk(" : %6lu %6lu %6lu %6lu",
 21.2652 +                   allochit, allocmiss, freehit, freemiss);
 21.2653 +        }
 21.2654  #endif
 21.2655 -		printk("\n");
 21.2656 -		spin_unlock_irq(&cachep->spinlock);
 21.2657 -
 21.2658 -		p = cachep->next.next;
 21.2659 -	} while (p != &cache_cache.next);
 21.2660 +        printk("\n");
 21.2661 +        spin_unlock_irq(&cachep->spinlock);
 21.2662  
 21.2663 -	up(&cache_chain_sem);
 21.2664 +        p = cachep->next.next;
 21.2665 +    } while (p != &cache_cache.next);
 21.2666  
 21.2667 -	return;
 21.2668 +    up(&cache_chain_sem);
 21.2669 +
 21.2670 +    return;
 21.2671  }
 21.2672 -
 21.2673 -
 21.2674 -
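
The kmem_cache_reap() pass retained above scores each cache by the pages its fully-free slabs would release, discounting caches with constructors or multi-page slabs, and then frees half of the winner's free slabs. A minimal sketch of that scoring rule, with hypothetical parameter names standing in for the kmem_cache_t fields (not code from the patch):

    /* Sketch only -- mirrors the heuristic in kmem_cache_reap():
     * releasable pages, discounted by 4/5 when a constructor is present
     * and again when slabs span more than one page. */
    static unsigned int reap_score(unsigned int free_slabs,
                                   unsigned int gfporder,
                                   int has_ctor)
    {
        unsigned int pages = free_slabs * (1u << gfporder);
        if (has_ctor)
            pages = (pages*4 + 1) / 5;
        if (gfporder)
            pages = (pages*4 + 1) / 5;
        return pages;    /* the cache with the highest score is reaped */
    }
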
    22.1 --- a/xen/common/trace.c	Wed Jun 23 16:02:02 2004 +0000
    22.2 +++ b/xen/common/trace.c	Wed Jun 23 16:03:02 2004 +0000
    22.3 @@ -59,7 +59,7 @@ void init_trace_bufs(void)
    22.4      nr_pages = smp_num_cpus * opt_tbuf_size;
    22.5      order    = get_order(nr_pages * PAGE_SIZE);
    22.6      
    22.7 -    if ( (rawbuf = (char *)__get_free_pages(GFP_KERNEL, order)) == NULL )
    22.8 +    if ( (rawbuf = (char *)__get_free_pages(order)) == NULL )
    22.9      {
   22.10          printk("Xen trace buffers: memory allocation failed\n");
   22.11          return;
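
With the mask argument gone, a multi-page allocation reduces to computing an order and passing it straight to __get_free_pages(). A minimal sketch under that assumption (the helper name and its nr_pages parameter are illustrative, not from the patch):

    /* Illustrative only: allocate and zero a buffer of nr_pages pages
     * with the flag-less interface introduced by this changeset. */
    static char *alloc_page_buffer(unsigned long nr_pages)
    {
        int   order  = get_order(nr_pages * PAGE_SIZE);
        char *rawbuf = (char *)__get_free_pages(order);   /* no GFP mask */

        if ( rawbuf == NULL )
            return NULL;                          /* allocation failed */
        memset(rawbuf, 0, nr_pages * PAGE_SIZE);
        return rawbuf;    /* release with free_pages((unsigned long)rawbuf, order) */
    }
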
    23.1 --- a/xen/drivers/char/console.c	Wed Jun 23 16:02:02 2004 +0000
    23.2 +++ b/xen/drivers/char/console.c	Wed Jun 23 16:03:02 2004 +0000
    23.3 @@ -300,7 +300,7 @@ long do_console_io(int cmd, int count, c
    23.4      case CONSOLEIO_write:
    23.5          if ( count > (PAGE_SIZE-1) )
    23.6              count = PAGE_SIZE-1;
    23.7 -        if ( (kbuf = (char *)get_free_page(GFP_KERNEL)) == NULL )
    23.8 +        if ( (kbuf = (char *)get_free_page()) == NULL )
    23.9              return -ENOMEM;
   23.10          kbuf[count] = '\0';
   23.11          rc = count;
    24.1 --- a/xen/drivers/pci/pci.c	Wed Jun 23 16:02:02 2004 +0000
    24.2 +++ b/xen/drivers/pci/pci.c	Wed Jun 23 16:03:02 2004 +0000
    24.3 @@ -1126,7 +1126,7 @@ static struct pci_bus * __devinit pci_al
    24.4  {
    24.5  	struct pci_bus *b;
    24.6  
    24.7 -	b = kmalloc(sizeof(*b), GFP_KERNEL);
    24.8 +	b = kmalloc(sizeof(*b));
    24.9  	if (b) {
   24.10  		memset(b, 0, sizeof(*b));
   24.11  		INIT_LIST_HEAD(&b->children);
   24.12 @@ -1351,7 +1351,7 @@ struct pci_dev * __devinit pci_scan_devi
   24.13  	if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
   24.14  		return NULL;
   24.15  
   24.16 -	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
   24.17 +	dev = kmalloc(sizeof(*dev));
   24.18  	if (!dev)
   24.19  		return NULL;
   24.20  
   24.21 @@ -1424,14 +1424,14 @@ unsigned int __devinit pci_do_scan_bus(s
   24.22  {
   24.23  	unsigned int devfn, max, pass;
   24.24  	struct list_head *ln;
   24.25 -	/* XEN MODIFICATION: Allocate 'dev0' on heap to avoid stack overflow. */
   24.26 +	/* XEN MODIFICATION: Allocate dev0 on heap to avoid stack overflow. */
   24.27  	struct pci_dev *dev, *dev0;
   24.28  
   24.29  	DBG("Scanning bus %02x\n", bus->number);
   24.30  	max = bus->secondary;
   24.31  
   24.32  	/* Create a device template */
   24.33 -	dev0 = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
   24.34 +	dev0 = kmalloc(sizeof(struct pci_dev));
   24.35  	if(!dev0) {
   24.36  	  panic("Out of memory scanning PCI bus!\n");
   24.37  	}
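
Since kmalloc() now takes only a size, the kmalloc-then-memset pattern above is unchanged apart from the dropped flag. A minimal sketch (the helper name is illustrative, not from the patch):

    /* Illustrative only: zero-filled allocation with the one-argument
     * kmalloc() used throughout this changeset. */
    static struct pci_bus *pci_bus_alloc(void)
    {
        struct pci_bus *b = kmalloc(sizeof(*b));   /* no GFP_KERNEL */

        if ( b == NULL )
            return NULL;
        memset(b, 0, sizeof(*b));                  /* kmalloc() does not zero */
        INIT_LIST_HEAD(&b->children);
        INIT_LIST_HEAD(&b->devices);
        return b;                                  /* release with kfree(b) */
    }
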
    25.1 --- a/xen/drivers/pci/setup-res.c	Wed Jun 23 16:02:02 2004 +0000
    25.2 +++ b/xen/drivers/pci/setup-res.c	Wed Jun 23 16:03:02 2004 +0000
    25.3 @@ -171,7 +171,7 @@ pdev_sort_resources(struct pci_dev *dev,
    25.4  					ln->res->start;
    25.5  			}
    25.6  			if (r_align > align) {
    25.7 -				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
    25.8 +				tmp = kmalloc(sizeof(*tmp));
    25.9  				if (!tmp)
   25.10  					panic("pdev_sort_resources(): "
   25.11  					      "kmalloc() failed!\n");
    26.1 --- a/xen/include/asm-x86/page.h	Wed Jun 23 16:02:02 2004 +0000
    26.2 +++ b/xen/include/asm-x86/page.h	Wed Jun 23 16:03:02 2004 +0000
    26.3 @@ -33,9 +33,6 @@ typedef struct { unsigned long pt_lo; } 
    26.4  #define l2_pgentry_val(_x) ((_x).l2_lo)
    26.5  #define pagetable_val(_x)  ((_x).pt_lo)
    26.6  
    26.7 -#define alloc_l1_pagetable()  ((l1_pgentry_t *)get_free_page(GFP_KERNEL))
    26.8 -#define alloc_l2_pagetable()  ((l2_pgentry_t *)get_free_page(GFP_KERNEL))
    26.9 -
   26.10  /* Add type to a table entry. */
   26.11  #define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
   26.12  #define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
    27.1 --- a/xen/include/asm-x86/x86_64/page.h	Wed Jun 23 16:02:02 2004 +0000
    27.2 +++ b/xen/include/asm-x86/x86_64/page.h	Wed Jun 23 16:03:02 2004 +0000
    27.3 @@ -67,11 +67,6 @@ typedef struct { unsigned long pgprot; }
    27.4  #define l4_pgentry_val(_x) ((_x).l4_lo)
    27.5  #define pagetable_val(_x)  ((_x).pt_lo)
    27.6  
    27.7 -#define alloc_l1_pagetable()  ((l1_pgentry_t *)get_free_page(GFP_KERNEL))
    27.8 -#define alloc_l2_pagetable()  ((l2_pgentry_t *)get_free_page(GFP_KERNEL))
    27.9 -#define alloc_l3_pagetable()  ((l3_pgentry_t *)get_free_page(GFP_KERNEL))
   27.10 -#define alloc_l4_pagetable()  ((l4_pgentry_t *)get_free_page(GFP_KERNEL))
   27.11 -
   27.12  /* Add type to a table entry. */
   27.13  #define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
   27.14  #define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
    28.1 --- a/xen/include/xen/mm.h	Wed Jun 23 16:02:02 2004 +0000
    28.2 +++ b/xen/include/xen/mm.h	Wed Jun 23 16:03:02 2004 +0000
    28.3 @@ -17,31 +17,16 @@
    28.4  #include <hypervisor-ifs/hypervisor-if.h>
    28.5  
    28.6  /*
    28.7 - * These are for compatibility with calls to the Linux memory allocators.
    28.8 - */
    28.9 -
   28.10 -#define __GFP_DMA       0x01
   28.11 -#define GFP_DMA         __GFP_DMA
   28.12 -#define __GFP_WAIT      0x10    /* Can wait and reschedule? */
   28.13 -#define __GFP_HIGH      0x20    /* Should access emergency pools? */
   28.14 -#define __GFP_IO        0x40    /* Can start low memory physical IO? */
   28.15 -#define __GFP_HIGHIO    0x80    /* Can start high mem physical IO? */
   28.16 -#define __GFP_FS        0x100   /* Can call down to low-level FS? */
   28.17 -#define GFP_ATOMIC      (__GFP_HIGH)
   28.18 -#define GFP_KERNEL      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | \
   28.19 -                         __GFP_HIGHIO | __GFP_FS)
   28.20 -
   28.21 -/*
   28.22   * The following is for page_alloc.c.
   28.23   */
   28.24  
   28.25  void init_page_allocator(unsigned long min, unsigned long max);
   28.26 -unsigned long __get_free_pages(int mask, int order);
   28.27 +unsigned long __get_free_pages(int order);
   28.28  void __free_pages(unsigned long p, int order);
   28.29 -#define get_free_page(_m) (__get_free_pages((_m),0))
   28.30 -#define __get_free_page(_m) (__get_free_pages((_m),0))
   28.31 +#define get_free_page()   (__get_free_pages(0))
   28.32 +#define __get_free_page() (__get_free_pages(0))
   28.33  #define free_pages(_p,_o) (__free_pages(_p,_o))
   28.34 -#define free_page(_p) (__free_pages(_p,0))
   28.35 +#define free_page(_p)     (__free_pages(_p,0))
   28.36  
   28.37  
   28.38  /*
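
After this change the page-allocator interface is order-only; callers that previously passed GFP_KERNEL simply drop the argument. A minimal usage sketch (the wrapper function is illustrative, not from the patch):

    /* Illustrative only: single- and multi-page allocation with the
     * trimmed interface declared above. */
    static void page_alloc_example(void)
    {
        unsigned long page = get_free_page();       /* one page, no flags */
        unsigned long buf  = __get_free_pages(2);   /* 2^2 = 4 contiguous pages */

        if ( page != 0 )
            free_page(page);
        if ( buf != 0 )
            free_pages(buf, 2);
    }
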
    29.1 --- a/xen/include/xen/shadow.h	Wed Jun 23 16:02:02 2004 +0000
    29.2 +++ b/xen/include/xen/shadow.h	Wed Jun 23 16:03:02 2004 +0000
    29.3 @@ -505,13 +505,13 @@ static inline void set_shadow_status( st
    29.4          SH_LOG("allocate more shadow hashtable blocks");
    29.5  
    29.6          // we need to allocate more space
    29.7 -        extra = kmalloc( sizeof(void*) + (shadow_ht_extra_size * 
    29.8 -                                          sizeof(struct shadow_status)), GFP_KERNEL );
    29.9 +        extra = kmalloc(sizeof(void*) + (shadow_ht_extra_size * 
   29.10 +                                         sizeof(struct shadow_status)));
   29.11  
   29.12          if( ! extra ) BUG(); // should be more graceful here....
   29.13  
   29.14 -        memset( extra, 0, sizeof(void*) + (shadow_ht_extra_size * 
   29.15 -                                           sizeof(struct shadow_status)) );
   29.16 +        memset(extra, 0, sizeof(void*) + (shadow_ht_extra_size * 
   29.17 +                                          sizeof(struct shadow_status)));
   29.18  
   29.19          m->shadow_extras_count++;
   29.20  
    30.1 --- a/xen/include/xen/slab.h	Wed Jun 23 16:02:02 2004 +0000
    30.2 +++ b/xen/include/xen/slab.h	Wed Jun 23 16:03:02 2004 +0000
    30.3 @@ -4,55 +4,44 @@
    30.4   */
    30.5  
    30.6  #ifndef __SLAB_H__
    30.7 -#define	__SLAB_H__
    30.8 +#define __SLAB_H__
    30.9  
   30.10  typedef struct kmem_cache_s kmem_cache_t;
   30.11  
   30.12  #include <xen/mm.h>
   30.13  #include <xen/cache.h>
   30.14  
   30.15 -/* flags for kmem_cache_alloc() */
   30.16 -#define	SLAB_ATOMIC		GFP_ATOMIC
   30.17 -#define	SLAB_KERNEL		GFP_KERNEL
   30.18 -#define	SLAB_DMA		GFP_DMA
   30.19 -
   30.20 -#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
   30.21 -#define	SLAB_NO_GROW		0x00001000UL	/* don't grow a cache */
   30.22 +/* Flags to pass to kmem_cache_create(). */
   30.23 +/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
   30.24 +#define SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor */
   30.25 +#define SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
   30.26 +#define SLAB_POISON             0x00000800UL    /* Poison objects */
   30.27 +#define SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
   30.28 +#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* align obj on a cache line */
   30.29  
   30.30 -/* flags to pass to kmem_cache_create().
   30.31 - * The first 3 are only valid when the allocator as been build
   30.32 - * SLAB_DEBUG_SUPPORT.
   30.33 - */
   30.34 -#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
   30.35 -#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
   30.36 -#define	SLAB_POISON		0x00000800UL	/* Poison objects */
   30.37 -#define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
   30.38 -#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
   30.39 -#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
   30.40 +/* Flags passed to a constructor function. */
   30.41 +#define SLAB_CTOR_CONSTRUCTOR   0x001UL /* if not set, then deconstructor */
   30.42 +#define SLAB_CTOR_ATOMIC        0x002UL /* tell cons. it can't sleep */
   30.43 +#define SLAB_CTOR_VERIFY        0x004UL /* tell cons. it's a verify call */
   30.44  
   30.45 -/* flags passed to a constructor func */
   30.46 -#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
   30.47 -#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
   30.48 -#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
   30.49 -
   30.50 -/* prototypes */
   30.51  extern void kmem_cache_init(void);
   30.52  extern void kmem_cache_sizes_init(unsigned long);
   30.53  
   30.54 -extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
   30.55 -extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
   30.56 -				       void (*)(void *, kmem_cache_t *, unsigned long),
   30.57 -				       void (*)(void *, kmem_cache_t *, unsigned long));
   30.58 +extern kmem_cache_t *kmem_find_general_cachep(size_t);
   30.59 +extern kmem_cache_t *kmem_cache_create(
   30.60 +    const char *, size_t, size_t, unsigned long,
   30.61 +    void (*)(void *, kmem_cache_t *, unsigned long),
   30.62 +    void (*)(void *, kmem_cache_t *, unsigned long));
   30.63  extern int kmem_cache_destroy(kmem_cache_t *);
   30.64  extern int kmem_cache_shrink(kmem_cache_t *);
   30.65 -extern void *kmem_cache_alloc(kmem_cache_t *, int);
   30.66 +extern void *kmem_cache_alloc(kmem_cache_t *);
   30.67  extern void kmem_cache_free(kmem_cache_t *, void *);
   30.68  
   30.69 -extern void *kmalloc(size_t, int);
   30.70 +extern void *kmalloc(size_t);
   30.71  extern void kfree(const void *);
   30.72  
   30.73 -extern int FASTCALL(kmem_cache_reap(int));
   30.74 +extern int kmem_cache_reap(void);
   30.75  
   30.76  extern void dump_slabinfo();
   30.77  
   30.78 -#endif	/* __SLAB_H__ */
   30.79 +#endif /* __SLAB_H__ */
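
With the allocation flags gone, the slab interface declared above is driven by just a cache pointer or a size. A minimal sketch of the post-patch calls (struct foo, foo_cachep and the helper names are illustrative, not from the patch):

    /* Illustrative only: creating a cache and allocating from it with
     * the flag-less slab API. */
    struct foo { int id; };

    static kmem_cache_t *foo_cachep;

    static int foo_cache_init(void)
    {
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return foo_cachep ? 0 : -ENOMEM;
    }

    static struct foo *foo_alloc(void)
    {
        return kmem_cache_alloc(foo_cachep);    /* no SLAB_KERNEL argument */
    }

    static void foo_free(struct foo *f)
    {
        kmem_cache_free(foo_cachep, f);
    }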