ia64/xen-unstable

changeset 1920:42f37b71f9b1

bitkeeper revision 1.1108.1.29 (41056396oI8TDCaTvBdU_3II1cHF9Q)

Rename memory allocator interfaces in Xen to avoid conflicts with the
Linux namespace.
author kaf24@scramble.cl.cam.ac.uk
date Mon Jul 26 20:03:34 2004 +0000 (2004-07-26)
parents f10b0c614fad
children be1be474b56f a83817a4f882 7d853b31485c
files xen/arch/x86/acpi.c xen/arch/x86/apic.c xen/arch/x86/domain.c xen/arch/x86/irq.c xen/arch/x86/mpparse.c xen/arch/x86/pci-pc.c xen/arch/x86/pdb-stub.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/mm.c xen/common/ac_timer.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/kernel.c xen/common/page_alloc.c xen/common/physdev.c xen/common/resource.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_fair_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/common/slab.c xen/common/trace.c xen/drivers/char/console.c xen/drivers/pci/pci.c xen/drivers/pci/setup-bus.c xen/drivers/pci/setup-res.c xen/include/asm-x86/domain.h xen/include/asm-x86/io.h xen/include/asm-x86/shadow.h xen/include/asm-x86/types.h xen/include/xen/mm.h xen/include/xen/pci.h xen/include/xen/slab.h
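
For quick reference, the rename applied throughout the hunks below follows one uniform pattern: Linux-derived page-allocator, kmalloc and slab-cache names become Xen-specific alloc_xenheap_*/x* names. The shim-style sketch below is illustrative only; the changeset renames every call site directly and introduces no such compatibility macros. It merely summarises the old-name to new-name mapping.

/*
 * Illustrative sketch only -- NOT part of this changeset.  The patch edits
 * each call site in place; these definitions just restate the mapping that
 * the hunks below apply throughout xen/.
 */

/* Xen-heap page allocator (xen/common/page_alloc.c) */
#define get_free_page()             alloc_xenheap_page()
#define __get_free_page()           alloc_xenheap_page()
#define __get_free_pages(order)     alloc_xenheap_pages(order)
#define free_page(p)                free_xenheap_page(p)
#define free_pages(p, order)        free_xenheap_pages(p, order)

/* General-purpose allocator */
#define kmalloc(size)               xmalloc(size)
#define kfree(p)                    xfree(p)

/* Slab cache interface (xen/common/slab.c):
 *   kmem_cache_t   -> xmem_cache_t   (struct kmem_cache_s -> struct xmem_cache_s)
 *   kmem_bufctl_t  -> xmem_bufctl_t
 */
#define kmem_cache_create           xmem_cache_create
#define kmem_cache_alloc            xmem_cache_alloc
#define kmem_cache_free             xmem_cache_free
#define kmem_cache_reap             xmem_cache_reap
#define kmem_cache_init             xmem_cache_init
#define kmem_cache_sizes_init       xmem_cache_sizes_init

Note that the calling conventions are unchanged: the free-side routines still take a virtual address as an unsigned long, as in free_xenheap_page((unsigned long)ptep) in the acpi.c hunk below.
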
line diff
     1.1 --- a/xen/arch/x86/acpi.c	Mon Jul 26 18:22:00 2004 +0000
     1.2 +++ b/xen/arch/x86/acpi.c	Mon Jul 26 20:03:34 2004 +0000
     1.3 @@ -578,7 +578,7 @@ static void acpi_create_identity_pmd (vo
     1.4  	pgd_t *pgd;
     1.5  	int i;
     1.6  
     1.7 -	ptep = (pte_t*)__get_free_page();
     1.8 +	ptep = (pte_t*)alloc_xenheap_page();
     1.9  
    1.10  	/* fill page with low mapping */
    1.11  	for (i = 0; i < PTRS_PER_PTE; i++)
    1.12 @@ -607,7 +607,7 @@ static void acpi_restore_pmd (void)
    1.13  {
    1.14  	set_pmd(pmd, saved_pmd);
    1.15  	local_flush_tlb();
    1.16 -	free_page((unsigned long)ptep);
    1.17 +	free_xenheap_page((unsigned long)ptep);
    1.18  }
    1.19  
    1.20  /**
     2.1 --- a/xen/arch/x86/apic.c	Mon Jul 26 18:22:00 2004 +0000
     2.2 +++ b/xen/arch/x86/apic.c	Mon Jul 26 20:03:34 2004 +0000
     2.3 @@ -445,7 +445,7 @@ void __init init_apic_mappings(void)
     2.4       * simulate the local APIC and another one for the IO-APIC.
     2.5       */
     2.6      if (!smp_found_config && detect_init_APIC()) {
     2.7 -        apic_phys = get_free_page();
     2.8 +        apic_phys = alloc_xenheap_page();
     2.9          apic_phys = __pa(apic_phys);
    2.10      } else
    2.11          apic_phys = mp_lapic_addr;
     3.1 --- a/xen/arch/x86/domain.c	Mon Jul 26 18:22:00 2004 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Mon Jul 26 20:03:34 2004 +0000
     3.3 @@ -212,13 +212,13 @@ void machine_halt(void)
     3.4  
     3.5  void arch_do_createdomain(struct domain *d)
     3.6  {
     3.7 -    d->shared_info = (void *)get_free_page();
     3.8 +    d->shared_info = (void *)alloc_xenheap_page();
     3.9      memset(d->shared_info, 0, PAGE_SIZE);
    3.10      SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    3.11      machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    3.12                             PAGE_SHIFT] = 0x80000000UL;  /* debug */
    3.13  
    3.14 -    d->mm.perdomain_pt = (l1_pgentry_t *)get_free_page();
    3.15 +    d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
    3.16      memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
    3.17      machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >> 
    3.18                             PAGE_SHIFT] = 0x0fffdeadUL;  /* debug */
     4.1 --- a/xen/arch/x86/irq.c	Mon Jul 26 18:22:00 2004 +0000
     4.2 +++ b/xen/arch/x86/irq.c	Mon Jul 26 20:03:34 2004 +0000
     4.3 @@ -258,7 +258,7 @@ int pirq_guest_bind(struct domain *p, in
     4.4              goto out;
     4.5          }
     4.6  
     4.7 -        action = kmalloc(sizeof(irq_guest_action_t));
     4.8 +        action = xmalloc(sizeof(irq_guest_action_t));
     4.9          if ( (desc->action = (struct irqaction *)action) == NULL )
    4.10          {
    4.11              DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
    4.12 @@ -320,7 +320,7 @@ int pirq_guest_unbind(struct domain *p, 
    4.13      if ( action->nr_guests == 1 )
    4.14      {
    4.15          desc->action = NULL;
    4.16 -        kfree(action);
    4.17 +        xfree(action);
    4.18          desc->depth   = 1;
    4.19          desc->status |= IRQ_DISABLED;
    4.20          desc->status &= ~IRQ_GUEST;
     5.1 --- a/xen/arch/x86/mpparse.c	Mon Jul 26 18:22:00 2004 +0000
     5.2 +++ b/xen/arch/x86/mpparse.c	Mon Jul 26 20:03:34 2004 +0000
     5.3 @@ -509,7 +509,7 @@ static int __init smp_read_mpc(struct mp
     5.4  	
     5.5  	count = (max_mp_busses * sizeof(int)) * 4;
     5.6  	count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
     5.7 -	bus_data = (void *)__get_free_pages(get_order(count));
     5.8 +	bus_data = (void *)alloc_xenheap_pages(get_order(count));
     5.9  	if (!bus_data) {
    5.10  		printk(KERN_ERR "SMP mptable: out of memory!\n");
    5.11  		return 0;
    5.12 @@ -694,7 +694,7 @@ static inline void __init construct_defa
    5.13  		struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
    5.14  	} *bus_data;
    5.15  
    5.16 -	bus_data = (void *)__get_free_pages(get_order(sizeof(*bus_data)));
    5.17 +	bus_data = (void *)alloc_xenheap_pages(get_order(sizeof(*bus_data)));
    5.18  	if (!bus_data)
    5.19  		panic("SMP mptable: out of memory!\n");
    5.20  	mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
    5.21 @@ -1171,7 +1171,7 @@ void __init mp_config_acpi_legacy_irqs (
    5.22  
    5.23  	count = (MAX_MP_BUSSES * sizeof(int)) * 4;
    5.24  	count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
    5.25 -	bus_data = (void *)__get_free_pages(get_order(count));
    5.26 +	bus_data = (void *)alloc_xenheap_pages(get_order(count));
    5.27  	if (!bus_data) {
    5.28  		panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
    5.29  	}
     6.1 --- a/xen/arch/x86/pci-pc.c	Mon Jul 26 18:22:00 2004 +0000
     6.2 +++ b/xen/arch/x86/pci-pc.c	Mon Jul 26 20:03:34 2004 +0000
     6.3 @@ -1003,7 +1003,7 @@ struct irq_routing_table * __devinit pci
     6.4  
     6.5  	if (!pci_bios_present)
     6.6  		return NULL;
     6.7 -	page = __get_free_page();
     6.8 +	page = alloc_xenheap_page();
     6.9  	if (!page)
    6.10  		return NULL;
    6.11  	opt.table = (struct irq_info *) page;
    6.12 @@ -1030,7 +1030,7 @@ struct irq_routing_table * __devinit pci
    6.13  	if (ret & 0xff00)
    6.14  		printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
    6.15  	else if (opt.size) {
    6.16 -		rt = kmalloc(sizeof(struct irq_routing_table) + opt.size);
    6.17 +		rt = xmalloc(sizeof(struct irq_routing_table) + opt.size);
    6.18  		if (rt) {
    6.19  			memset(rt, 0, sizeof(struct irq_routing_table));
    6.20  			rt->size = opt.size + sizeof(struct irq_routing_table);
    6.21 @@ -1039,7 +1039,7 @@ struct irq_routing_table * __devinit pci
    6.22  			printk(KERN_INFO "PCI: Using BIOS Interrupt Routing Table\n");
    6.23  		}
    6.24  	}
    6.25 -	free_page(page);
    6.26 +	free_xenheap_page(page);
    6.27  	return rt;
    6.28  }
    6.29  
    6.30 @@ -1109,7 +1109,7 @@ static void __devinit pcibios_fixup_ghos
    6.31  		if (d->devfn >= mirror) {
    6.32  			list_del(&d->global_list);
    6.33  			list_del(&d->bus_list);
    6.34 -			kfree(d);
    6.35 +			xfree(d);
    6.36  		} else
    6.37  			ln = ln->next;
    6.38  	}
     7.1 --- a/xen/arch/x86/pdb-stub.c	Mon Jul 26 18:22:00 2004 +0000
     7.2 +++ b/xen/arch/x86/pdb-stub.c	Mon Jul 26 20:03:34 2004 +0000
     7.3 @@ -836,7 +836,7 @@ struct pdb_breakpoint breakpoints;
     7.4  
     7.5  void pdb_bkpt_add (unsigned long cr3, unsigned long address)
     7.6  {
     7.7 -    struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt));
     7.8 +    struct pdb_breakpoint *bkpt = xmalloc(sizeof(*bkpt));
     7.9      bkpt->cr3 = cr3;
    7.10      bkpt->address = address;
    7.11      list_add(&bkpt->list, &breakpoints.list);
    7.12 @@ -877,7 +877,7 @@ int pdb_bkpt_remove (unsigned long cr3, 
    7.13  	if ( bkpt->cr3 == cr3 && bkpt->address == address )
    7.14  	{
    7.15              list_del(&bkpt->list);
    7.16 -            kfree(bkpt);
    7.17 +            xfree(bkpt);
    7.18              return 0;
    7.19  	}
    7.20      }
     8.1 --- a/xen/arch/x86/shadow.c	Mon Jul 26 18:22:00 2004 +0000
     8.2 +++ b/xen/arch/x86/shadow.c	Mon Jul 26 20:03:34 2004 +0000
     8.3 @@ -244,7 +244,7 @@ int shadow_mode_enable( struct domain *p
     8.4      m->shadow_mode = mode;
     8.5   
     8.6      // allocate hashtable
     8.7 -    m->shadow_ht = kmalloc(shadow_ht_buckets * 
     8.8 +    m->shadow_ht = xmalloc(shadow_ht_buckets * 
     8.9                             sizeof(struct shadow_status));
    8.10      if( m->shadow_ht == NULL )
    8.11          goto nomem;
    8.12 @@ -252,7 +252,7 @@ int shadow_mode_enable( struct domain *p
    8.13      memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
    8.14  
    8.15      // allocate space for first lot of extra nodes
    8.16 -    m->shadow_ht_extras = kmalloc(sizeof(void*) + 
    8.17 +    m->shadow_ht_extras = xmalloc(sizeof(void*) + 
    8.18                                    (shadow_ht_extra_size * 
    8.19                                     sizeof(struct shadow_status)));
    8.20      if( m->shadow_ht_extras == NULL )
    8.21 @@ -278,7 +278,7 @@ int shadow_mode_enable( struct domain *p
    8.22      {
    8.23          m->shadow_dirty_bitmap_size = (p->max_pages+63)&(~63);
    8.24          m->shadow_dirty_bitmap = 
    8.25 -            kmalloc( m->shadow_dirty_bitmap_size/8);
    8.26 +            xmalloc( m->shadow_dirty_bitmap_size/8);
    8.27          if( m->shadow_dirty_bitmap == NULL )
    8.28          {
    8.29              m->shadow_dirty_bitmap_size = 0;
    8.30 @@ -313,20 +313,20 @@ void __shadow_mode_disable(struct domain
    8.31          struct shadow_status * this = next;
    8.32          m->shadow_extras_count--;
    8.33          next = *((struct shadow_status **)(&next[shadow_ht_extra_size]));
    8.34 -        kfree(this);
    8.35 +        xfree(this);
    8.36      }
    8.37  
    8.38      SH_LOG("freed extras, now %d", m->shadow_extras_count);
    8.39  
    8.40      if ( m->shadow_dirty_bitmap  )
    8.41      {
    8.42 -        kfree( m->shadow_dirty_bitmap );
    8.43 +        xfree( m->shadow_dirty_bitmap );
    8.44          m->shadow_dirty_bitmap = 0;
    8.45          m->shadow_dirty_bitmap_size = 0;
    8.46      }
    8.47  
    8.48      // free the hashtable itself
    8.49 -    kfree( &m->shadow_ht[0] );
    8.50 +    xfree( &m->shadow_ht[0] );
    8.51  }
    8.52  
    8.53  static int shadow_mode_table_op(struct domain *d, 
     9.1 --- a/xen/arch/x86/smpboot.c	Mon Jul 26 18:22:00 2004 +0000
     9.2 +++ b/xen/arch/x86/smpboot.c	Mon Jul 26 20:03:34 2004 +0000
     9.3 @@ -406,7 +406,7 @@ void __init start_secondary(void)
     9.4       * At this point, boot CPU has fully initialised the IDT. It is
     9.5       * now safe to make ourselves a private copy.
     9.6       */
     9.7 -    idt_tables[cpu] = kmalloc(IDT_ENTRIES*8);
     9.8 +    idt_tables[cpu] = xmalloc(IDT_ENTRIES*8);
     9.9      memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
    9.10      *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
    9.11      *(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
    9.12 @@ -671,7 +671,7 @@ static void __init do_boot_cpu (int apic
    9.13      /* So we see what's up. */
    9.14      printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
    9.15  
    9.16 -    stack = __pa(__get_free_pages(1));
    9.17 +    stack = __pa(alloc_xenheap_pages(1));
    9.18      stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;
    9.19  
    9.20      /* Debug build: detect stack overflow by setting up a guard page. */
    10.1 --- a/xen/arch/x86/x86_32/mm.c	Mon Jul 26 18:22:00 2004 +0000
    10.2 +++ b/xen/arch/x86/x86_32/mm.c	Mon Jul 26 20:03:34 2004 +0000
    10.3 @@ -70,7 +70,7 @@ static void __init fixrange_init(unsigne
    10.4      {
    10.5          if ( l2_pgentry_val(*l2e) != 0 )
    10.6              continue;
    10.7 -        page = (unsigned long)get_free_page();
    10.8 +        page = (unsigned long)alloc_xenheap_page();
    10.9          clear_page(page);
   10.10          *l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
   10.11          vaddr += 1 << L2_PAGETABLE_SHIFT;
   10.12 @@ -97,7 +97,7 @@ void __init paging_init(void)
   10.13      fixrange_init(addr, 0, idle_pg_table);
   10.14  
   10.15      /* Create page table for ioremap(). */
   10.16 -    ioremap_pt = (void *)get_free_page();
   10.17 +    ioremap_pt = (void *)alloc_xenheap_page();
   10.18      clear_page(ioremap_pt);
   10.19      idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = 
   10.20          mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
   10.21 @@ -109,7 +109,7 @@ void __init paging_init(void)
   10.22                        ~_PAGE_RW);
   10.23  
   10.24      /* Set up mapping cache for domain pages. */
   10.25 -    mapcache = (unsigned long *)get_free_page();
   10.26 +    mapcache = (unsigned long *)alloc_xenheap_page();
   10.27      clear_page(mapcache);
   10.28      idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
   10.29          mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
    11.1 --- a/xen/common/ac_timer.c	Mon Jul 26 18:22:00 2004 +0000
    11.2 +++ b/xen/common/ac_timer.c	Mon Jul 26 20:03:34 2004 +0000
    11.3 @@ -130,13 +130,13 @@ static int add_entry(struct ac_timer **h
    11.4      if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    11.5      {
    11.6          int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
    11.7 -        struct ac_timer **new_heap = kmalloc(limit*sizeof(struct ac_timer *));
    11.8 +        struct ac_timer **new_heap = xmalloc(limit*sizeof(struct ac_timer *));
    11.9          if ( new_heap == NULL ) BUG();
   11.10          memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
   11.11          for ( i = 0; i < smp_num_cpus; i++ )
   11.12              if ( ac_timers[i].heap == heap )
   11.13                  ac_timers[i].heap = new_heap;
   11.14 -        kfree(heap);
   11.15 +        xfree(heap);
   11.16          heap = new_heap;
   11.17          SET_HEAP_LIMIT(heap, limit-1);
   11.18      }
   11.19 @@ -278,7 +278,7 @@ void __init ac_timer_init(void)
   11.20  
   11.21      for ( i = 0; i < smp_num_cpus; i++ )
   11.22      {
   11.23 -        ac_timers[i].heap = kmalloc(
   11.24 +        ac_timers[i].heap = xmalloc(
   11.25              (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
   11.26          if ( ac_timers[i].heap == NULL ) BUG();
   11.27          SET_HEAP_SIZE(ac_timers[i].heap, 0);
    12.1 --- a/xen/common/dom0_ops.c	Mon Jul 26 18:22:00 2004 +0000
    12.2 +++ b/xen/common/dom0_ops.c	Mon Jul 26 20:03:34 2004 +0000
    12.3 @@ -392,7 +392,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    12.4  
    12.5          if ( op->u.getdomaininfo.ctxt != NULL )
    12.6          {
    12.7 -            if ( (c = kmalloc(sizeof(*c))) == NULL )
    12.8 +            if ( (c = xmalloc(sizeof(*c))) == NULL )
    12.9              {
   12.10                  ret = -ENOMEM;
   12.11                  put_domain(d);
   12.12 @@ -411,7 +411,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   12.13                  ret = -EINVAL;
   12.14  
   12.15              if ( c != NULL )
   12.16 -                kfree(c);
   12.17 +                xfree(c);
   12.18          }
   12.19  
   12.20          if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )     
    13.1 --- a/xen/common/domain.c	Mon Jul 26 18:22:00 2004 +0000
    13.2 +++ b/xen/common/domain.c	Mon Jul 26 20:03:34 2004 +0000
    13.3 @@ -365,7 +365,7 @@ void domain_destruct(struct domain *d)
    13.4      destroy_event_channels(d);
    13.5  
    13.6      free_perdomain_pt(d);
    13.7 -    free_page((unsigned long)d->shared_info);
    13.8 +    free_xenheap_page((unsigned long)d->shared_info);
    13.9  
   13.10      free_domain_struct(d);
   13.11  }
   13.12 @@ -381,7 +381,7 @@ int final_setup_guestos(struct domain *p
   13.13      int rc = 0;
   13.14      full_execution_context_t *c;
   13.15  
   13.16 -    if ( (c = kmalloc(sizeof(*c))) == NULL )
   13.17 +    if ( (c = xmalloc(sizeof(*c))) == NULL )
   13.18          return -ENOMEM;
   13.19  
   13.20      if ( test_bit(DF_CONSTRUCTED, &p->flags) )
   13.21 @@ -405,6 +405,6 @@ int final_setup_guestos(struct domain *p
   13.22  
   13.23   out:    
   13.24      if ( c != NULL )
   13.25 -        kfree(c);
   13.26 +        xfree(c);
   13.27      return rc;
   13.28  }
    14.1 --- a/xen/common/event_channel.c	Mon Jul 26 18:22:00 2004 +0000
    14.2 +++ b/xen/common/event_channel.c	Mon Jul 26 20:03:34 2004 +0000
    14.3 @@ -48,7 +48,7 @@ static int get_free_port(struct domain *
    14.4          
    14.5          max *= 2;
    14.6          
    14.7 -        chn = kmalloc(max * sizeof(event_channel_t));
    14.8 +        chn = xmalloc(max * sizeof(event_channel_t));
    14.9          if ( unlikely(chn == NULL) )
   14.10              return -ENOMEM;
   14.11  
   14.12 @@ -57,7 +57,7 @@ static int get_free_port(struct domain *
   14.13          if ( d->event_channel != NULL )
   14.14          {
   14.15              memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
   14.16 -            kfree(d->event_channel);
   14.17 +            xfree(d->event_channel);
   14.18          }
   14.19  
   14.20          d->event_channel     = chn;
   14.21 @@ -477,7 +477,7 @@ long do_event_channel_op(evtchn_op_t *uo
   14.22  int init_event_channels(struct domain *d)
   14.23  {
   14.24      spin_lock_init(&d->event_channel_lock);
   14.25 -    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
   14.26 +    d->event_channel = xmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
   14.27      if ( unlikely(d->event_channel == NULL) )
   14.28          return -ENOMEM;
   14.29      d->max_event_channel = INIT_EVENT_CHANNELS;
   14.30 @@ -495,6 +495,6 @@ void destroy_event_channels(struct domai
   14.31      {
   14.32          for ( i = 0; i < d->max_event_channel; i++ )
   14.33              (void)__evtchn_close(d, i);
   14.34 -        kfree(d->event_channel);
   14.35 +        xfree(d->event_channel);
   14.36      }
   14.37  }
    15.1 --- a/xen/common/kernel.c	Mon Jul 26 18:22:00 2004 +0000
    15.2 +++ b/xen/common/kernel.c	Mon Jul 26 20:03:34 2004 +0000
    15.3 @@ -28,7 +28,7 @@
    15.4  
    15.5  unsigned long xenheap_phys_end;
    15.6  
    15.7 -kmem_cache_t *domain_struct_cachep;
    15.8 +xmem_cache_t *domain_struct_cachep;
    15.9  
   15.10  struct e820entry {
   15.11      unsigned long addr_lo, addr_hi;        /* start of memory segment */
   15.12 @@ -268,10 +268,10 @@ void cmain(multiboot_info_t *mbi)
   15.13      init_page_allocator(__pa(heap_start), xenheap_phys_end);
   15.14   
   15.15      /* Initialise the slab allocator. */
   15.16 -    kmem_cache_init();
   15.17 -    kmem_cache_sizes_init(max_page);
   15.18 +    xmem_cache_init();
   15.19 +    xmem_cache_sizes_init(max_page);
   15.20  
   15.21 -    domain_struct_cachep = kmem_cache_create(
   15.22 +    domain_struct_cachep = xmem_cache_create(
   15.23          "domain_cache", sizeof(struct domain),
   15.24          0, SLAB_HWCACHE_ALIGN, NULL, NULL);
   15.25      if ( domain_struct_cachep == NULL )
    16.1 --- a/xen/common/page_alloc.c	Mon Jul 26 18:22:00 2004 +0000
    16.2 +++ b/xen/common/page_alloc.c	Mon Jul 26 20:03:34 2004 +0000
    16.3 @@ -263,7 +263,7 @@ void __init init_page_allocator(unsigned
    16.4  
    16.5  
    16.6  /* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
    16.7 -unsigned long __get_free_pages(int order)
    16.8 +unsigned long alloc_xenheap_pages(int order)
    16.9  {
   16.10      int i, attempts = 0;
   16.11      chunk_head_t *alloc_ch, *spare_ch;
   16.12 @@ -321,7 +321,7 @@ retry:
   16.13          
   16.14      if ( attempts++ < 8 )
   16.15      {
   16.16 -        kmem_cache_reap();
   16.17 +        xmem_cache_reap();
   16.18          goto retry;
   16.19      }
   16.20  
   16.21 @@ -333,7 +333,7 @@ retry:
   16.22  
   16.23  
   16.24  /* Free 2^@order pages at VIRTUAL address @p. */
   16.25 -void __free_pages(unsigned long p, int order)
   16.26 +void free_xenheap_pages(unsigned long p, int order)
   16.27  {
   16.28      unsigned long size = 1 << (order + PAGE_SHIFT);
   16.29      chunk_head_t *ch;
    17.1 --- a/xen/common/physdev.c	Mon Jul 26 18:22:00 2004 +0000
    17.2 +++ b/xen/common/physdev.c	Mon Jul 26 20:03:34 2004 +0000
    17.3 @@ -98,7 +98,7 @@ static void add_dev_to_task(struct domai
    17.4          return;
    17.5      }
    17.6  
    17.7 -    if ( (pdev = kmalloc(sizeof(phys_dev_t))) == NULL )
    17.8 +    if ( (pdev = xmalloc(sizeof(phys_dev_t))) == NULL )
    17.9      {
   17.10          INFO("Error allocating pdev structure.\n");
   17.11          return;
   17.12 @@ -171,7 +171,7 @@ int physdev_pci_access_modify(
   17.13  
   17.14      if ( p->io_bitmap == NULL )
   17.15      {
   17.16 -        if ( (p->io_bitmap = kmalloc(IO_BITMAP_BYTES)) == NULL )
   17.17 +        if ( (p->io_bitmap = xmalloc(IO_BITMAP_BYTES)) == NULL )
   17.18          {
   17.19              rc = -ENOMEM;
   17.20              goto out;
   17.21 @@ -737,7 +737,7 @@ void physdev_init_dom0(struct domain *p)
   17.22          /* Skip bridges and other peculiarities for now. */
   17.23          if ( dev->hdr_type != PCI_HEADER_TYPE_NORMAL )
   17.24              continue;
   17.25 -        pdev = kmalloc(sizeof(phys_dev_t));
   17.26 +        pdev = xmalloc(sizeof(phys_dev_t));
   17.27          pdev->dev = dev;
   17.28          pdev->flags = ACC_WRITE;
   17.29          pdev->state = 0;
    18.1 --- a/xen/common/resource.c	Mon Jul 26 18:22:00 2004 +0000
    18.2 +++ b/xen/common/resource.c	Mon Jul 26 20:03:34 2004 +0000
    18.3 @@ -220,7 +220,7 @@ int allocate_resource(struct resource *r
    18.4   */
    18.5  struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
    18.6  {
    18.7 -	struct resource *res = kmalloc(sizeof(*res));
    18.8 +	struct resource *res = xmalloc(sizeof(*res));
    18.9  
   18.10  	if (res) {
   18.11  		memset(res, 0, sizeof(*res));
   18.12 @@ -244,7 +244,7 @@ struct resource * __request_region(struc
   18.13  			}
   18.14  
   18.15  			/* Uhhuh, that didn't work out.. */
   18.16 -			kfree(res);
   18.17 +			xfree(res);
   18.18  			res = NULL;
   18.19  			break;
   18.20  		}
   18.21 @@ -262,7 +262,7 @@ int __check_region(struct resource *pare
   18.22  		return -EBUSY;
   18.23  
   18.24  	release_resource(res);
   18.25 -	kfree(res);
   18.26 +	xfree(res);
   18.27  	return 0;
   18.28  }
   18.29  
   18.30 @@ -287,7 +287,7 @@ void __release_region(struct resource *p
   18.31  			if (res->start != start || res->end != end)
   18.32  				break;
   18.33  			*p = res->sibling;
   18.34 -			kfree(res);
   18.35 +			xfree(res);
   18.36  			return;
   18.37  		}
   18.38  		p = &res->sibling;
    19.1 --- a/xen/common/sched_atropos.c	Mon Jul 26 18:22:00 2004 +0000
    19.2 +++ b/xen/common/sched_atropos.c	Mon Jul 26 20:03:34 2004 +0000
    19.3 @@ -75,7 +75,7 @@ struct at_cpu_info
    19.4  
    19.5  
    19.6  /* SLAB cache for struct at_dom_info objects */
    19.7 -static kmem_cache_t *dom_info_cache;
    19.8 +static xmem_cache_t *dom_info_cache;
    19.9  
   19.10  
   19.11  /** calculate the length of a linked list */
   19.12 @@ -528,14 +528,14 @@ static int at_init_scheduler()
   19.13      
   19.14      for ( i = 0; i < NR_CPUS; i++ )
   19.15      {
   19.16 -        schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info));
   19.17 +        schedule_data[i].sched_priv = xmalloc(sizeof(struct at_cpu_info));
   19.18          if ( schedule_data[i].sched_priv == NULL )
   19.19              return -1;
   19.20          WAITQ(i)->next = WAITQ(i);
   19.21          WAITQ(i)->prev = WAITQ(i);
   19.22      }
   19.23  
   19.24 -    dom_info_cache = kmem_cache_create("Atropos dom info",
   19.25 +    dom_info_cache = xmem_cache_create("Atropos dom info",
   19.26                                         sizeof(struct at_dom_info),
   19.27                                         0, 0, NULL, NULL);
   19.28  
   19.29 @@ -591,7 +591,7 @@ static int at_alloc_task(struct domain *
   19.30  {
   19.31      ASSERT(p != NULL);
   19.32  
   19.33 -    p->sched_priv = kmem_cache_alloc(dom_info_cache);
   19.34 +    p->sched_priv = xmem_cache_alloc(dom_info_cache);
   19.35      if( p->sched_priv == NULL )
   19.36          return -1;
   19.37  
   19.38 @@ -604,7 +604,7 @@ static int at_alloc_task(struct domain *
   19.39  /* free memory associated with a task */
   19.40  static void at_free_task(struct domain *p)
   19.41  {
   19.42 -    kmem_cache_free( dom_info_cache, DOM_INFO(p) );
   19.43 +    xmem_cache_free( dom_info_cache, DOM_INFO(p) );
   19.44  }
   19.45  
   19.46  
    20.1 --- a/xen/common/sched_bvt.c	Mon Jul 26 18:22:00 2004 +0000
    20.2 +++ b/xen/common/sched_bvt.c	Mon Jul 26 20:03:34 2004 +0000
    20.3 @@ -62,7 +62,7 @@ struct bvt_cpu_info
    20.4  static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */
    20.5  
    20.6  /* SLAB cache for struct bvt_dom_info objects */
    20.7 -static kmem_cache_t *dom_info_cache;
    20.8 +static xmem_cache_t *dom_info_cache;
    20.9  
   20.10  /*
   20.11   * Calculate the effective virtual time for a domain. Take into account 
   20.12 @@ -102,7 +102,7 @@ static void __calc_evt(struct bvt_dom_in
   20.13   */
   20.14  int bvt_alloc_task(struct domain *p)
   20.15  {
   20.16 -    p->sched_priv = kmem_cache_alloc(dom_info_cache);
   20.17 +    p->sched_priv = xmem_cache_alloc(dom_info_cache);
   20.18      if ( p->sched_priv == NULL )
   20.19          return -1;
   20.20      
   20.21 @@ -164,7 +164,7 @@ int bvt_init_idle_task(struct domain *p)
   20.22  void bvt_free_task(struct domain *p)
   20.23  {
   20.24      ASSERT( p->sched_priv != NULL );
   20.25 -    kmem_cache_free( dom_info_cache, p->sched_priv );
   20.26 +    xmem_cache_free( dom_info_cache, p->sched_priv );
   20.27  }
   20.28  
   20.29  
   20.30 @@ -437,7 +437,7 @@ static void bvt_dump_cpu_state(int i)
   20.31     this functions makes sure that the run_list
   20.32     is initialised properly. The new domain needs
   20.33     NOT to appear as to be on the runqueue */
   20.34 -static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
   20.35 +static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
   20.36  {
   20.37      struct bvt_dom_info *dom_inf = (struct bvt_dom_info*)arg1;
   20.38      dom_inf->run_list.next = NULL;
   20.39 @@ -451,7 +451,7 @@ int bvt_init_scheduler()
   20.40  
   20.41      for ( i = 0; i < NR_CPUS; i++ )
   20.42      {
   20.43 -        schedule_data[i].sched_priv = kmalloc(sizeof(struct bvt_cpu_info));
   20.44 +        schedule_data[i].sched_priv = xmalloc(sizeof(struct bvt_cpu_info));
   20.45          INIT_LIST_HEAD(RUNQUEUE(i));
   20.46          
   20.47          if ( schedule_data[i].sched_priv == NULL )
   20.48 @@ -463,7 +463,7 @@ int bvt_init_scheduler()
   20.49          CPU_SVT(i) = 0; /* XXX do I really need to do this? */
   20.50      }
   20.51  
   20.52 -    dom_info_cache = kmem_cache_create("BVT dom info",
   20.53 +    dom_info_cache = xmem_cache_create("BVT dom info",
   20.54                                         sizeof(struct bvt_dom_info),
   20.55                                         0, 0, cache_constructor, NULL);
   20.56  
    21.1 --- a/xen/common/sched_fair_bvt.c	Mon Jul 26 18:22:00 2004 +0000
    21.2 +++ b/xen/common/sched_fair_bvt.c	Mon Jul 26 20:03:34 2004 +0000
    21.3 @@ -74,7 +74,7 @@ static s32 ctx_allow = (s32)MILLISECS(5)
    21.4  static s32 max_vtb   = (s32)MILLISECS(5);
    21.5  
    21.6  /* SLAB cache for struct fbvt_dom_info objects */
    21.7 -static kmem_cache_t *dom_info_cache;
    21.8 +static xmem_cache_t *dom_info_cache;
    21.9  
   21.10  /*
   21.11   * Calculate the effective virtual time for a domain. Take into account 
   21.12 @@ -114,7 +114,7 @@ static void __calc_evt(struct fbvt_dom_i
   21.13   */
   21.14  int fbvt_alloc_task(struct domain *p)
   21.15  {
   21.16 -    p->sched_priv = kmem_cache_alloc(dom_info_cache);
   21.17 +    p->sched_priv = xmem_cache_alloc(dom_info_cache);
   21.18      if ( p->sched_priv == NULL )
   21.19          return -1;
   21.20      
   21.21 @@ -178,7 +178,7 @@ int fbvt_init_idle_task(struct domain *p
   21.22  void fbvt_free_task(struct domain *p)
   21.23  {
   21.24      ASSERT( p->sched_priv != NULL );
   21.25 -    kmem_cache_free( dom_info_cache, p->sched_priv );
   21.26 +    xmem_cache_free( dom_info_cache, p->sched_priv );
   21.27  }
   21.28  
   21.29  /* 
   21.30 @@ -503,7 +503,7 @@ static void fbvt_dump_cpu_state(int i)
   21.31     this functions makes sure that the run_list
   21.32     is initialised properly. The new domain needs
   21.33     NOT to appear as to be on the runqueue */
   21.34 -static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
   21.35 +static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
   21.36  {
   21.37      struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
   21.38      dom_inf->run_list.next = NULL;
   21.39 @@ -519,7 +519,7 @@ int fbvt_init_scheduler()
   21.40  
   21.41      for ( i = 0; i < NR_CPUS; i++ )
   21.42      {
   21.43 -        schedule_data[i].sched_priv = kmalloc(sizeof(struct fbvt_cpu_info));
   21.44 +        schedule_data[i].sched_priv = xmalloc(sizeof(struct fbvt_cpu_info));
   21.45          INIT_LIST_HEAD(RUNQUEUE(i));
   21.46          if ( schedule_data[i].sched_priv == NULL )
   21.47          {
   21.48 @@ -530,7 +530,7 @@ int fbvt_init_scheduler()
   21.49          CPU_SVT(i) = 0; /* XXX do I really need to do this? */
   21.50      }
   21.51  
   21.52 -    dom_info_cache = kmem_cache_create("FBVT dom info",
   21.53 +    dom_info_cache = xmem_cache_create("FBVT dom info",
   21.54                                         sizeof(struct fbvt_dom_info),
   21.55                                         0, 0, cache_constructor, NULL);
   21.56  
    22.1 --- a/xen/common/sched_rrobin.c	Mon Jul 26 18:22:00 2004 +0000
    22.2 +++ b/xen/common/sched_rrobin.c	Mon Jul 26 20:03:34 2004 +0000
    22.3 @@ -31,11 +31,11 @@ struct rrobin_dom_info
    22.4  static void rr_dump_cpu_state(int cpu);
    22.5  
    22.6  /* SLAB cache for struct rrobin_dom_info objects */
    22.7 -static kmem_cache_t *dom_info_cache;
    22.8 +static xmem_cache_t *dom_info_cache;
    22.9  
   22.10  
   22.11  /* Ensures proper initialisation of the dom_info */
   22.12 -static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
   22.13 +static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
   22.14  {
   22.15      struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
   22.16      dom_inf->run_list.next = NULL;
   22.17 @@ -51,7 +51,7 @@ static int rr_init_scheduler()
   22.18      for ( i = 0; i < NR_CPUS; i++ )
   22.19          INIT_LIST_HEAD(RUNQUEUE(i));
   22.20     
   22.21 -    dom_info_cache = kmem_cache_create("FBVT dom info", 
   22.22 +    dom_info_cache = xmem_cache_create("FBVT dom info", 
   22.23                                          sizeof(struct rrobin_dom_info), 
   22.24                                          0, 0, cache_constructor, NULL);
   22.25  
   22.26 @@ -66,7 +66,7 @@ static int rr_init_scheduler()
   22.27  /* Allocates memory for per domain private scheduling data*/
   22.28  static int rr_alloc_task(struct domain *d)
   22.29  {
   22.30 -    d->sched_priv = kmem_cache_alloc(dom_info_cache);
   22.31 +    d->sched_priv = xmem_cache_alloc(dom_info_cache);
   22.32      if ( d->sched_priv == NULL )
   22.33          return -1;
   22.34  
   22.35 @@ -85,7 +85,7 @@ static void rr_add_task(struct domain *p
   22.36  static void rr_free_task(struct domain *p)
   22.37  {
   22.38      ASSERT( p->sched_priv != NULL );
   22.39 -    kmem_cache_free( dom_info_cache, p->sched_priv );
   22.40 +    xmem_cache_free( dom_info_cache, p->sched_priv );
   22.41  }
   22.42  
   22.43  /* Initialises idle task */
    23.1 --- a/xen/common/schedule.c	Mon Jul 26 18:22:00 2004 +0000
    23.2 +++ b/xen/common/schedule.c	Mon Jul 26 20:03:34 2004 +0000
    23.3 @@ -96,26 +96,26 @@ static struct ac_timer t_timer[NR_CPUS];
    23.4   */
    23.5  static struct ac_timer fallback_timer[NR_CPUS];
    23.6  
    23.7 -extern kmem_cache_t *domain_struct_cachep;
    23.8 +extern xmem_cache_t *domain_struct_cachep;
    23.9  
   23.10  void free_domain_struct(struct domain *d)
   23.11  {
   23.12      SCHED_OP(free_task, d);
   23.13 -    kmem_cache_free(domain_struct_cachep, d);
   23.14 +    xmem_cache_free(domain_struct_cachep, d);
   23.15  }
   23.16  
   23.17  struct domain *alloc_domain_struct(void)
   23.18  {
   23.19      struct domain *d;
   23.20  
   23.21 -    if ( (d = kmem_cache_alloc(domain_struct_cachep)) == NULL )
   23.22 +    if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
   23.23          return NULL;
   23.24      
   23.25      memset(d, 0, sizeof(*d));
   23.26  
   23.27      if ( SCHED_OP(alloc_task, d) < 0 )
   23.28      {
   23.29 -        kmem_cache_free(domain_struct_cachep, d);
   23.30 +        xmem_cache_free(domain_struct_cachep, d);
   23.31          return NULL;
   23.32      }
   23.33  
    24.1 --- a/xen/common/slab.c	Mon Jul 26 18:22:00 2004 +0000
    24.2 +++ b/xen/common/slab.c	Mon Jul 26 20:03:34 2004 +0000
    24.3 @@ -3,7 +3,7 @@
    24.4   * Written by Mark Hemment, 1996/97.
    24.5   * (markhe@nextd.demon.co.uk)
    24.6   *
    24.7 - * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
    24.8 + * xmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
    24.9   *
   24.10   * Major cleanup, different bufctl logic, per-cpu arrays
   24.11   *	(c) 2000 Manfred Spraul
   24.12 @@ -31,8 +31,8 @@
   24.13   * If partial slabs exist, then new allocations come from these slabs,
   24.14   * otherwise from empty slabs or new slabs are allocated.
   24.15   *
   24.16 - * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
   24.17 - * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
   24.18 + * xmem_cache_destroy() CAN CRASH if you try to allocate from the cache
   24.19 + * during xmem_cache_destroy(). The caller must prevent concurrent allocs.
   24.20   *
   24.21   * On SMP systems, each cache has a short per-cpu head array, most allocs
   24.22   * and frees go into that array, and if that array overflows, then 1/2
   24.23 @@ -43,7 +43,7 @@
   24.24   *
   24.25   * SMP synchronization:
   24.26   *  constructors and destructors are called without any locking.
   24.27 - *  Several members in kmem_cache_t and slab_t never change, they
   24.28 + *  Several members in xmem_cache_t and slab_t never change, they
   24.29   *	are accessed without any locking.
   24.30   *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
   24.31   *  The non-constant members are protected with a per-cache irq spinlock.
   24.32 @@ -61,7 +61,7 @@
   24.33  #include <xen/sched.h>
   24.34  
   24.35  /*
   24.36 - * DEBUG  - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
   24.37 + * DEBUG  - 1 for xmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
   24.38   *	    SLAB_RED_ZONE & SLAB_POISON.
   24.39   *	    0 for faster, smaller code (especially in the critical paths).
   24.40   *
   24.41 @@ -81,7 +81,7 @@
   24.42  #endif
   24.43  
   24.44  /*
   24.45 - * Parameters for kmem_cache_reap
   24.46 + * Parameters for xmem_cache_reap
   24.47   */
   24.48  #define REAP_SCANLEN	10
   24.49  #define REAP_PERFECT	10
   24.50 @@ -89,7 +89,7 @@
   24.51  /* Shouldn't this be in a header file somewhere? */
   24.52  #define	BYTES_PER_WORD		sizeof(void *)
   24.53  
   24.54 -/* Legal flag mask for kmem_cache_create(). */
   24.55 +/* Legal flag mask for xmem_cache_create(). */
   24.56  #if DEBUG
   24.57  #define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
   24.58  			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
   24.59 @@ -99,7 +99,7 @@
   24.60  #endif
   24.61  
   24.62  /*
   24.63 - * kmem_bufctl_t:
   24.64 + * xmem_bufctl_t:
   24.65   *
   24.66   * Bufctl's are used for linking objs within a slab
   24.67   * linked offsets.
   24.68 @@ -117,12 +117,12 @@
   24.69   * is less than 512 (PAGE_SIZE<<3), but greater than 256.
   24.70   */
   24.71  
   24.72 -#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
   24.73 -#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
   24.74 -#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
   24.75 +#define BUFCTL_END	(((xmem_bufctl_t)(~0U))-0)
   24.76 +#define BUFCTL_FREE	(((xmem_bufctl_t)(~0U))-1)
   24.77 +#define	SLAB_LIMIT	(((xmem_bufctl_t)(~0U))-2)
   24.78  
   24.79  /* Max number of objs-per-slab for caches which use off-slab slabs.
   24.80 - * Needed to avoid a possible looping condition in kmem_cache_grow().
   24.81 + * Needed to avoid a possible looping condition in xmem_cache_grow().
   24.82   */
   24.83  static unsigned long offslab_limit;
   24.84  
   24.85 @@ -138,11 +138,11 @@ typedef struct slab_s {
   24.86      unsigned long    colouroff;
   24.87      void            *s_mem;    /* including colour offset */
   24.88      unsigned int     inuse;    /* num of objs active in slab */
   24.89 -    kmem_bufctl_t    free;
   24.90 +    xmem_bufctl_t    free;
   24.91  } slab_t;
   24.92  
   24.93  #define slab_bufctl(slabp) \
   24.94 -	((kmem_bufctl_t *)(((slab_t*)slabp)+1))
   24.95 +	((xmem_bufctl_t *)(((slab_t*)slabp)+1))
   24.96  
   24.97  /*
   24.98   * cpucache_t
   24.99 @@ -161,14 +161,14 @@ typedef struct cpucache_s {
  24.100  #define cc_data(cachep) \
  24.101  	((cachep)->cpudata[smp_processor_id()])
  24.102  /*
  24.103 - * kmem_cache_t
  24.104 + * xmem_cache_t
  24.105   *
  24.106   * manages a cache.
  24.107   */
  24.108  
  24.109  #define CACHE_NAMELEN	20	/* max name length for a slab cache */
  24.110  
  24.111 -struct kmem_cache_s {
  24.112 +struct xmem_cache_s {
  24.113  /* 1) each alloc & free */
  24.114      /* full, partial first, then free */
  24.115      struct list_head	slabs_full;
  24.116 @@ -188,15 +188,15 @@ struct kmem_cache_s {
  24.117      size_t			colour;		/* cache colouring range */
  24.118      unsigned int		colour_off;	/* colour offset */
  24.119      unsigned int		colour_next;	/* cache colouring */
  24.120 -    kmem_cache_t		*slabp_cache;
  24.121 +    xmem_cache_t		*slabp_cache;
  24.122      unsigned int		growing;
  24.123      unsigned int		dflags;		/* dynamic flags */
  24.124  
  24.125      /* constructor func */
  24.126 -    void (*ctor)(void *, kmem_cache_t *, unsigned long);
  24.127 +    void (*ctor)(void *, xmem_cache_t *, unsigned long);
  24.128  
  24.129      /* de-constructor func */
  24.130 -    void (*dtor)(void *, kmem_cache_t *, unsigned long);
  24.131 +    void (*dtor)(void *, xmem_cache_t *, unsigned long);
  24.132  
  24.133      unsigned long		failures;
  24.134  
  24.135 @@ -297,17 +297,17 @@ static int slab_break_gfp_order = BREAK_
  24.136  
  24.137  /* Macros for storing/retrieving the cachep and or slab from the
  24.138   * global 'mem_map'. These are used to find the slab an obj belongs to.
  24.139 - * With kfree(), these are used to find the cache which an obj belongs to.
  24.140 + * With xfree(), these are used to find the cache which an obj belongs to.
  24.141   */
  24.142  #define	SET_PAGE_CACHE(pg,x)  ((pg)->list.next = (struct list_head *)(x))
  24.143 -#define	GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->list.next)
  24.144 +#define	GET_PAGE_CACHE(pg)    ((xmem_cache_t *)(pg)->list.next)
  24.145  #define	SET_PAGE_SLAB(pg,x)   ((pg)->list.prev = (struct list_head *)(x))
  24.146  #define	GET_PAGE_SLAB(pg)     ((slab_t *)(pg)->list.prev)
  24.147  
  24.148  /* Size description struct for general caches. */
  24.149  typedef struct cache_sizes {
  24.150      size_t		 cs_size;
  24.151 -    kmem_cache_t	*cs_cachep;
  24.152 +    xmem_cache_t	*cs_cachep;
  24.153  } cache_sizes_t;
  24.154  
  24.155  static cache_sizes_t cache_sizes[] = {
  24.156 @@ -325,15 +325,15 @@ static cache_sizes_t cache_sizes[] = {
  24.157  };
  24.158  
  24.159  /* internal cache of cache description objs */
  24.160 -static kmem_cache_t cache_cache = {
  24.161 +static xmem_cache_t cache_cache = {
  24.162      slabs_full:    LIST_HEAD_INIT(cache_cache.slabs_full),
  24.163      slabs_partial: LIST_HEAD_INIT(cache_cache.slabs_partial),
  24.164      slabs_free:    LIST_HEAD_INIT(cache_cache.slabs_free),
  24.165 -    objsize:       sizeof(kmem_cache_t),
  24.166 +    objsize:       sizeof(xmem_cache_t),
  24.167      flags:         SLAB_NO_REAP,
  24.168      spinlock:      SPIN_LOCK_UNLOCKED,
  24.169      colour_off:    L1_CACHE_BYTES,
  24.170 -    name:          "kmem_cache"
  24.171 +    name:          "xmem_cache"
  24.172  };
  24.173  
  24.174  /* Guard access to the cache-chain. */
  24.175 @@ -344,7 +344,7 @@ static spinlock_t cache_chain_sem;
  24.176  #define up(_m)           spin_unlock_irqrestore(_m,spin_flags)
  24.177  
  24.178  /* Place maintainer for reaping. */
  24.179 -static kmem_cache_t *clock_searchp = &cache_cache;
  24.180 +static xmem_cache_t *clock_searchp = &cache_cache;
  24.181  
  24.182  #define cache_chain (cache_cache.next)
  24.183  
  24.184 @@ -355,12 +355,12 @@ static kmem_cache_t *clock_searchp = &ca
  24.185   */
  24.186  static int g_cpucache_up;
  24.187  
  24.188 -static void enable_cpucache (kmem_cache_t *cachep);
  24.189 +static void enable_cpucache (xmem_cache_t *cachep);
  24.190  static void enable_all_cpucaches (void);
  24.191  #endif
  24.192  
  24.193  /* Cal the num objs, wastage, and bytes left over for a given slab size. */
  24.194 -static void kmem_cache_estimate (unsigned long gfporder, size_t size,
  24.195 +static void xmem_cache_estimate (unsigned long gfporder, size_t size,
  24.196                                   int flags, size_t *left_over, unsigned int *num)
  24.197  {
  24.198      int i;
  24.199 @@ -370,7 +370,7 @@ static void kmem_cache_estimate (unsigne
  24.200  
  24.201      if (!(flags & CFLGS_OFF_SLAB)) {
  24.202          base = sizeof(slab_t);
  24.203 -        extra = sizeof(kmem_bufctl_t);
  24.204 +        extra = sizeof(xmem_bufctl_t);
  24.205      }
  24.206      i = 0;
  24.207      while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
  24.208 @@ -388,14 +388,14 @@ static void kmem_cache_estimate (unsigne
  24.209  }
  24.210  
  24.211  /* Initialisation - setup the `cache' cache. */
  24.212 -void __init kmem_cache_init(void)
  24.213 +void __init xmem_cache_init(void)
  24.214  {
  24.215      size_t left_over;
  24.216  
  24.217      init_MUTEX(&cache_chain_sem);
  24.218      INIT_LIST_HEAD(&cache_chain);
  24.219  
  24.220 -    kmem_cache_estimate(0, cache_cache.objsize, 0,
  24.221 +    xmem_cache_estimate(0, cache_cache.objsize, 0,
  24.222  			&left_over, &cache_cache.num);
  24.223      if (!cache_cache.num)
  24.224          BUG();
  24.225 @@ -408,7 +408,7 @@ void __init kmem_cache_init(void)
  24.226  /* Initialisation - setup remaining internal and general caches.
  24.227   * Called after the gfp() functions have been enabled, and before smp_init().
  24.228   */
  24.229 -void __init kmem_cache_sizes_init(unsigned long num_physpages)
  24.230 +void __init xmem_cache_sizes_init(unsigned long num_physpages)
  24.231  {
  24.232      cache_sizes_t *sizes = cache_sizes;
  24.233      char name[20];
  24.234 @@ -426,7 +426,7 @@ void __init kmem_cache_sizes_init(unsign
  24.235           * allow tighter packing of the smaller caches. */
  24.236          sprintf(name,"size-%Zd",sizes->cs_size);
  24.237          if (!(sizes->cs_cachep =
  24.238 -              kmem_cache_create(name, sizes->cs_size,
  24.239 +              xmem_cache_create(name, sizes->cs_size,
  24.240                                  0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
  24.241              BUG();
  24.242          }
  24.243 @@ -440,7 +440,7 @@ void __init kmem_cache_sizes_init(unsign
  24.244      } while (sizes->cs_size);
  24.245  }
  24.246  
  24.247 -int __init kmem_cpucache_init(void)
  24.248 +int __init xmem_cpucache_init(void)
  24.249  {
  24.250  #ifdef CONFIG_SMP
  24.251      g_cpucache_up = 1;
  24.252 @@ -449,15 +449,15 @@ int __init kmem_cpucache_init(void)
  24.253      return 0;
  24.254  }
  24.255  
  24.256 -/*__initcall(kmem_cpucache_init);*/
  24.257 +/*__initcall(xmem_cpucache_init);*/
  24.258  
  24.259  /* Interface to system's page allocator. No need to hold the cache-lock.
  24.260   */
  24.261 -static inline void *kmem_getpages(kmem_cache_t *cachep)
  24.262 +static inline void *xmem_getpages(xmem_cache_t *cachep)
  24.263  {
  24.264      void *addr;
  24.265  
  24.266 -    addr = (void*) __get_free_pages(cachep->gfporder);
  24.267 +    addr = (void*) alloc_xenheap_pages(cachep->gfporder);
  24.268      /* Assume that now we have the pages no one else can legally
  24.269       * messes with the 'struct page's.
  24.270       * However vm_scan() might try to test the structure to see if
  24.271 @@ -468,12 +468,12 @@ static inline void *kmem_getpages(kmem_c
  24.272  }
  24.273  
  24.274  /* Interface to system's page release. */
  24.275 -static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
  24.276 +static inline void xmem_freepages (xmem_cache_t *cachep, void *addr)
  24.277  {
  24.278      unsigned long i = (1<<cachep->gfporder);
  24.279      struct pfn_info *page = virt_to_page(addr);
  24.280  
  24.281 -    /* free_pages() does not clear the type bit - we do that.
  24.282 +    /* free_xenheap_pages() does not clear the type bit - we do that.
  24.283       * The pages have been unlinked from their cache-slab,
  24.284       * but their 'struct page's might be accessed in
  24.285       * vm_scan(). Shouldn't be a worry.
  24.286 @@ -483,11 +483,11 @@ static inline void kmem_freepages (kmem_
  24.287          page++;
  24.288      }
  24.289  
  24.290 -    free_pages((unsigned long)addr, cachep->gfporder);
  24.291 +    free_xenheap_pages((unsigned long)addr, cachep->gfporder);
  24.292  }
  24.293  
  24.294  #if DEBUG
  24.295 -static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
  24.296 +static inline void xmem_poison_obj (xmem_cache_t *cachep, void *addr)
  24.297  {
  24.298      int size = cachep->objsize;
  24.299      if (cachep->flags & SLAB_RED_ZONE) {
  24.300 @@ -498,7 +498,7 @@ static inline void kmem_poison_obj (kmem
  24.301      *(unsigned char *)(addr+size-1) = POISON_END;
  24.302  }
  24.303  
  24.304 -static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
  24.305 +static inline int xmem_check_poison_obj (xmem_cache_t *cachep, void *addr)
  24.306  {
  24.307      int size = cachep->objsize;
  24.308      void *end;
  24.309 @@ -517,7 +517,7 @@ static inline int kmem_check_poison_obj 
  24.310   * Before calling the slab must have been unlinked from the cache.
  24.311   * The cache-lock is not held/needed.
  24.312   */
  24.313 -static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
  24.314 +static void xmem_slab_destroy (xmem_cache_t *cachep, slab_t *slabp)
  24.315  {
  24.316      if (cachep->dtor
  24.317  #if DEBUG
  24.318 @@ -544,19 +544,19 @@ static void kmem_slab_destroy (kmem_cach
  24.319                  objp -= BYTES_PER_WORD;
  24.320              }	
  24.321              if ((cachep->flags & SLAB_POISON)  &&
  24.322 -                kmem_check_poison_obj(cachep, objp))
  24.323 +                xmem_check_poison_obj(cachep, objp))
  24.324                  BUG();
  24.325  #endif
  24.326          }
  24.327      }
  24.328  
  24.329 -    kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
  24.330 +    xmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
  24.331      if (OFF_SLAB(cachep))
  24.332 -        kmem_cache_free(cachep->slabp_cache, slabp);
  24.333 +        xmem_cache_free(cachep->slabp_cache, slabp);
  24.334  }
  24.335  
  24.336  /**
  24.337 - * kmem_cache_create - Create a cache.
  24.338 + * xmem_cache_create - Create a cache.
  24.339   * @name: A string which is used in /proc/slabinfo to identify this cache.
  24.340   * @size: The size of objects to be created in this cache.
  24.341   * @offset: The offset to use within the page.
  24.342 @@ -583,15 +583,15 @@ static void kmem_slab_destroy (kmem_cach
  24.343   * cacheline.  This can be beneficial if you're counting cycles as closely
  24.344   * as davem.
  24.345   */
  24.346 -kmem_cache_t *
  24.347 -kmem_cache_create (const char *name, size_t size, size_t offset,
  24.348 +xmem_cache_t *
  24.349 +xmem_cache_create (const char *name, size_t size, size_t offset,
  24.350                     unsigned long flags,
  24.351 -                   void (*ctor)(void*, kmem_cache_t *, unsigned long),
  24.352 -                   void (*dtor)(void*, kmem_cache_t *, unsigned long))
  24.353 +                   void (*ctor)(void*, xmem_cache_t *, unsigned long),
  24.354 +                   void (*dtor)(void*, xmem_cache_t *, unsigned long))
  24.355  {
  24.356 -    const char *func_nm = KERN_ERR "kmem_create: ";
  24.357 +    const char *func_nm = KERN_ERR "xmem_create: ";
  24.358      size_t left_over, align, slab_size;
  24.359 -    kmem_cache_t *cachep = NULL;
  24.360 +    xmem_cache_t *cachep = NULL;
  24.361      unsigned long spin_flags;
  24.362  
  24.363      /*
  24.364 @@ -639,10 +639,10 @@ kmem_cache_create (const char *name, siz
  24.365          BUG();
  24.366  
  24.367      /* Get cache's description obj. */
  24.368 -    cachep = (kmem_cache_t *)kmem_cache_alloc(&cache_cache);
  24.369 +    cachep = (xmem_cache_t *)xmem_cache_alloc(&cache_cache);
  24.370      if (!cachep)
  24.371          goto opps;
  24.372 -    memset(cachep, 0, sizeof(kmem_cache_t));
  24.373 +    memset(cachep, 0, sizeof(xmem_cache_t));
  24.374  
  24.375      /* Check that size is in terms of words.  This is needed to avoid
  24.376       * unaligned accesses for some archs when redzoning is used, and makes
  24.377 @@ -693,7 +693,7 @@ kmem_cache_create (const char *name, siz
  24.378      do {
  24.379          unsigned int break_flag = 0;
  24.380      cal_wastage:
  24.381 -        kmem_cache_estimate(cachep->gfporder, size, flags,
  24.382 +        xmem_cache_estimate(cachep->gfporder, size, flags,
  24.383                              &left_over, &cachep->num);
  24.384          if (break_flag)
  24.385              break;
  24.386 @@ -722,12 +722,12 @@ kmem_cache_create (const char *name, siz
  24.387      } while (1);
  24.388  
  24.389      if (!cachep->num) {
  24.390 -        printk("kmem_cache_create: couldn't create cache %s.\n", name);
  24.391 -        kmem_cache_free(&cache_cache, cachep);
  24.392 +        printk("xmem_cache_create: couldn't create cache %s.\n", name);
  24.393 +        xmem_cache_free(&cache_cache, cachep);
  24.394          cachep = NULL;
  24.395          goto opps;
  24.396      }
  24.397 -    slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t) + 
  24.398 +    slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(xmem_bufctl_t) + 
  24.399                                 sizeof(slab_t));
  24.400  
  24.401      /*
  24.402 @@ -759,7 +759,7 @@ kmem_cache_create (const char *name, siz
  24.403      INIT_LIST_HEAD(&cachep->slabs_free);
  24.404  
  24.405      if (flags & CFLGS_OFF_SLAB)
  24.406 -        cachep->slabp_cache = kmem_find_general_cachep(slab_size);
  24.407 +        cachep->slabp_cache = xmem_find_general_cachep(slab_size);
  24.408      cachep->ctor = ctor;
  24.409      cachep->dtor = dtor;
  24.410      /* Copy name over so we don't have problems with unloaded modules */
  24.411 @@ -775,7 +775,7 @@ kmem_cache_create (const char *name, siz
  24.412          struct list_head *p;
  24.413  
  24.414          list_for_each(p, &cache_chain) {
  24.415 -            kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
  24.416 +            xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
  24.417  
  24.418              /* The name field is constant - no lock needed. */
  24.419              if (!strcmp(pc->name, name))
  24.420 @@ -795,10 +795,10 @@ kmem_cache_create (const char *name, siz
  24.421  
  24.422  #if DEBUG
  24.423  /*
  24.424 - * This check if the kmem_cache_t pointer is chained in the cache_cache
  24.425 + * This check if the xmem_cache_t pointer is chained in the cache_cache
  24.426   * list. -arca
  24.427   */
  24.428 -static int is_chained_kmem_cache(kmem_cache_t * cachep)
  24.429 +static int is_chained_xmem_cache(xmem_cache_t * cachep)
  24.430  {
  24.431      struct list_head *p;
  24.432      int ret = 0;
  24.433 @@ -817,7 +817,7 @@ static int is_chained_kmem_cache(kmem_ca
  24.434      return ret;
  24.435  }
  24.436  #else
  24.437 -#define is_chained_kmem_cache(x) 1
  24.438 +#define is_chained_xmem_cache(x) 1
  24.439  #endif
  24.440  
  24.441  #ifdef CONFIG_SMP
  24.442 @@ -835,7 +835,7 @@ static void smp_call_function_all_cpus(v
  24.443  }
  24.444  typedef struct ccupdate_struct_s
  24.445  {
  24.446 -    kmem_cache_t *cachep;
  24.447 +    xmem_cache_t *cachep;
  24.448      cpucache_t *new[NR_CPUS];
  24.449  } ccupdate_struct_t;
  24.450  
  24.451 @@ -848,9 +848,9 @@ static void do_ccupdate_local(void *info
  24.452      new->new[smp_processor_id()] = old;
  24.453  }
  24.454  
  24.455 -static void free_block (kmem_cache_t* cachep, void** objpp, int len);
  24.456 +static void free_block (xmem_cache_t* cachep, void** objpp, int len);
  24.457  
  24.458 -static void drain_cpu_caches(kmem_cache_t *cachep)
  24.459 +static void drain_cpu_caches(xmem_cache_t *cachep)
  24.460  {
  24.461      ccupdate_struct_t new;
  24.462      int i;
  24.463 @@ -880,7 +880,7 @@ static void drain_cpu_caches(kmem_cache_
  24.464  #define drain_cpu_caches(cachep)	do { } while (0)
  24.465  #endif
  24.466  
  24.467 -static int __kmem_cache_shrink(kmem_cache_t *cachep)
  24.468 +static int __xmem_cache_shrink(xmem_cache_t *cachep)
  24.469  {
  24.470      slab_t *slabp;
  24.471      int ret;
  24.472 @@ -905,7 +905,7 @@ static int __kmem_cache_shrink(kmem_cach
  24.473          list_del(&slabp->list);
  24.474  
  24.475          spin_unlock_irq(&cachep->spinlock);
  24.476 -        kmem_slab_destroy(cachep, slabp);
  24.477 +        xmem_slab_destroy(cachep, slabp);
  24.478          spin_lock_irq(&cachep->spinlock);
  24.479      }
  24.480      ret = (!list_empty(&cachep->slabs_full) || 
  24.481 @@ -915,25 +915,25 @@ static int __kmem_cache_shrink(kmem_cach
  24.482  }
  24.483  
  24.484  /**
  24.485 - * kmem_cache_shrink - Shrink a cache.
  24.486 + * xmem_cache_shrink - Shrink a cache.
  24.487   * @cachep: The cache to shrink.
  24.488   *
  24.489   * Releases as many slabs as possible for a cache.
  24.490   * To help debugging, a zero exit status indicates all slabs were released.
  24.491   */
  24.492 -int kmem_cache_shrink(kmem_cache_t *cachep)
  24.493 +int xmem_cache_shrink(xmem_cache_t *cachep)
  24.494  {
  24.495 -    if (!cachep || !is_chained_kmem_cache(cachep))
  24.496 +    if (!cachep || !is_chained_xmem_cache(cachep))
  24.497          BUG();
  24.498  
  24.499 -    return __kmem_cache_shrink(cachep);
  24.500 +    return __xmem_cache_shrink(cachep);
  24.501  }
  24.502  
  24.503  /**
  24.504 - * kmem_cache_destroy - delete a cache
  24.505 + * xmem_cache_destroy - delete a cache
  24.506   * @cachep: the cache to destroy
  24.507   *
  24.508 - * Remove a kmem_cache_t object from the slab cache.
  24.509 + * Remove a xmem_cache_t object from the slab cache.
  24.510   * Returns 0 on success.
  24.511   *
  24.512   * It is expected this function will be called by a module when it is
  24.513 @@ -942,9 +942,9 @@ int kmem_cache_shrink(kmem_cache_t *cach
  24.514   * module doesn't have persistent in-kernel storage across loads and unloads.
  24.515   *
  24.516   * The caller must guarantee that noone will allocate memory from the cache
  24.517 - * during the kmem_cache_destroy().
  24.518 + * during the xmem_cache_destroy().
  24.519   */
  24.520 -int kmem_cache_destroy (kmem_cache_t * cachep)
  24.521 +int xmem_cache_destroy (xmem_cache_t * cachep)
  24.522  {
  24.523      unsigned long spin_flags;
  24.524  
  24.525 @@ -956,12 +956,12 @@ int kmem_cache_destroy (kmem_cache_t * c
  24.526      /* the chain is never empty, cache_cache is never destroyed */
  24.527      if (clock_searchp == cachep)
  24.528          clock_searchp = list_entry(cachep->next.next,
  24.529 -                                   kmem_cache_t, next);
  24.530 +                                   xmem_cache_t, next);
  24.531      list_del(&cachep->next);
  24.532      up(&cache_chain_sem);
  24.533  
  24.534 -    if (__kmem_cache_shrink(cachep)) {
  24.535 -        printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
  24.536 +    if (__xmem_cache_shrink(cachep)) {
  24.537 +        printk(KERN_ERR "xmem_cache_destroy: Can't free all objects %p\n",
  24.538                 cachep);
  24.539          down(&cache_chain_sem);
  24.540          list_add(&cachep->next,&cache_chain);
  24.541 @@ -972,16 +972,16 @@ int kmem_cache_destroy (kmem_cache_t * c
  24.542      {
  24.543          int i;
  24.544          for (i = 0; i < NR_CPUS; i++)
  24.545 -            kfree(cachep->cpudata[i]);
  24.546 +            xfree(cachep->cpudata[i]);
  24.547      }
  24.548  #endif
  24.549 -    kmem_cache_free(&cache_cache, cachep);
  24.550 +    xmem_cache_free(&cache_cache, cachep);
  24.551  
  24.552      return 0;
  24.553  }
  24.554  
  24.555  /* Get the memory for a slab management obj. */
  24.556 -static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
  24.557 +static inline slab_t *xmem_cache_slabmgmt(xmem_cache_t *cachep,
  24.558                                            void *objp, int colour_off, 
  24.559                                            int local_flags)
  24.560  {
  24.561 @@ -989,7 +989,7 @@ static inline slab_t *kmem_cache_slabmgm
  24.562  	
  24.563      if (OFF_SLAB(cachep)) {
  24.564          /* Slab management obj is off-slab. */
  24.565 -        slabp = kmem_cache_alloc(cachep->slabp_cache);
  24.566 +        slabp = xmem_cache_alloc(cachep->slabp_cache);
  24.567          if (!slabp)
  24.568              return NULL;
  24.569      } else {
  24.570 @@ -999,7 +999,7 @@ static inline slab_t *kmem_cache_slabmgm
  24.571             */
  24.572          slabp = objp+colour_off;
  24.573          colour_off += L1_CACHE_ALIGN(cachep->num *
  24.574 -                                     sizeof(kmem_bufctl_t) + sizeof(slab_t));
  24.575 +                                     sizeof(xmem_bufctl_t) + sizeof(slab_t));
  24.576      }
  24.577      slabp->inuse = 0;
  24.578      slabp->colouroff = colour_off;
  24.579 @@ -1008,7 +1008,7 @@ static inline slab_t *kmem_cache_slabmgm
  24.580      return slabp;
  24.581  }
  24.582  
  24.583 -static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
  24.584 +static inline void xmem_cache_init_objs(xmem_cache_t *cachep,
  24.585                                           slab_t *slabp,
  24.586                                          unsigned long ctor_flags)
  24.587  {
  24.588 @@ -1037,7 +1037,7 @@ static inline void kmem_cache_init_objs(
  24.589              objp -= BYTES_PER_WORD;
  24.590          if (cachep->flags & SLAB_POISON)
  24.591              /* need to poison the objs */
  24.592 -            kmem_poison_obj(cachep, objp);
  24.593 +            xmem_poison_obj(cachep, objp);
  24.594          if (cachep->flags & SLAB_RED_ZONE) {
  24.595              if (*((unsigned long*)(objp)) != RED_MAGIC1)
  24.596                  BUG();
  24.597 @@ -1054,9 +1054,9 @@ static inline void kmem_cache_init_objs(
  24.598  
  24.599  /*
  24.600   * Grow (by 1) the number of slabs within a cache.  This is called by
  24.601 - * kmem_cache_alloc() when there are no active objs left in a cache.
  24.602 + * xmem_cache_alloc() when there are no active objs left in a cache.
  24.603   */
  24.604 -static int kmem_cache_grow(kmem_cache_t * cachep)
  24.605 +static int xmem_cache_grow(xmem_cache_t * cachep)
  24.606  {
  24.607      slab_t	*slabp;
  24.608      struct pfn_info	*page; unsigned int i;
  24.609 @@ -1086,16 +1086,16 @@ static int kmem_cache_grow(kmem_cache_t 
  24.610       * held, but the incrementing c_growing prevents this
  24.611       * cache from being reaped or shrunk.
  24.612       * Note: The cache could be selected in for reaping in
  24.613 -     * kmem_cache_reap(), but when the final test is made the
  24.614 +     * xmem_cache_reap(), but when the final test is made the
  24.615       * growing value will be seen.
  24.616       */
  24.617  
  24.618      /* Get mem for the objs. */
  24.619 -    if (!(objp = kmem_getpages(cachep)))
  24.620 +    if (!(objp = xmem_getpages(cachep)))
  24.621          goto failed;
  24.622  
  24.623      /* Get slab management. */
  24.624 -    if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, 0)))
  24.625 +    if (!(slabp = xmem_cache_slabmgmt(cachep, objp, offset, 0)))
  24.626          goto opps1;
  24.627  
  24.628      /* Nasty!!!!!! I hope this is OK. */
  24.629 @@ -1108,7 +1108,7 @@ static int kmem_cache_grow(kmem_cache_t 
  24.630          page++;
  24.631      } while (--i);
  24.632  
  24.633 -    kmem_cache_init_objs(cachep, slabp, ctor_flags);
  24.634 +    xmem_cache_init_objs(cachep, slabp, ctor_flags);
  24.635  
  24.636      spin_lock_irqsave(&cachep->spinlock, save_flags);
  24.637      cachep->growing--;
  24.638 @@ -1121,7 +1121,7 @@ static int kmem_cache_grow(kmem_cache_t 
  24.639      spin_unlock_irqrestore(&cachep->spinlock, save_flags);
  24.640      return 1;
  24.641   opps1:
  24.642 -    kmem_freepages(cachep, objp);
  24.643 +    xmem_freepages(cachep, objp);
  24.644   failed:
  24.645      spin_lock_irqsave(&cachep->spinlock, save_flags);
  24.646      cachep->growing--;
  24.647 @@ -1137,7 +1137,7 @@ static int kmem_cache_grow(kmem_cache_t 
  24.648   */
  24.649  
  24.650  #if DEBUG
  24.651 -static int kmem_extra_free_checks (kmem_cache_t * cachep,
  24.652 +static int xmem_extra_free_checks (xmem_cache_t * cachep,
  24.653                                     slab_t *slabp, void * objp)
  24.654  {
  24.655      int i;
  24.656 @@ -1157,7 +1157,7 @@ static int kmem_extra_free_checks (kmem_
  24.657  }
  24.658  #endif
  24.659  
  24.660 -static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
  24.661 +static inline void * xmem_cache_alloc_one_tail (xmem_cache_t *cachep,
  24.662  						slab_t *slabp)
  24.663  {
  24.664      void *objp;
  24.665 @@ -1177,7 +1177,7 @@ static inline void * kmem_cache_alloc_on
  24.666      }
  24.667  #if DEBUG
  24.668      if (cachep->flags & SLAB_POISON)
  24.669 -        if (kmem_check_poison_obj(cachep, objp))
  24.670 +        if (xmem_check_poison_obj(cachep, objp))
  24.671              BUG();
  24.672      if (cachep->flags & SLAB_RED_ZONE) {
  24.673          /* Set alloc red-zone, and check old one. */
  24.674 @@ -1198,7 +1198,7 @@ static inline void * kmem_cache_alloc_on
  24.675   * caller must guarantee synchronization
  24.676   * #define for the goto optimization 8-)
  24.677   */
  24.678 -#define kmem_cache_alloc_one(cachep)				\
  24.679 +#define xmem_cache_alloc_one(cachep)				\
  24.680  ({								\
  24.681  	struct list_head * slabs_partial, * entry;		\
  24.682  	slab_t *slabp;						\
  24.683 @@ -1216,11 +1216,11 @@ static inline void * kmem_cache_alloc_on
  24.684  	}							\
  24.685  								\
  24.686  	slabp = list_entry(entry, slab_t, list);		\
  24.687 -	kmem_cache_alloc_one_tail(cachep, slabp);		\
  24.688 +	xmem_cache_alloc_one_tail(cachep, slabp);		\
  24.689  })
  24.690  
  24.691  #ifdef CONFIG_SMP
  24.692 -void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
  24.693 +void* xmem_cache_alloc_batch(xmem_cache_t* cachep)
  24.694  {
  24.695      int batchcount = cachep->batchcount;
  24.696      cpucache_t* cc = cc_data(cachep);
  24.697 @@ -1244,7 +1244,7 @@ void* kmem_cache_alloc_batch(kmem_cache_
  24.698  
  24.699          slabp = list_entry(entry, slab_t, list);
  24.700          cc_entry(cc)[cc->avail++] =
  24.701 -            kmem_cache_alloc_one_tail(cachep, slabp);
  24.702 +            xmem_cache_alloc_one_tail(cachep, slabp);
  24.703      }
  24.704      spin_unlock(&cachep->spinlock);
  24.705  
  24.706 @@ -1254,7 +1254,7 @@ void* kmem_cache_alloc_batch(kmem_cache_
  24.707  }
  24.708  #endif
  24.709  
  24.710 -static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
  24.711 +static inline void *__xmem_cache_alloc(xmem_cache_t *cachep)
  24.712  {
  24.713      unsigned long flags;
  24.714      void* objp;
  24.715 @@ -1271,18 +1271,18 @@ static inline void *__kmem_cache_alloc(k
  24.716                  objp = cc_entry(cc)[--cc->avail];
  24.717              } else {
  24.718                  STATS_INC_ALLOCMISS(cachep);
  24.719 -                objp = kmem_cache_alloc_batch(cachep);
  24.720 +                objp = xmem_cache_alloc_batch(cachep);
  24.721                  if (!objp)
  24.722                      goto alloc_new_slab_nolock;
  24.723              }
  24.724          } else {
  24.725              spin_lock(&cachep->spinlock);
  24.726 -            objp = kmem_cache_alloc_one(cachep);
  24.727 +            objp = xmem_cache_alloc_one(cachep);
  24.728              spin_unlock(&cachep->spinlock);
  24.729          }
  24.730      }
  24.731  #else
  24.732 -    objp = kmem_cache_alloc_one(cachep);
  24.733 +    objp = xmem_cache_alloc_one(cachep);
  24.734  #endif
  24.735      local_irq_restore(flags);
  24.736      return objp;
  24.737 @@ -1292,7 +1292,7 @@ static inline void *__kmem_cache_alloc(k
  24.738   alloc_new_slab_nolock:
  24.739  #endif
  24.740      local_irq_restore(flags);
  24.741 -    if (kmem_cache_grow(cachep))
  24.742 +    if (xmem_cache_grow(cachep))
  24.743          /* Someone may have stolen our objs.  Doesn't matter, we'll
  24.744           * just come back here again.
  24.745           */
  24.746 @@ -1310,7 +1310,7 @@ static inline void *__kmem_cache_alloc(k
  24.747  # define CHECK_NR(pg)						\
  24.748  	do {							\
  24.749  		if (!VALID_PAGE(pg)) {				\
  24.750 -			printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
  24.751 +			printk(KERN_ERR "xfree: out of range ptr %lxh.\n", \
  24.752  				(unsigned long)objp);		\
  24.753  			BUG();					\
  24.754  		} \
  24.755 @@ -1319,7 +1319,7 @@ static inline void *__kmem_cache_alloc(k
  24.756  	do {							\
  24.757  		CHECK_NR(page);					\
  24.758  		if (!PageSlab(page)) {				\
  24.759 -			printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
  24.760 +			printk(KERN_ERR "xfree: bad ptr %lxh.\n", \
  24.761  				(unsigned long)objp);		\
  24.762  			BUG();					\
  24.763  		}						\
  24.764 @@ -1329,7 +1329,7 @@ static inline void *__kmem_cache_alloc(k
  24.765  # define CHECK_PAGE(pg)	do { } while (0)
  24.766  #endif
  24.767  
  24.768 -static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
  24.769 +static inline void xmem_cache_free_one(xmem_cache_t *cachep, void *objp)
  24.770  {
  24.771      slab_t* slabp;
  24.772  
  24.773 @@ -1361,8 +1361,8 @@ static inline void kmem_cache_free_one(k
  24.774              BUG();
  24.775      }
  24.776      if (cachep->flags & SLAB_POISON)
  24.777 -        kmem_poison_obj(cachep, objp);
  24.778 -    if (kmem_extra_free_checks(cachep, slabp, objp))
  24.779 +        xmem_poison_obj(cachep, objp);
  24.780 +    if (xmem_extra_free_checks(cachep, slabp, objp))
  24.781          return;
  24.782  #endif
  24.783      {
  24.784 @@ -1389,14 +1389,14 @@ static inline void kmem_cache_free_one(k
  24.785  }
  24.786  
  24.787  #ifdef CONFIG_SMP
  24.788 -static inline void __free_block (kmem_cache_t* cachep,
  24.789 +static inline void __free_block (xmem_cache_t* cachep,
  24.790                                   void** objpp, int len)
  24.791  {
  24.792      for ( ; len > 0; len--, objpp++)
  24.793 -        kmem_cache_free_one(cachep, *objpp);
  24.794 +        xmem_cache_free_one(cachep, *objpp);
  24.795  }
  24.796  
  24.797 -static void free_block (kmem_cache_t* cachep, void** objpp, int len)
  24.798 +static void free_block (xmem_cache_t* cachep, void** objpp, int len)
  24.799  {
  24.800      spin_lock(&cachep->spinlock);
  24.801      __free_block(cachep, objpp, len);
  24.802 @@ -1405,10 +1405,10 @@ static void free_block (kmem_cache_t* ca
  24.803  #endif
  24.804  
  24.805  /*
  24.806 - * __kmem_cache_free
  24.807 + * __xmem_cache_free
  24.808   * called with disabled ints
  24.809   */
  24.810 -static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
  24.811 +static inline void __xmem_cache_free (xmem_cache_t *cachep, void* objp)
  24.812  {
  24.813  #ifdef CONFIG_SMP
  24.814      cpucache_t *cc = cc_data(cachep);
  24.815 @@ -1432,47 +1432,47 @@ static inline void __kmem_cache_free (km
  24.816          free_block(cachep, &objp, 1);
  24.817      }
  24.818  #else
  24.819 -    kmem_cache_free_one(cachep, objp);
  24.820 +    xmem_cache_free_one(cachep, objp);
  24.821  #endif
  24.822  }
  24.823  
  24.824  /**
  24.825 - * kmem_cache_alloc - Allocate an object
  24.826 + * xmem_cache_alloc - Allocate an object
  24.827   * @cachep: The cache to allocate from.
  24.828   *
  24.829   * Allocate an object from this cache.  The flags are only relevant
  24.830   * if the cache has no available objects.
  24.831   */
  24.832 -void *kmem_cache_alloc(kmem_cache_t *cachep)
  24.833 +void *xmem_cache_alloc(xmem_cache_t *cachep)
  24.834  {
  24.835 -    return __kmem_cache_alloc(cachep);
  24.836 +    return __xmem_cache_alloc(cachep);
  24.837  }
  24.838  
  24.839  /**
  24.840 - * kmalloc - allocate memory
  24.841 + * xmalloc - allocate memory
  24.842   * @size: how many bytes of memory are required.
  24.843   */
  24.844 -void *kmalloc(size_t size)
  24.845 +void *xmalloc(size_t size)
  24.846  {
  24.847      cache_sizes_t *csizep = cache_sizes;
  24.848  
  24.849      for (; csizep->cs_size; csizep++) {
  24.850          if (size > csizep->cs_size)
  24.851              continue;
  24.852 -        return __kmem_cache_alloc(csizep->cs_cachep);
  24.853 +        return __xmem_cache_alloc(csizep->cs_cachep);
  24.854      }
  24.855      return NULL;
  24.856  }
  24.857  
  24.858  /**
  24.859 - * kmem_cache_free - Deallocate an object
  24.860 + * xmem_cache_free - Deallocate an object
  24.861   * @cachep: The cache the allocation was from.
  24.862   * @objp: The previously allocated object.
  24.863   *
  24.864   * Free an object which was previously allocated from this
  24.865   * cache.
  24.866   */
  24.867 -void kmem_cache_free (kmem_cache_t *cachep, void *objp)
  24.868 +void xmem_cache_free (xmem_cache_t *cachep, void *objp)
  24.869  {
  24.870      unsigned long flags;
  24.871  #if DEBUG
  24.872 @@ -1482,20 +1482,20 @@ void kmem_cache_free (kmem_cache_t *cach
  24.873  #endif
  24.874  
  24.875      local_irq_save(flags);
  24.876 -    __kmem_cache_free(cachep, objp);
  24.877 +    __xmem_cache_free(cachep, objp);
  24.878      local_irq_restore(flags);
  24.879  }
  24.880  
  24.881  /**
  24.882 - * kfree - free previously allocated memory
  24.883 - * @objp: pointer returned by kmalloc.
  24.884 + * xfree - free previously allocated memory
  24.885 + * @objp: pointer returned by xmalloc.
  24.886   *
  24.887 - * Don't free memory not originally allocated by kmalloc()
  24.888 + * Don't free memory not originally allocated by xmalloc()
  24.889   * or you will run into trouble.
  24.890   */
  24.891 -void kfree (const void *objp)
  24.892 +void xfree (const void *objp)
  24.893  {
  24.894 -    kmem_cache_t *c;
  24.895 +    xmem_cache_t *c;
  24.896      unsigned long flags;
  24.897  
  24.898      if (!objp)
  24.899 @@ -1503,11 +1503,11 @@ void kfree (const void *objp)
  24.900      local_irq_save(flags);
  24.901      CHECK_PAGE(virt_to_page(objp));
  24.902      c = GET_PAGE_CACHE(virt_to_page(objp));
  24.903 -    __kmem_cache_free(c, (void*)objp);
  24.904 +    __xmem_cache_free(c, (void*)objp);
  24.905      local_irq_restore(flags);
  24.906  }
  24.907  
  24.908 -kmem_cache_t *kmem_find_general_cachep(size_t size)
  24.909 +xmem_cache_t *xmem_find_general_cachep(size_t size)
  24.910  {
  24.911      cache_sizes_t *csizep = cache_sizes;
  24.912  
  24.913 @@ -1526,7 +1526,7 @@ kmem_cache_t *kmem_find_general_cachep(s
  24.914  #ifdef CONFIG_SMP
  24.915  
  24.916  /* called with cache_chain_sem acquired.  */
  24.917 -static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
  24.918 +static int xmem_tune_cpucache (xmem_cache_t* cachep, int limit, int batchcount)
  24.919  {
  24.920      ccupdate_struct_t new;
  24.921      int i;
  24.922 @@ -1548,7 +1548,7 @@ static int kmem_tune_cpucache (kmem_cach
  24.923          for (i = 0; i< smp_num_cpus; i++) {
  24.924              cpucache_t* ccnew;
  24.925  
  24.926 -            ccnew = kmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
  24.927 +            ccnew = xmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
  24.928              if (!ccnew)
  24.929                  goto oom;
  24.930              ccnew->limit = limit;
  24.931 @@ -1570,16 +1570,16 @@ static int kmem_tune_cpucache (kmem_cach
  24.932          local_irq_disable();
  24.933          free_block(cachep, cc_entry(ccold), ccold->avail);
  24.934          local_irq_enable();
  24.935 -        kfree(ccold);
  24.936 +        xfree(ccold);
  24.937      }
  24.938      return 0;
  24.939   oom:
  24.940      for (i--; i >= 0; i--)
  24.941 -        kfree(new.new[cpu_logical_map(i)]);
  24.942 +        xfree(new.new[cpu_logical_map(i)]);
  24.943      return -ENOMEM;
  24.944  }
  24.945  
  24.946 -static void enable_cpucache (kmem_cache_t *cachep)
  24.947 +static void enable_cpucache (xmem_cache_t *cachep)
  24.948  {
  24.949      int err;
  24.950      int limit;
  24.951 @@ -1594,7 +1594,7 @@ static void enable_cpucache (kmem_cache_
  24.952      else
  24.953          limit = 252;
  24.954  
  24.955 -    err = kmem_tune_cpucache(cachep, limit, limit/2);
  24.956 +    err = xmem_tune_cpucache(cachep, limit, limit/2);
  24.957      if (err)
  24.958          printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
  24.959                 cachep->name, -err);
  24.960 @@ -1609,7 +1609,7 @@ static void enable_all_cpucaches (void)
  24.961  
  24.962      p = &cache_cache.next;
  24.963      do {
  24.964 -        kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
  24.965 +        xmem_cache_t* cachep = list_entry(p, xmem_cache_t, next);
  24.966  
  24.967          enable_cpucache(cachep);
  24.968          p = cachep->next.next;
  24.969 @@ -1620,13 +1620,13 @@ static void enable_all_cpucaches (void)
  24.970  #endif
  24.971  
  24.972  /**
  24.973 - * kmem_cache_reap - Reclaim memory from caches.
  24.974 + * xmem_cache_reap - Reclaim memory from caches.
  24.975   */
  24.976 -int kmem_cache_reap(void)
  24.977 +int xmem_cache_reap(void)
  24.978  {
  24.979      slab_t *slabp;
  24.980 -    kmem_cache_t *searchp;
  24.981 -    kmem_cache_t *best_cachep;
  24.982 +    xmem_cache_t *searchp;
  24.983 +    xmem_cache_t *best_cachep;
  24.984      unsigned int best_pages;
  24.985      unsigned int best_len;
  24.986      unsigned int scan;
  24.987 @@ -1693,14 +1693,14 @@ int kmem_cache_reap(void)
  24.988              best_pages = pages;
  24.989              if (pages >= REAP_PERFECT) {
  24.990                  clock_searchp = list_entry(searchp->next.next,
  24.991 -                                           kmem_cache_t,next);
  24.992 +                                           xmem_cache_t,next);
  24.993                  goto perfect;
  24.994              }
  24.995          }
  24.996      next_unlock:
  24.997          spin_unlock_irq(&searchp->spinlock);
  24.998      next:
  24.999 -        searchp = list_entry(searchp->next.next,kmem_cache_t,next);
 24.1000 +        searchp = list_entry(searchp->next.next,xmem_cache_t,next);
 24.1001      } while (--scan && searchp != clock_searchp);
 24.1002  
 24.1003      clock_searchp = searchp;
 24.1004 @@ -1733,7 +1733,7 @@ int kmem_cache_reap(void)
 24.1005           * cache.
 24.1006           */
 24.1007          spin_unlock_irq(&best_cachep->spinlock);
 24.1008 -        kmem_slab_destroy(best_cachep, slabp);
 24.1009 +        xmem_slab_destroy(best_cachep, slabp);
 24.1010          spin_lock_irq(&best_cachep->spinlock);
 24.1011      }
 24.1012      spin_unlock_irq(&best_cachep->spinlock);
 24.1013 @@ -1762,14 +1762,14 @@ void dump_slabinfo()
 24.1014      down(&cache_chain_sem);
 24.1015      p = &cache_cache.next;
 24.1016      do {
 24.1017 -        kmem_cache_t	*cachep;
 24.1018 +        xmem_cache_t	*cachep;
 24.1019          struct list_head *q;
 24.1020          slab_t		*slabp;
 24.1021          unsigned long	active_objs;
 24.1022          unsigned long	num_objs;
 24.1023          unsigned long	active_slabs = 0;
 24.1024          unsigned long	num_slabs;
 24.1025 -        cachep = list_entry(p, kmem_cache_t, next);
 24.1026 +        cachep = list_entry(p, xmem_cache_t, next);
 24.1027  
 24.1028          spin_lock_irq(&cachep->spinlock);
 24.1029          active_objs = 0;
    25.1 --- a/xen/common/trace.c	Mon Jul 26 18:22:00 2004 +0000
    25.2 +++ b/xen/common/trace.c	Mon Jul 26 20:03:34 2004 +0000
    25.3 @@ -59,7 +59,7 @@ void init_trace_bufs(void)
    25.4      nr_pages = smp_num_cpus * opt_tbuf_size;
    25.5      order    = get_order(nr_pages * PAGE_SIZE);
    25.6      
    25.7 -    if ( (rawbuf = (char *)__get_free_pages(order)) == NULL )
    25.8 +    if ( (rawbuf = (char *)alloc_xenheap_pages(order)) == NULL )
    25.9      {
   25.10          printk("Xen trace buffers: memory allocation failed\n");
   25.11          return;
    26.1 --- a/xen/drivers/char/console.c	Mon Jul 26 18:22:00 2004 +0000
    26.2 +++ b/xen/drivers/char/console.c	Mon Jul 26 20:03:34 2004 +0000
    26.3 @@ -303,7 +303,7 @@ long do_console_io(int cmd, int count, c
    26.4      case CONSOLEIO_write:
    26.5          if ( count > (PAGE_SIZE-1) )
    26.6              count = PAGE_SIZE-1;
    26.7 -        if ( (kbuf = (char *)get_free_page()) == NULL )
    26.8 +        if ( (kbuf = (char *)alloc_xenheap_page()) == NULL )
    26.9              return -ENOMEM;
   26.10          kbuf[count] = '\0';
   26.11          rc = count;
   26.12 @@ -311,7 +311,7 @@ long do_console_io(int cmd, int count, c
   26.13              rc = -EFAULT;
   26.14          else
   26.15              serial_puts(sercon_handle, kbuf);
   26.16 -        free_page((unsigned long)kbuf);
   26.17 +        free_xenheap_page((unsigned long)kbuf);
   26.18          break;
   26.19      case CONSOLEIO_read:
   26.20          rc = 0;
    27.1 --- a/xen/drivers/pci/pci.c	Mon Jul 26 18:22:00 2004 +0000
    27.2 +++ b/xen/drivers/pci/pci.c	Mon Jul 26 20:03:34 2004 +0000
    27.3 @@ -1126,7 +1126,7 @@ static struct pci_bus * __devinit pci_al
    27.4  {
    27.5  	struct pci_bus *b;
    27.6  
    27.7 -	b = kmalloc(sizeof(*b));
    27.8 +	b = xmalloc(sizeof(*b));
    27.9  	if (b) {
   27.10  		memset(b, 0, sizeof(*b));
   27.11  		INIT_LIST_HEAD(&b->children);
   27.12 @@ -1351,7 +1351,7 @@ struct pci_dev * __devinit pci_scan_devi
   27.13  	if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
   27.14  		return NULL;
   27.15  
   27.16 -	dev = kmalloc(sizeof(*dev));
   27.17 +	dev = xmalloc(sizeof(*dev));
   27.18  	if (!dev)
   27.19  		return NULL;
   27.20  
   27.21 @@ -1363,7 +1363,7 @@ struct pci_dev * __devinit pci_scan_devi
   27.22  	   set this higher, assuming the system even supports it.  */
   27.23  	dev->dma_mask = 0xffffffff;
   27.24  	if (pci_setup_device(dev) < 0) {
   27.25 -		kfree(dev);
   27.26 +		xfree(dev);
   27.27  		dev = NULL;
   27.28  	}
   27.29  	return dev;
   27.30 @@ -1431,7 +1431,7 @@ unsigned int __devinit pci_do_scan_bus(s
   27.31  	max = bus->secondary;
   27.32  
   27.33  	/* Create a device template */
   27.34 -	dev0 = kmalloc(sizeof(struct pci_dev));
   27.35 +	dev0 = xmalloc(sizeof(struct pci_dev));
   27.36  	if(!dev0) {
   27.37  	  panic("Out of memory scanning PCI bus!\n");
   27.38  	}
   27.39 @@ -1444,7 +1444,7 @@ unsigned int __devinit pci_do_scan_bus(s
   27.40  		dev0->devfn = devfn;
   27.41  		pci_scan_slot(dev0);
   27.42  	}
   27.43 -	kfree(dev0);
   27.44 +	xfree(dev0);
   27.45  
   27.46  	/*
   27.47  	 * After performing arch-dependent fixup of the bus, look behind
    28.1 --- a/xen/drivers/pci/setup-bus.c	Mon Jul 26 18:22:00 2004 +0000
    28.2 +++ b/xen/drivers/pci/setup-bus.c	Mon Jul 26 20:03:34 2004 +0000
    28.3 @@ -74,7 +74,7 @@ pbus_assign_resources_sorted(struct pci_
    28.4  		pci_assign_resource(list->dev, idx);
    28.5  		tmp = list;
    28.6  		list = list->next;
    28.7 -		kfree(tmp);
    28.8 +		xfree(tmp);
    28.9  	}
   28.10  
   28.11  	return found_vga;
    29.1 --- a/xen/drivers/pci/setup-res.c	Mon Jul 26 18:22:00 2004 +0000
    29.2 +++ b/xen/drivers/pci/setup-res.c	Mon Jul 26 20:03:34 2004 +0000
    29.3 @@ -171,10 +171,10 @@ pdev_sort_resources(struct pci_dev *dev,
    29.4  					ln->res->start;
    29.5  			}
    29.6  			if (r_align > align) {
    29.7 -				tmp = kmalloc(sizeof(*tmp));
    29.8 +				tmp = xmalloc(sizeof(*tmp));
    29.9  				if (!tmp)
   29.10  					panic("pdev_sort_resources(): "
   29.11 -					      "kmalloc() failed!\n");
   29.12 +					      "xmalloc() failed!\n");
   29.13  				tmp->next = ln;
   29.14  				tmp->res = r;
   29.15  				tmp->dev = dev;
    30.1 --- a/xen/include/asm-x86/domain.h	Mon Jul 26 18:22:00 2004 +0000
    30.2 +++ b/xen/include/asm-x86/domain.h	Mon Jul 26 20:03:34 2004 +0000
    30.3 @@ -9,7 +9,7 @@ extern void arch_final_setup_guestos(
    30.4  
    30.5  static inline void free_perdomain_pt(struct domain *d)
    30.6  {
    30.7 -    free_page((unsigned long)d->mm.perdomain_pt);
    30.8 +    free_xenheap_page((unsigned long)d->mm.perdomain_pt);
    30.9  }
   30.10  
   30.11  extern void domain_relinquish_memory(struct domain *d);
    31.1 --- a/xen/include/asm-x86/io.h	Mon Jul 26 18:22:00 2004 +0000
    31.2 +++ b/xen/include/asm-x86/io.h	Mon Jul 26 20:03:34 2004 +0000
    31.3 @@ -12,7 +12,7 @@
    31.4   *
    31.5   *  The returned physical address is the physical (CPU) mapping for
    31.6   *  the memory address given. It is only valid to use this function on
    31.7 - *  addresses directly mapped or allocated via kmalloc.
    31.8 + *  addresses directly mapped or allocated via xmalloc.
    31.9   *
   31.10   *  This function does not give bus mappings for DMA transfers. In
   31.11   *  almost all conceivable cases a device driver should not be using
    32.1 --- a/xen/include/asm-x86/shadow.h	Mon Jul 26 18:22:00 2004 +0000
    32.2 +++ b/xen/include/asm-x86/shadow.h	Mon Jul 26 20:03:34 2004 +0000
    32.3 @@ -524,7 +524,7 @@ static inline void set_shadow_status( st
    32.4          SH_LOG("allocate more shadow hashtable blocks");
    32.5  
    32.6          // we need to allocate more space
    32.7 -        extra = kmalloc(sizeof(void*) + (shadow_ht_extra_size * 
    32.8 +        extra = xmalloc(sizeof(void*) + (shadow_ht_extra_size * 
    32.9                                           sizeof(struct shadow_status)));
   32.10  
   32.11          if( ! extra ) BUG(); // should be more graceful here....
    33.1 --- a/xen/include/asm-x86/types.h	Mon Jul 26 18:22:00 2004 +0000
    33.2 +++ b/xen/include/asm-x86/types.h	Mon Jul 26 20:03:34 2004 +0000
    33.3 @@ -56,6 +56,6 @@ typedef unsigned long size_t;
    33.4  typedef unsigned long dma_addr_t;
    33.5  typedef u64 dma64_addr_t;
    33.6  
    33.7 -typedef unsigned short kmem_bufctl_t;
    33.8 +typedef unsigned short xmem_bufctl_t;
    33.9  
   33.10  #endif
    34.1 --- a/xen/include/xen/mm.h	Mon Jul 26 18:22:00 2004 +0000
    34.2 +++ b/xen/include/xen/mm.h	Mon Jul 26 20:03:34 2004 +0000
    34.3 @@ -4,12 +4,10 @@
    34.4  
    34.5  /* page_alloc.c */
    34.6  void init_page_allocator(unsigned long min, unsigned long max);
    34.7 -unsigned long __get_free_pages(int order);
    34.8 -void __free_pages(unsigned long p, int order);
    34.9 -#define get_free_page()   (__get_free_pages(0))
   34.10 -#define __get_free_page() (__get_free_pages(0))
   34.11 -#define free_pages(_p,_o) (__free_pages(_p,_o))
   34.12 -#define free_page(_p)     (__free_pages(_p,0))
   34.13 +unsigned long alloc_xenheap_pages(int order);
   34.14 +void free_xenheap_pages(unsigned long p, int order);
   34.15 +#define alloc_xenheap_page() (alloc_xenheap_pages(0))
   34.16 +#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
   34.17  
   34.18  #include <asm/mm.h>
   34.19  
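
[Editor's note: a minimal sketch, not part of this changeset, of the renamed Xen-heap
page interface declared above.  get_order() and PAGE_SIZE are the existing helpers
already used by converted callers such as xen/common/trace.c; the function name and
'nr_bytes' parameter are invented for illustration.]

static void xenheap_pages_example(unsigned long nr_bytes)
{
    unsigned long buf;
    int order = get_order(nr_bytes);    /* bytes -> smallest covering page order */

    buf = alloc_xenheap_pages(order);   /* replaces __get_free_pages(order)      */
    if ( buf == 0 )
        return;                         /* allocation failure                    */
    memset((void *)buf, 0, PAGE_SIZE << order);
    free_xenheap_pages(buf, order);     /* replaces free_pages(buf, order)       */

    buf = alloc_xenheap_page();         /* replaces get_free_page()              */
    if ( buf == 0 )
        return;
    free_xenheap_page(buf);             /* replaces free_page(buf)               */
}
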
    35.1 --- a/xen/include/xen/pci.h	Mon Jul 26 18:22:00 2004 +0000
    35.2 +++ b/xen/include/xen/pci.h	Mon Jul 26 20:03:34 2004 +0000
    35.3 @@ -667,7 +667,7 @@ unsigned int pci_do_scan_bus(struct pci_
    35.4  struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
    35.5  
    35.6  #if 0
    35.7 -/* kmem_cache style wrapper around pci_alloc_consistent() */
    35.8 +/* xmem_cache style wrapper around pci_alloc_consistent() */
    35.9  struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
   35.10  		size_t size, size_t align, size_t allocation, int flags);
   35.11  void pci_pool_destroy (struct pci_pool *pool);
    36.1 --- a/xen/include/xen/slab.h	Mon Jul 26 18:22:00 2004 +0000
    36.2 +++ b/xen/include/xen/slab.h	Mon Jul 26 20:03:34 2004 +0000
    36.3 @@ -6,12 +6,12 @@
    36.4  #ifndef __SLAB_H__
    36.5  #define __SLAB_H__
    36.6  
    36.7 -typedef struct kmem_cache_s kmem_cache_t;
    36.8 +typedef struct xmem_cache_s xmem_cache_t;
    36.9  
   36.10  #include <xen/mm.h>
   36.11  #include <xen/cache.h>
   36.12  
   36.13 -/* Flags to pass to kmem_cache_create(). */
   36.14 +/* Flags to pass to xmem_cache_create(). */
   36.15  /* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
   36.16  #define SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor */
   36.17  #define SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
   36.18 @@ -24,23 +24,23 @@ typedef struct kmem_cache_s kmem_cache_t
   36.19  #define SLAB_CTOR_ATOMIC        0x002UL /* tell cons. it can't sleep */
   36.20  #define SLAB_CTOR_VERIFY        0x004UL /* tell cons. it's a verify call */
   36.21  
   36.22 -extern void kmem_cache_init(void);
   36.23 -extern void kmem_cache_sizes_init(unsigned long);
   36.24 +extern void xmem_cache_init(void);
   36.25 +extern void xmem_cache_sizes_init(unsigned long);
   36.26  
   36.27 -extern kmem_cache_t *kmem_find_general_cachep(size_t);
   36.28 -extern kmem_cache_t *kmem_cache_create(
   36.29 +extern xmem_cache_t *xmem_find_general_cachep(size_t);
   36.30 +extern xmem_cache_t *xmem_cache_create(
   36.31      const char *, size_t, size_t, unsigned long,
   36.32 -    void (*)(void *, kmem_cache_t *, unsigned long),
   36.33 -    void (*)(void *, kmem_cache_t *, unsigned long));
   36.34 -extern int kmem_cache_destroy(kmem_cache_t *);
   36.35 -extern int kmem_cache_shrink(kmem_cache_t *);
   36.36 -extern void *kmem_cache_alloc(kmem_cache_t *);
   36.37 -extern void kmem_cache_free(kmem_cache_t *, void *);
   36.38 +    void (*)(void *, xmem_cache_t *, unsigned long),
   36.39 +    void (*)(void *, xmem_cache_t *, unsigned long));
   36.40 +extern int xmem_cache_destroy(xmem_cache_t *);
   36.41 +extern int xmem_cache_shrink(xmem_cache_t *);
   36.42 +extern void *xmem_cache_alloc(xmem_cache_t *);
   36.43 +extern void xmem_cache_free(xmem_cache_t *, void *);
   36.44  
   36.45 -extern void *kmalloc(size_t);
   36.46 -extern void kfree(const void *);
   36.47 +extern void *xmalloc(size_t);
   36.48 +extern void xfree(const void *);
   36.49  
   36.50 -extern int kmem_cache_reap(void);
   36.51 +extern int xmem_cache_reap(void);
   36.52  
   36.53  extern void dump_slabinfo();
   36.54
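
[Editor's note: a minimal sketch, not part of this changeset, of the xmalloc()/xfree()
pair declared in the slab.h hunk above, mirroring the converted xen/drivers/pci/pci.c
callers.  example_alloc_bus() and example_free_bus() are invented names.]

static struct pci_bus *example_alloc_bus(void)
{
    struct pci_bus *b;

    b = xmalloc(sizeof(*b));      /* replaces kmalloc(sizeof(*b))            */
    if ( b == NULL )
        return NULL;
    memset(b, 0, sizeof(*b));     /* xmalloc(), like kmalloc(), does not     */
                                  /* zero the returned memory                */
    return b;
}

static void example_free_bus(struct pci_bus *b)
{
    xfree(b);                     /* replaces kfree(b)                       */
}
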