ia64/xen-unstable

changeset 3620:0ef6e8e6e85d

bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 15:24:31 2005 +0000 (2005-02-02)
parents 36fa617b88a7 a4b03d935138
children bf2c38625b39 f89816eaeaad
files .rootkeys xen/Makefile xen/arch/x86/domain.c xen/arch/x86/irq.c xen/arch/x86/microcode.c xen/arch/x86/mtrr/generic.c xen/arch/x86/mtrr/main.c xen/arch/x86/pci-pc.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/common/ac_timer.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/grant_table.c xen/common/malloc.c xen/common/page_alloc.c xen/common/physdev.c xen/common/resource.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_rrobin.c xen/common/slab.c xen/drivers/pci/pci.c xen/drivers/pci/setup-res.c xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/xen/domain.h xen/include/xen/lib.h xen/include/xen/list.h xen/include/xen/slab.h
line diff
     1.1 --- a/.rootkeys	Wed Feb 02 09:38:32 2005 +0000
     1.2 +++ b/.rootkeys	Wed Feb 02 15:24:31 2005 +0000
     1.3 @@ -923,6 +923,7 @@ 41262590gGIOn-1pvF5KpUu8Wb6_JA xen/commo
     1.4  3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
     1.5  3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
     1.6  3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c
     1.7 +4200cf14XGr26_PCC8NxREDhr7Hk5Q xen/common/malloc.c
     1.8  41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c
     1.9  3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c
    1.10  3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c
     2.1 --- a/xen/Makefile	Wed Feb 02 09:38:32 2005 +0000
     2.2 +++ b/xen/Makefile	Wed Feb 02 15:24:31 2005 +0000
     2.3 @@ -77,7 +77,7 @@ include/xen/banner.h: tools/figlet/figle
     2.4  	tools/figlet/figlet -d tools/figlet Xen $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION) > $@.new
     2.5  	@mv -f $@.new $@
     2.6  
     2.7 -include/asm-$(TARGET_ARCH)/asm-offsets.h: arch/$(TARGET_ARCH)/asm-offsets.s
     2.8 +include/asm-$(TARGET_ARCH)/asm-offsets.h: arch/$(TARGET_ARCH)/asm-offsets.s $(HDRS)
     2.9  	@(set -e; \
    2.10  	  echo "/*"; \
    2.11  	  echo " * DO NOT MODIFY."; \
     3.1 --- a/xen/arch/x86/domain.c	Wed Feb 02 09:38:32 2005 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Wed Feb 02 15:24:31 2005 +0000
     3.3 @@ -223,42 +223,24 @@ void dump_pageframe_info(struct domain *
     3.4             page->u.inuse.type_info);
     3.5  }
     3.6  
     3.7 -xmem_cache_t *domain_struct_cachep;
     3.8 -xmem_cache_t *exec_domain_struct_cachep;
     3.9 -
    3.10 -void __init domain_startofday(void)
    3.11 -{
    3.12 -    domain_struct_cachep = xmem_cache_create(
    3.13 -        "domain_cache", sizeof(struct domain),
    3.14 -        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    3.15 -    if ( domain_struct_cachep == NULL )
    3.16 -        panic("No slab cache for domain structs.");
    3.17 -
    3.18 -    exec_domain_struct_cachep = xmem_cache_create(
    3.19 -        "exec_dom_cache", sizeof(struct exec_domain),
    3.20 -        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    3.21 -    if ( exec_domain_struct_cachep == NULL )
    3.22 -        BUG();
    3.23 -}
    3.24 -
    3.25  struct domain *arch_alloc_domain_struct(void)
    3.26  {
    3.27 -    return xmem_cache_alloc(domain_struct_cachep);
    3.28 +    return xmalloc(struct domain);
    3.29  }
    3.30  
    3.31  void arch_free_domain_struct(struct domain *d)
    3.32  {
    3.33 -    xmem_cache_free(domain_struct_cachep, d);
    3.34 +    xfree(d);
    3.35  }
    3.36  
    3.37  struct exec_domain *arch_alloc_exec_domain_struct(void)
    3.38  {
    3.39 -    return xmem_cache_alloc(exec_domain_struct_cachep);
    3.40 +    return xmalloc(struct exec_domain);
    3.41  }
    3.42  
    3.43  void arch_free_exec_domain_struct(struct exec_domain *ed)
    3.44  {
    3.45 -    xmem_cache_free(exec_domain_struct_cachep, ed);
    3.46 +    xfree(ed);
    3.47  }
    3.48  
    3.49  void free_perdomain_pt(struct domain *d)
     4.1 --- a/xen/arch/x86/irq.c	Wed Feb 02 09:38:32 2005 +0000
     4.2 +++ b/xen/arch/x86/irq.c	Wed Feb 02 15:24:31 2005 +0000
     4.3 @@ -260,7 +260,7 @@ int pirq_guest_bind(struct exec_domain *
     4.4              goto out;
     4.5          }
     4.6  
     4.7 -        action = xmalloc(sizeof(irq_guest_action_t));
     4.8 +        action = xmalloc(irq_guest_action_t);
     4.9          if ( (desc->action = (struct irqaction *)action) == NULL )
    4.10          {
    4.11              DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
     5.1 --- a/xen/arch/x86/microcode.c	Wed Feb 02 09:38:32 2005 +0000
     5.2 +++ b/xen/arch/x86/microcode.c	Wed Feb 02 15:24:31 2005 +0000
     5.3 @@ -84,7 +84,7 @@
     5.4  #define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
     5.5  #define down(_m) spin_lock(_m)
     5.6  #define up(_m) spin_unlock(_m)
     5.7 -#define vmalloc(_s) xmalloc(_s)
     5.8 +#define vmalloc(_s) _xmalloc(_s)
     5.9  #define vfree(_p) xfree(_p)
    5.10  #define num_online_cpus() smp_num_cpus
    5.11  static inline int on_each_cpu(
     6.1 --- a/xen/arch/x86/mtrr/generic.c	Wed Feb 02 09:38:32 2005 +0000
     6.2 +++ b/xen/arch/x86/mtrr/generic.c	Wed Feb 02 15:24:31 2005 +0000
     6.3 @@ -52,7 +52,8 @@ void __init get_mtrr_state(void)
     6.4  	unsigned lo, dummy;
     6.5  
     6.6  	if (!mtrr_state.var_ranges) {
     6.7 -		mtrr_state.var_ranges = xmalloc(num_var_ranges * sizeof (struct mtrr_var_range));
     6.8 +		mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
     6.9 +						  num_var_ranges);
    6.10  		if (!mtrr_state.var_ranges)
    6.11  			return;
    6.12  	} 
     7.1 --- a/xen/arch/x86/mtrr/main.c	Wed Feb 02 09:38:32 2005 +0000
     7.2 +++ b/xen/arch/x86/mtrr/main.c	Wed Feb 02 15:24:31 2005 +0000
     7.3 @@ -136,8 +136,7 @@ static void __init init_table(void)
     7.4  	int i, max;
     7.5  
     7.6  	max = num_var_ranges;
     7.7 -	if ((usage_table = xmalloc(max * sizeof *usage_table))
     7.8 -	    == NULL) {
     7.9 +	if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) {
    7.10  		printk(KERN_ERR "mtrr: could not allocate\n");
    7.11  		return;
    7.12  	}
     8.1 --- a/xen/arch/x86/pci-pc.c	Wed Feb 02 09:38:32 2005 +0000
     8.2 +++ b/xen/arch/x86/pci-pc.c	Wed Feb 02 15:24:31 2005 +0000
     8.3 @@ -1036,7 +1036,7 @@ struct irq_routing_table * __devinit pci
     8.4  	if (ret & 0xff00)
     8.5  		printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
     8.6  	else if (opt.size) {
     8.7 -		rt = xmalloc(sizeof(struct irq_routing_table) + opt.size);
     8.8 +		rt = _xmalloc(sizeof(struct irq_routing_table) + opt.size);
     8.9  		if (rt) {
    8.10  			memset(rt, 0, sizeof(struct irq_routing_table));
    8.11  			rt->size = opt.size + sizeof(struct irq_routing_table);
     9.1 --- a/xen/arch/x86/setup.c	Wed Feb 02 09:38:32 2005 +0000
     9.2 +++ b/xen/arch/x86/setup.c	Wed Feb 02 15:24:31 2005 +0000
     9.3 @@ -602,8 +602,6 @@ void __init __start_xen(multiboot_info_t
     9.4      xmem_cache_init();
     9.5      xmem_cache_sizes_init(max_page);
     9.6  
     9.7 -    domain_startofday();
     9.8 -
     9.9      start_of_day();
    9.10  
    9.11      grant_table_init();
    10.1 --- a/xen/arch/x86/shadow.c	Wed Feb 02 09:38:32 2005 +0000
    10.2 +++ b/xen/arch/x86/shadow.c	Wed Feb 02 15:24:31 2005 +0000
    10.3 @@ -176,8 +176,7 @@ int shadow_mode_enable(struct domain *p,
    10.4  {
    10.5      struct mm_struct *m = &p->exec_domain[0]->mm;
    10.6  
    10.7 -    m->shadow_ht = xmalloc(
    10.8 -        shadow_ht_buckets * sizeof(struct shadow_status));
    10.9 +    m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
   10.10      if ( m->shadow_ht == NULL )
   10.11          goto nomem;
   10.12      memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
   10.13 @@ -186,7 +185,7 @@ int shadow_mode_enable(struct domain *p,
   10.14      {
   10.15          m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63;
   10.16          m->shadow_dirty_bitmap = 
   10.17 -            xmalloc(m->shadow_dirty_bitmap_size/8);
   10.18 +            _xmalloc(m->shadow_dirty_bitmap_size/8);
   10.19          if ( m->shadow_dirty_bitmap == NULL )
   10.20          {
   10.21              m->shadow_dirty_bitmap_size = 0;
    11.1 --- a/xen/arch/x86/smpboot.c	Wed Feb 02 09:38:32 2005 +0000
    11.2 +++ b/xen/arch/x86/smpboot.c	Wed Feb 02 15:24:31 2005 +0000
    11.3 @@ -409,7 +409,7 @@ void __init start_secondary(void)
    11.4       * At this point, boot CPU has fully initialised the IDT. It is
    11.5       * now safe to make ourselves a private copy.
    11.6       */
    11.7 -    idt_tables[cpu] = xmalloc(IDT_ENTRIES*8);
    11.8 +    idt_tables[cpu] = xmalloc_array(struct desc_struct, IDT_ENTRIES);
    11.9      memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
   11.10      *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
   11.11      *(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
    12.1 --- a/xen/common/ac_timer.c	Wed Feb 02 09:38:32 2005 +0000
    12.2 +++ b/xen/common/ac_timer.c	Wed Feb 02 15:24:31 2005 +0000
    12.3 @@ -130,7 +130,7 @@ static int add_entry(struct ac_timer **h
    12.4      if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
    12.5      {
    12.6          int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
    12.7 -        struct ac_timer **new_heap = xmalloc(limit*sizeof(struct ac_timer *));
    12.8 +        struct ac_timer **new_heap = xmalloc_array(struct ac_timer *, limit);
    12.9          if ( new_heap == NULL ) BUG();
   12.10          memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
   12.11          for ( i = 0; i < smp_num_cpus; i++ )
   12.12 @@ -278,8 +278,7 @@ void __init ac_timer_init(void)
   12.13  
   12.14      for ( i = 0; i < smp_num_cpus; i++ )
   12.15      {
   12.16 -        ac_timers[i].heap = xmalloc(
   12.17 -            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
   12.18 +        ac_timers[i].heap = xmalloc_array(struct ac_timer *, DEFAULT_HEAP_LIMIT+1);
   12.19          if ( ac_timers[i].heap == NULL ) BUG();
   12.20          SET_HEAP_SIZE(ac_timers[i].heap, 0);
   12.21          SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
    13.1 --- a/xen/common/dom0_ops.c	Wed Feb 02 09:38:32 2005 +0000
    13.2 +++ b/xen/common/dom0_ops.c	Wed Feb 02 15:24:31 2005 +0000
    13.3 @@ -383,7 +383,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    13.4  
    13.5          if ( op->u.getdomaininfo.ctxt != NULL )
    13.6          {
    13.7 -            if ( (c = xmalloc(sizeof(*c))) == NULL )
    13.8 +            if ( (c = xmalloc(full_execution_context_t)) == NULL )
    13.9              {
   13.10                  ret = -ENOMEM;
   13.11                  put_domain(d);
    14.1 --- a/xen/common/domain.c	Wed Feb 02 09:38:32 2005 +0000
    14.2 +++ b/xen/common/domain.c	Wed Feb 02 15:24:31 2005 +0000
    14.3 @@ -264,7 +264,7 @@ int final_setup_guestos(struct domain *p
    14.4      int rc = 0;
    14.5      full_execution_context_t *c;
    14.6  
    14.7 -    if ( (c = xmalloc(sizeof(*c))) == NULL )
    14.8 +    if ( (c = xmalloc(full_execution_context_t)) == NULL )
    14.9          return -ENOMEM;
   14.10  
   14.11      if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
   14.12 @@ -311,7 +311,7 @@ long do_boot_vcpu(unsigned long vcpu, fu
   14.13      if ( alloc_exec_domain_struct(d, vcpu) == NULL )
   14.14          return -ENOMEM;
   14.15  
   14.16 -    if ( (c = xmalloc(sizeof(*c))) == NULL )
   14.17 +    if ( (c = xmalloc(full_execution_context_t)) == NULL )
   14.18      {
   14.19          rc = -ENOMEM;
   14.20          goto out;
    15.1 --- a/xen/common/event_channel.c	Wed Feb 02 09:38:32 2005 +0000
    15.2 +++ b/xen/common/event_channel.c	Wed Feb 02 15:24:31 2005 +0000
    15.3 @@ -54,7 +54,7 @@ static int get_free_port(struct exec_dom
    15.4          else
    15.5              max = port + EVENT_CHANNELS_SPREAD;
    15.6          
    15.7 -        chn = xmalloc(max * sizeof(event_channel_t));
    15.8 +        chn = xmalloc_array(event_channel_t, max);
    15.9          if ( unlikely(chn == NULL) )
   15.10              return -ENOMEM;
   15.11  
    16.1 --- a/xen/common/grant_table.c	Wed Feb 02 09:38:32 2005 +0000
    16.2 +++ b/xen/common/grant_table.c	Wed Feb 02 15:24:31 2005 +0000
    16.3 @@ -565,7 +565,7 @@ grant_table_create(
    16.4      grant_table_t *t;
    16.5      int            i;
    16.6  
    16.7 -    if ( (t = xmalloc(sizeof(*t))) == NULL )
    16.8 +    if ( (t = xmalloc(grant_table_t)) == NULL )
    16.9          goto no_mem;
   16.10  
   16.11      /* Simple stuff. */
   16.12 @@ -573,8 +573,8 @@ grant_table_create(
   16.13      spin_lock_init(&t->lock);
   16.14  
   16.15      /* Active grant table. */
   16.16 -    if ( (t->active = xmalloc(sizeof(active_grant_entry_t) * 
   16.17 -                              NR_GRANT_ENTRIES)) == NULL )
   16.18 +    if ( (t->active = xmalloc_array(active_grant_entry_t, NR_GRANT_ENTRIES))
   16.19 +	 == NULL )
   16.20          goto no_mem;
   16.21      memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
   16.22  
    17.1 --- a/xen/common/physdev.c	Wed Feb 02 09:38:32 2005 +0000
    17.2 +++ b/xen/common/physdev.c	Wed Feb 02 15:24:31 2005 +0000
    17.3 @@ -98,7 +98,7 @@ static void add_dev_to_task(struct domai
    17.4          return;
    17.5      }
    17.6  
    17.7 -    if ( (pdev = xmalloc(sizeof(phys_dev_t))) == NULL )
    17.8 +    if ( (pdev = xmalloc(phys_dev_t)) == NULL )
    17.9      {
   17.10          INFO("Error allocating pdev structure.\n");
   17.11          return;
   17.12 @@ -174,7 +174,7 @@ int physdev_pci_access_modify(
   17.13  
   17.14      if ( ed->thread.io_bitmap == NULL )
   17.15      {
   17.16 -        if ( (ed->thread.io_bitmap = xmalloc(IOBMP_BYTES)) == NULL )
   17.17 +        if ( (ed->thread.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL )
   17.18          {
   17.19              rc = -ENOMEM;
   17.20              goto out;
   17.21 @@ -765,7 +765,7 @@ void physdev_init_dom0(struct domain *p)
   17.22          if ( (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) &&
   17.23               (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) )
   17.24              continue;
   17.25 -        pdev = xmalloc(sizeof(phys_dev_t));
   17.26 +        pdev = xmalloc(phys_dev_t);
   17.27          pdev->dev = dev;
   17.28          pdev->flags = ACC_WRITE;
   17.29          pdev->state = 0;
    18.1 --- a/xen/common/resource.c	Wed Feb 02 09:38:32 2005 +0000
    18.2 +++ b/xen/common/resource.c	Wed Feb 02 15:24:31 2005 +0000
    18.3 @@ -220,7 +220,7 @@ int allocate_resource(struct resource *r
    18.4   */
    18.5  struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
    18.6  {
    18.7 -	struct resource *res = xmalloc(sizeof(*res));
    18.8 +	struct resource *res = xmalloc(struct resource);
    18.9  
   18.10  	if (res) {
   18.11  		memset(res, 0, sizeof(*res));
    19.1 --- a/xen/common/sched_atropos.c	Wed Feb 02 09:38:32 2005 +0000
    19.2 +++ b/xen/common/sched_atropos.c	Wed Feb 02 15:24:31 2005 +0000
    19.3 @@ -69,8 +69,6 @@ struct at_cpu_info
    19.4  
    19.5  static void at_dump_cpu_state(int cpu);
    19.6  
    19.7 -static xmem_cache_t *dom_info_cache;
    19.8 -
    19.9  static inline void __add_to_runqueue_head(struct domain *d)
   19.10  {
   19.11      list_add(RUNLIST(d), RUNQ(d->processor));
   19.12 @@ -173,7 +171,7 @@ static int at_alloc_task(struct domain *
   19.13  {
   19.14      ASSERT(p != NULL);
   19.15      
   19.16 -    p->sched_priv = xmem_cache_alloc(dom_info_cache);
   19.17 +    p->sched_priv = xmalloc(struct at_dom_info);
   19.18      if ( p->sched_priv == NULL )
   19.19          return -1;
   19.20      
   19.21 @@ -558,10 +556,6 @@ static int at_init_scheduler()
   19.22          INIT_LIST_HEAD(RUNQ(i));
   19.23      }
   19.24  
   19.25 -    dom_info_cache = xmem_cache_create("Atropos dom info",
   19.26 -                                       sizeof(struct at_dom_info),
   19.27 -                                       0, 0, NULL, NULL);
   19.28 -
   19.29      return 0;
   19.30  }
   19.31  
   19.32 @@ -649,7 +643,7 @@ static int at_adjdom(struct domain *p, s
   19.33  /* free memory associated with a task */
   19.34  static void at_free_task(struct domain *p)
   19.35  {
   19.36 -    xmem_cache_free( dom_info_cache, DOM_INFO(p) );
   19.37 +    xfree( DOM_INFO(p) );
   19.38  }
   19.39  
   19.40  
    20.1 --- a/xen/common/sched_bvt.c	Wed Feb 02 09:38:32 2005 +0000
    20.2 +++ b/xen/common/sched_bvt.c	Wed Feb 02 15:24:31 2005 +0000
    20.3 @@ -71,8 +71,6 @@ struct bvt_cpu_info
    20.4  #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    20.5  static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */
    20.6  
    20.7 -static xmem_cache_t *dom_info_cache;
    20.8 -
    20.9  static inline void __add_to_runqueue_head(struct exec_domain *d)
   20.10  {
   20.11      list_add(RUNLIST(d), RUNQUEUE(d->processor));
   20.12 @@ -173,7 +171,7 @@ int bvt_alloc_task(struct exec_domain *e
   20.13  {
   20.14      struct domain *d = ed->domain;
   20.15      if ( (d->sched_priv == NULL) ) {
   20.16 -        if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
   20.17 +        if ( (d->sched_priv = xmalloc(struct bvt_dom_info)) == NULL )
   20.18              return -1;
   20.19          memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
   20.20      }
   20.21 @@ -295,7 +293,7 @@ static void bvt_sleep(struct exec_domain
   20.22  void bvt_free_task(struct domain *d)
   20.23  {
   20.24      ASSERT(d->sched_priv != NULL);
   20.25 -    xmem_cache_free(dom_info_cache, d->sched_priv);
   20.26 +    xfree(d->sched_priv);
   20.27  }
   20.28  
   20.29  /* Control the scheduler. */
   20.30 @@ -557,7 +555,7 @@ int bvt_init_scheduler()
   20.31  
   20.32      for ( i = 0; i < NR_CPUS; i++ )
   20.33      {
   20.34 -        schedule_data[i].sched_priv = xmalloc(sizeof(struct bvt_cpu_info));
   20.35 +        schedule_data[i].sched_priv = xmalloc(struct bvt_cpu_info);
   20.36         
   20.37          if ( schedule_data[i].sched_priv == NULL )
   20.38          {
   20.39 @@ -570,14 +568,6 @@ int bvt_init_scheduler()
   20.40          CPU_SVT(i) = 0; /* XXX do I really need to do this? */
   20.41      }
   20.42  
   20.43 -    dom_info_cache = xmem_cache_create(
   20.44 -        "BVT dom info", sizeof(struct bvt_dom_info), 0, 0, NULL, NULL);
   20.45 -    if ( dom_info_cache == NULL )
   20.46 -    {
   20.47 -        printk("BVT: Failed to allocate domain info SLAB cache");
   20.48 -        return -1;
   20.49 -    }
   20.50 -
   20.51      return 0;
   20.52  }
   20.53  
    21.1 --- a/xen/common/sched_rrobin.c	Wed Feb 02 09:38:32 2005 +0000
    21.2 +++ b/xen/common/sched_rrobin.c	Wed Feb 02 15:24:31 2005 +0000
    21.3 @@ -27,8 +27,6 @@ struct rrobin_dom_info
    21.4  #define RUNLIST(d)      ((struct list_head *)&(RR_INFO(d)->run_list))
    21.5  #define RUNQUEUE(cpu)   RUNLIST(schedule_data[cpu].idle)
    21.6  
    21.7 -static xmem_cache_t *dom_info_cache;
    21.8 -
    21.9  static inline void __add_to_runqueue_head(struct domain *d)
   21.10  {
   21.11      list_add(RUNLIST(d), RUNQUEUE(d->processor));
   21.12 @@ -59,21 +57,12 @@ static int rr_init_scheduler()
   21.13      for ( i = 0; i < NR_CPUS; i++ )
   21.14          INIT_LIST_HEAD(RUNQUEUE(i));
   21.15     
   21.16 -    dom_info_cache = xmem_cache_create(
   21.17 -        "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, 0, NULL);
   21.18 -    if ( dom_info_cache == NULL )
   21.19 -    {
   21.20 -        printk("Could not allocate SLAB cache.\n");
   21.21 -        return -1;
   21.22 -    }
   21.23 -
   21.24      return 0;                                                                
   21.25  }
   21.26 -
   21.27  /* Allocates memory for per domain private scheduling data*/
   21.28  static int rr_alloc_task(struct domain *d)
   21.29  {
   21.30 -    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
   21.31 +    if ( (d->sched_priv = xmalloc(struct rrobin_dom_info)) == NULL )
   21.32          return -1;
   21.33      memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info));
   21.34      return 0;
   21.35 @@ -91,7 +80,7 @@ static void rr_add_task(struct domain *d
   21.36  static void rr_free_task(struct domain *d)
   21.37  {
   21.38      ASSERT(d->sched_priv != NULL);
   21.39 -    xmem_cache_free(dom_info_cache, d->sched_priv);
   21.40 +    xfree(d->sched_priv);
   21.41  }
   21.42  
   21.43  /* Initialises idle task */
    22.1 --- a/xen/common/slab.c	Wed Feb 02 09:38:32 2005 +0000
    22.2 +++ b/xen/common/slab.c	Wed Feb 02 15:24:31 2005 +0000
    22.3 @@ -1449,10 +1449,10 @@ void *xmem_cache_alloc(xmem_cache_t *cac
    22.4  }
    22.5  
    22.6  /**
    22.7 - * xmalloc - allocate memory
    22.8 + * _xmalloc - allocate memory
    22.9   * @size: how many bytes of memory are required.
   22.10   */
   22.11 -void *xmalloc(size_t size)
   22.12 +void *_xmalloc(size_t size)
   22.13  {
   22.14      cache_sizes_t *csizep = cache_sizes;
   22.15  
   22.16 @@ -1548,7 +1548,7 @@ static int xmem_tune_cpucache (xmem_cach
   22.17          for (i = 0; i< smp_num_cpus; i++) {
   22.18              cpucache_t* ccnew;
   22.19  
   22.20 -            ccnew = xmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
   22.21 +            ccnew = _xmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
   22.22              if (!ccnew)
   22.23                  goto oom;
   22.24              ccnew->limit = limit;
    23.1 --- a/xen/drivers/pci/pci.c	Wed Feb 02 09:38:32 2005 +0000
    23.2 +++ b/xen/drivers/pci/pci.c	Wed Feb 02 15:24:31 2005 +0000
    23.3 @@ -1126,7 +1126,7 @@ static struct pci_bus * __devinit pci_al
    23.4  {
    23.5  	struct pci_bus *b;
    23.6  
    23.7 -	b = xmalloc(sizeof(*b));
    23.8 +	b = xmalloc(struct pci_bus);
    23.9  	if (b) {
   23.10  		memset(b, 0, sizeof(*b));
   23.11  		INIT_LIST_HEAD(&b->children);
   23.12 @@ -1351,7 +1351,7 @@ struct pci_dev * __devinit pci_scan_devi
   23.13  	if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
   23.14  		return NULL;
   23.15  
   23.16 -	dev = xmalloc(sizeof(*dev));
   23.17 +	dev = xmalloc(struct pci_dev);
   23.18  	if (!dev)
   23.19  		return NULL;
   23.20  
   23.21 @@ -1431,7 +1431,7 @@ unsigned int __devinit pci_do_scan_bus(s
   23.22  	max = bus->secondary;
   23.23  
   23.24  	/* Create a device template */
   23.25 -	dev0 = xmalloc(sizeof(struct pci_dev));
   23.26 +	dev0 = xmalloc(struct pci_dev);
   23.27  	if(!dev0) {
   23.28  	  panic("Out of memory scanning PCI bus!\n");
   23.29  	}
    24.1 --- a/xen/drivers/pci/setup-res.c	Wed Feb 02 09:38:32 2005 +0000
    24.2 +++ b/xen/drivers/pci/setup-res.c	Wed Feb 02 15:24:31 2005 +0000
    24.3 @@ -171,10 +171,10 @@ pdev_sort_resources(struct pci_dev *dev,
    24.4  					ln->res->start;
    24.5  			}
    24.6  			if (r_align > align) {
    24.7 -				tmp = xmalloc(sizeof(*tmp));
    24.8 +				tmp = xmalloc(struct resource_list);
    24.9  				if (!tmp)
   24.10  					panic("pdev_sort_resources(): "
   24.11 -					      "xmalloc() failed!\n");
   24.12 +					      "malloc() failed!\n");
   24.13  				tmp->next = ln;
   24.14  				tmp->res = r;
   24.15  				tmp->dev = dev;
    25.1 --- a/xen/include/asm-x86/processor.h	Wed Feb 02 09:38:32 2005 +0000
    25.2 +++ b/xen/include/asm-x86/processor.h	Wed Feb 02 15:24:31 2005 +0000
    25.3 @@ -16,6 +16,7 @@
    25.4  #include <asm/pdb.h>
    25.5  #include <xen/config.h>
    25.6  #include <xen/spinlock.h>
    25.7 +#include <xen/cache.h>
    25.8  #include <asm/vmx_vmcs.h>
    25.9  #include <public/xen.h>
   25.10  #endif
   25.11 @@ -412,11 +413,11 @@ struct thread_struct {
   25.12       * for segment registers %ds, %es, %fs and %gs:
   25.13       * 	%ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
   25.14       */
   25.15 -    unsigned long event_selector;    /* 08: entry CS  */
   25.16 -    unsigned long event_address;     /* 12: entry EIP */
   25.17 +    unsigned long event_selector;    /* entry CS  */
   25.18 +    unsigned long event_address;     /* entry EIP */
   25.19  
   25.20 -    unsigned long failsafe_selector; /* 16: entry CS  */
   25.21 -    unsigned long failsafe_address;  /* 20: entry EIP */
   25.22 +    unsigned long failsafe_selector; /* entry CS  */
   25.23 +    unsigned long failsafe_address;  /* entry EIP */
   25.24  
   25.25      /* Bounce information for propagating an exception to guest OS. */
   25.26      struct trap_bounce trap_bounce;
   25.27 @@ -435,7 +436,7 @@ struct thread_struct {
   25.28  #ifdef CONFIG_VMX
   25.29      struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
   25.30  #endif
   25.31 -};
   25.32 +} __cacheline_aligned;
   25.33  
   25.34  #define IDT_ENTRIES 256
   25.35  extern idt_entry_t idt_table[];
    26.1 --- a/xen/include/asm-x86/shadow.h	Wed Feb 02 09:38:32 2005 +0000
    26.2 +++ b/xen/include/asm-x86/shadow.h	Wed Feb 02 15:24:31 2005 +0000
    26.3 @@ -616,7 +616,7 @@ static inline void set_shadow_status(
    26.4      {
    26.5          SH_LOG("Allocate more shadow hashtable blocks.");
    26.6  
    26.7 -        extra = xmalloc(
    26.8 +        extra = _xmalloc(
    26.9              sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
   26.10  
   26.11          /* XXX Should be more graceful here. */
    27.1 --- a/xen/include/xen/slab.h	Wed Feb 02 09:38:32 2005 +0000
    27.2 +++ b/xen/include/xen/slab.h	Wed Feb 02 15:24:31 2005 +0000
    27.3 @@ -18,6 +18,7 @@ typedef struct xmem_cache_s xmem_cache_t
    27.4  
    27.5  #include <xen/mm.h>
    27.6  #include <xen/cache.h>
    27.7 +#include <xen/types.h>
    27.8  
    27.9  /* Flags to pass to xmem_cache_create(). */
   27.10  /* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
   27.11 @@ -45,13 +46,24 @@ extern int xmem_cache_shrink(xmem_cache_
   27.12  extern void *xmem_cache_alloc(xmem_cache_t *);
   27.13  extern void xmem_cache_free(xmem_cache_t *, void *);
   27.14  
   27.15 -extern void *xmalloc(size_t);
   27.16 +extern void *_xmalloc(size_t);
   27.17  extern void xfree(const void *);
   27.18  
   27.19  extern int xmem_cache_reap(void);
   27.20  
   27.21  extern void dump_slabinfo();
   27.22  
   27.23 +/* Nicely typesafe for you. */
   27.24 +#define xmalloc(type) ((type *)_xmalloc(sizeof(type)))
   27.25 +#define xmalloc_array(type, num) ((type *)_xmalloc_array(sizeof(type), (num)))
   27.26 +
   27.27 +static inline void *_xmalloc_array(size_t size, size_t num)
   27.28 +{
   27.29 +	/* Check for overflow. */
   27.30 +	if (size && num > UINT_MAX / size)
   27.31 +		return NULL;
   27.32 +	return _xmalloc(size * num);
   27.33 +}
   27.34  #endif /* __ARCH_HAS_SLAB_ALLOCATOR */
   27.35  
   27.36  #endif /* __SLAB_H__ */