ia64/xen-unstable

changeset 11125:c841daf98bb0

[POWERPC] memory cleanup (phase 2): destroy (et al.) now works

The patch fixes the following problems:
- Fix heap initialization so we can link anywhere below the RMA. We
  now link Xen at 4MiB, which is way saner.
- Track and allocate pages (especially the RMA) based on order.
- Free domain memory on the relinquish-resources path rather than at
  domain destroy.
- Clarify the "order" arithmetic (see the sketch below).
- Fix the stale definition of IS_XEN_HEAP_FRAME().
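
As a quick illustration of the "order" convention used throughout this patch (a sketch for the reader, not part of the changeset): an order counts pages, so order N covers 1UL << N pages, i.e. 1UL << (N + PAGE_SHIFT) bytes. The numbers below assume 4 KiB pages (PAGE_SHIFT == 12), which is what the old "1<<14<<PAGE_SIZE = 64M" comment in ppc970.c implies.

    /* Illustrative sketch only; PAGE_SHIFT == 12 (4 KiB pages) is assumed. */
    #include <assert.h>
    #define PAGE_SHIFT 12

    static unsigned long order_to_bytes(unsigned int order)
    {
        /* An order counts pages: 1UL << order pages == 1UL << (order + PAGE_SHIFT) bytes. */
        return 1UL << (order + PAGE_SHIFT);
    }

    int main(void)
    {
        /* Order 14 is the 64 MiB PPC970 RMA: (1 << 14) pages * 4 KiB = 64 MiB. */
        assert(order_to_bytes(14) == 64UL << 20);
        /* The old and new spellings of rma_size() agree. */
        assert(((1UL << 14) << PAGE_SHIFT) == (1UL << (14 + PAGE_SHIFT)));
        return 0;
    }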

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Mon Aug 14 09:53:46 2006 -0400 (2006-08-14)
parents 552d50b3abf2
children 653588bfc0f9
files xen/arch/powerpc/Makefile xen/arch/powerpc/domain.c xen/arch/powerpc/htab.c xen/arch/powerpc/mm.c xen/arch/powerpc/powerpc64/ppc970.c xen/arch/powerpc/setup.c xen/include/asm-powerpc/domain.h xen/include/asm-powerpc/htab.h xen/include/asm-powerpc/mm.h
line diff
     1.1 --- a/xen/arch/powerpc/Makefile	Sun Aug 13 19:19:37 2006 -0400
     1.2 +++ b/xen/arch/powerpc/Makefile	Mon Aug 14 09:53:46 2006 -0400
     1.3 @@ -49,7 +49,7 @@ obj-y += elf32.o
     1.4  PPC_C_WARNINGS += -Wundef -Wmissing-prototypes -Wmissing-declarations
     1.5  CFLAGS += $(PPC_C_WARNINGS)
     1.6  
     1.7 -LINK=0x3000000
     1.8 +LINK=0x400000
     1.9  boot32_link_base = $(LINK)
    1.10  xen_link_offset  = 100
    1.11  xen_link_base    = $(patsubst %000,%$(xen_link_offset),$(LINK))
     2.1 --- a/xen/arch/powerpc/domain.c	Sun Aug 13 19:19:37 2006 -0400
     2.2 +++ b/xen/arch/powerpc/domain.c	Mon Aug 14 09:53:46 2006 -0400
     2.3 @@ -74,7 +74,7 @@ unsigned long hypercall_create_continuat
     2.4  int arch_domain_create(struct domain *d)
     2.5  {
     2.6      unsigned long rma_base;
     2.7 -    unsigned long rma_size;
     2.8 +    unsigned long rma_sz;
     2.9      uint htab_order;
    2.10  
    2.11      if (d->domain_id == IDLE_DOMAIN_ID) {
    2.12 @@ -85,19 +85,20 @@ int arch_domain_create(struct domain *d)
    2.13      }
    2.14  
    2.15      d->arch.rma_order = cpu_rma_order();
    2.16 -    rma_size = 1UL << d->arch.rma_order << PAGE_SHIFT;
    2.17 +    rma_sz = rma_size(d->arch.rma_order);
    2.18  
    2.19      /* allocate the real mode area */
    2.20      d->max_pages = 1UL << d->arch.rma_order;
    2.21 +    d->tot_pages = 0;
    2.22      d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
    2.23      if (NULL == d->arch.rma_page)
    2.24          return 1;
    2.25      rma_base = page_to_maddr(d->arch.rma_page);
    2.26  
    2.27 -    BUG_ON(rma_base & (rma_size-1)); /* check alignment */
    2.28 +    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
    2.29  
    2.30 -    printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_size);
    2.31 -    memset((void *)rma_base, 0, rma_size);
    2.32 +    printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
    2.33 +    memset((void *)rma_base, 0, rma_sz);
    2.34  
    2.35      d->shared_info = (shared_info_t *)
    2.36          (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
    2.37 @@ -120,7 +121,6 @@ int arch_domain_create(struct domain *d)
    2.38  
    2.39  void arch_domain_destroy(struct domain *d)
    2.40  {
    2.41 -    free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
    2.42      htab_free(d);
    2.43  }
    2.44  
    2.45 @@ -263,7 +263,7 @@ void sync_vcpu_execstate(struct vcpu *v)
    2.46  
    2.47  void domain_relinquish_resources(struct domain *d)
    2.48  {
    2.49 -    /* nothing to do? */
    2.50 +    free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
    2.51  }
    2.52  
    2.53  void arch_dump_domain_info(struct domain *d)
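
The domain.c hunks move the RMA free from final destruction to the resource-relinquish path, so arch_domain_destroy() is left with only the hash table to tear down. A rough sketch of the two functions as they read after this patch (taken from the hunks above):

    void arch_domain_destroy(struct domain *d)
    {
        htab_free(d);
    }

    void domain_relinquish_resources(struct domain *d)
    {
        /* Return the RMA to the domain heap while the domain is being
         * torn down, instead of waiting for final destruction. */
        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
    }

Together with the explicit d->tot_pages = 0 at creation, this keeps the domain's page accounting consistent through teardown.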
     3.1 --- a/xen/arch/powerpc/htab.c	Sun Aug 13 19:19:37 2006 -0400
     3.2 +++ b/xen/arch/powerpc/htab.c	Mon Aug 14 09:53:46 2006 -0400
     3.3 @@ -44,27 +44,25 @@ void htab_alloc(struct domain *d, uint o
     3.4      htab_raddr = (ulong)alloc_xenheap_pages(order);
     3.5      ASSERT(htab_raddr != 0);
     3.6      /* XXX check alignment guarantees */
     3.7 -    ASSERT((htab_raddr & (htab_bytes-1)) == 0);
     3.8 +    ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
     3.9  
    3.10      /* XXX slow. move memset out to service partition? */
    3.11      memset((void *)htab_raddr, 0, htab_bytes);
    3.12  
    3.13 +    d->arch.htab.order = order;
    3.14      d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
    3.15      d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
    3.16      d->arch.htab.map = (union pte *)htab_raddr;
    3.17      d->arch.htab.shadow = xmalloc_array(ulong,
    3.18                                          1UL << d->arch.htab.log_num_ptes);
    3.19      ASSERT(d->arch.htab.shadow != NULL);
    3.20 -
    3.21 -    printf("%s: dom%x sdr1: %lx\n", __func__, d->domain_id, d->arch.htab.sdr1);
    3.22  }
    3.23  
    3.24  void htab_free(struct domain *d)
    3.25  {
    3.26      ulong htab_raddr = GET_HTAB(d);
    3.27  
    3.28 -    free_xenheap_pages((void *)htab_raddr,
    3.29 -                       (1UL << d->arch.htab.log_num_ptes) << LOG_PTE_SIZE);
    3.30 +    free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
    3.31      xfree(d->arch.htab.shadow);
    3.32  }
    3.33  
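
The point of recording d->arch.htab.order is that free_xenheap_pages() expects the same page order that was handed to alloc_xenheap_pages(), whereas the old free path passed the table size in bytes. A minimal sketch of the intended pairing (prototype declarations matching how the functions are called above):

    void *alloc_xenheap_pages(unsigned int order);
    void free_xenheap_pages(void *v, unsigned int order);

    static void htab_pairing_sketch(unsigned int order)
    {
        void *htab = alloc_xenheap_pages(order);   /* 1UL << order pages */
        /* ... use the hash table ... */
        free_xenheap_pages(htab, order);           /* same order, not a byte count */
    }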
     4.1 --- a/xen/arch/powerpc/mm.c	Sun Aug 13 19:19:37 2006 -0400
     4.2 +++ b/xen/arch/powerpc/mm.c	Mon Aug 14 09:53:46 2006 -0400
     4.3 @@ -154,5 +154,4 @@ void guest_physmap_remove_page(
     4.4  void shadow_drop_references(
     4.5      struct domain *d, struct page_info *page)
     4.6  {
     4.7 -    panic("%s\n", __func__);
     4.8  }
     5.1 --- a/xen/arch/powerpc/powerpc64/ppc970.c	Sun Aug 13 19:19:37 2006 -0400
     5.2 +++ b/xen/arch/powerpc/powerpc64/ppc970.c	Mon Aug 14 09:53:46 2006 -0400
     5.3 @@ -34,7 +34,8 @@
     5.4  unsigned int cpu_rma_order(void)
     5.5  {
     5.6      /* XXX what about non-HV mode? */
     5.7 -    return 14; /* 1<<14<<PAGE_SIZE = 64M */
     5.8 +    uint rma_log_size = 6 + 20; /* 64M */
     5.9 +    return rma_log_size - PAGE_SHIFT;
    5.10  }
    5.11  
    5.12  void cpu_initialize(void)
     6.1 --- a/xen/arch/powerpc/setup.c	Sun Aug 13 19:19:37 2006 -0400
     6.2 +++ b/xen/arch/powerpc/setup.c	Mon Aug 14 09:53:46 2006 -0400
     6.3 @@ -273,12 +273,26 @@ static void __init __start_xen(multiboot
     6.4  
     6.5      printk("System RAM: %luMB (%lukB)\n", eomem >> 20, eomem >> 10);
     6.6  
     6.7 +    /* top of memory */
     6.8      max_page = PFN_DOWN(ALIGN_DOWN(eomem, PAGE_SIZE));
     6.9      total_pages = max_page;
    6.10  
    6.11 -    /* skip the exception handlers */
     6.12 +    /* Architecturally the first 4 pages are exception handlers; we
    6.13 +     * will also be copying down some code there */
    6.14      heap_start = init_boot_allocator(4 << PAGE_SHIFT);
    6.15  
    6.16 +    /* we give the first RMA to the hypervisor */
    6.17 +    xenheap_phys_end = rma_size(cpu_rma_order());
    6.18 +
    6.19 +    /* allow everything else to be allocated */
    6.20 +    init_boot_pages(xenheap_phys_end, eomem);
    6.21 +    init_frametable();
    6.22 +    end_boot_allocator();
    6.23 +
    6.24 +    /* Add memory between the beginning of the heap and the beginning
     6.25 +     * of our text */
    6.26 +    init_xenheap_pages(heap_start, (ulong)_start);
    6.27 +
    6.28      /* move the modules to just after _end */
    6.29      if (modules_start) {
    6.30          printk("modules at: %016lx - %016lx\n", modules_start,
    6.31 @@ -293,27 +307,22 @@ static void __init __start_xen(multiboot
    6.32                  modules_start + modules_size);
    6.33      }
    6.34  
    6.35 +    /* the rest of the xenheap, starting at the end of modules */
    6.36 +    init_xenheap_pages(freemem, xenheap_phys_end);
    6.37 +
    6.38 +
    6.39  #ifdef OF_DEBUG
    6.40      printk("ofdump:\n");
    6.41      /* make sure the OF devtree is good */
    6.42      ofd_walk((void *)oftree, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
    6.43  #endif
    6.44  
    6.45 -    percpu_init_areas();
    6.46 -
    6.47 -    /* mark all memory from modules onward as unused */
    6.48 -    init_boot_pages(freemem, eomem);
    6.49 -
    6.50 -    init_frametable();
    6.51 -    end_boot_allocator();
    6.52 -
    6.53 -    /* place the heap from after the allocator bitmap to _start */
    6.54 -    xenheap_phys_end = (ulong)_start;
    6.55 -    init_xenheap_pages(heap_start, xenheap_phys_end);
    6.56      heap_size = xenheap_phys_end - heap_start;
    6.57  
    6.58      printk("Xen heap: %luMB (%lukB)\n", heap_size >> 20, heap_size >> 10);
    6.59  
    6.60 +    percpu_init_areas();
    6.61 +
    6.62      cpu_initialize();
    6.63  
    6.64  #ifdef CONFIG_GDB
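
Seen as a whole, the setup.c changes give Xen the first RMA-sized chunk of physical memory as its heap and hand everything above that to the boot/domain allocator. A rough map of the resulting layout, reconstructed from the hunks above (illustrative; region names follow the code, and the 64 MiB figure assumes the PPC970 RMA order of 14):

    /*
     *  0 .. 4 pages                   exception handlers (reserved)
     *  4 pages .. heap_start          boot allocator bitmap
     *  heap_start .. _start           Xen heap        (init_xenheap_pages)
     *  _start .. _end                 Xen image, linked at ~4 MiB
     *  _end .. freemem                boot modules, copied down after _end
     *  freemem .. xenheap_phys_end    rest of the Xen heap (init_xenheap_pages)
     *      where xenheap_phys_end = rma_size(cpu_rma_order())  (64 MiB on PPC970)
     *  xenheap_phys_end .. eomem      domain heap     (init_boot_pages)
     *  eomem                          top of RAM (max_page)
     */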
     7.1 --- a/xen/include/asm-powerpc/domain.h	Sun Aug 13 19:19:37 2006 -0400
     7.2 +++ b/xen/include/asm-powerpc/domain.h	Mon Aug 14 09:53:46 2006 -0400
     7.3 @@ -107,7 +107,7 @@ extern void load_float(struct vcpu *);
     7.4  #define RMA_CONSOLE 3
     7.5  #define RMA_LAST_DOMU 3
     7.6  
     7.7 -#define rma_size(rma_order) (1UL << (rma_order) << PAGE_SHIFT)
     7.8 +#define rma_size(rma_order) (1UL << ((rma_order) + PAGE_SHIFT))
     7.9  
    7.10  static inline ulong rma_addr(struct arch_domain *ad, int type)
    7.11  {
     8.1 --- a/xen/include/asm-powerpc/htab.h	Sun Aug 13 19:19:37 2006 -0400
     8.2 +++ b/xen/include/asm-powerpc/htab.h	Mon Aug 14 09:53:46 2006 -0400
     8.3 @@ -128,7 +128,8 @@ union ptel {
     8.4  
     8.5  struct domain_htab {
     8.6      ulong sdr1;
     8.7 -    ulong log_num_ptes; /* log number of PTEs in HTAB. */
     8.8 +    uint log_num_ptes;  /* log number of PTEs in HTAB. */
     8.9 +    uint order;         /* order for freeing. */
    8.10      union pte *map;     /* access the htab like an array */
    8.11      ulong *shadow;      /* idx -> logical translation array */
    8.12  };
     9.1 --- a/xen/include/asm-powerpc/mm.h	Sun Aug 13 19:19:37 2006 -0400
     9.2 +++ b/xen/include/asm-powerpc/mm.h	Mon Aug 14 09:53:46 2006 -0400
     9.3 @@ -33,7 +33,7 @@
     9.4  #define memguard_unguard_range(_p,_l)    ((void)0)
     9.5  
     9.6  extern unsigned long xenheap_phys_end;
     9.7 -#define IS_XEN_HEAP_FRAME(_pfn) (page_to_mfn(_pfn) < xenheap_phys_end)
     9.8 +#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
     9.9  
    9.10  /*
    9.11   * Per-page-frame information.
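
The IS_XEN_HEAP_FRAME() change is a units fix: page_to_mfn() yields a machine frame number while xenheap_phys_end is a byte address, so the old test compared a frame number against an address. page_to_maddr() yields the frame's byte address, making the comparison consistent, as restated in this sketch of the hunk:

    /* page_to_mfn(pg):   machine frame number  (byte address >> PAGE_SHIFT)
     * page_to_maddr(pg): machine byte address  (frame number << PAGE_SHIFT)
     * xenheap_phys_end is a byte address, so compare addresses, not frame
     * numbers: */
    #define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)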