direct-io.hg

changeset 5399:849b58da37b7

bitkeeper revision 1.1701 (42a86974YOr10ovEkVsFyCaBsXe73A)

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/cl349/xen-unstable.bk
author cl349@firebug.cl.cam.ac.uk
date Thu Jun 09 16:08:20 2005 +0000 (2005-06-09)
parents 6d3e8f90c2df fcbdfa6fe74d
children d2b6518a768a
files	xen/arch/ia64/domain.c
	xen/arch/ia64/patch/linux-2.6.7/mm_contig.c
	xen/arch/ia64/xenmem.c
	xen/arch/x86/apic.c
	xen/arch/x86/dom0_ops.c
	xen/arch/x86/domain.c
	xen/arch/x86/mm.c
	xen/arch/x86/smpboot.c
	xen/arch/x86/vmx_vmcs.c
	xen/arch/x86/x86_32/mm.c
	xen/arch/x86/x86_64/mm.c
	xen/common/domain.c
	xen/common/grant_table.c
	xen/common/page_alloc.c
	xen/common/trace.c
	xen/common/xmalloc.c
	xen/drivers/char/console.c
	xen/drivers/char/serial.c
	xen/include/asm-x86/mm.h
	xen/include/asm-x86/page.h
	xen/include/asm-x86/x86_32/page-2level.h
	xen/include/xen/mm.h
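Although recorded as a merge, the substantive change this changeset carries is a cleanup of Xen's memory-allocator interfaces, visible in the hunks below: alloc_boot_pages() now works in page-frame units (frame count and frame alignment in, page frame number out) instead of byte sizes and physical addresses; alloc_xenheap_pages() and free_xenheap_pages() traffic in void * instead of unsigned long, removing casts at every call site and reporting failure as NULL; the init_* entry points take physaddr_t; frame_table_size ceases to be a global; and on x86, PAGE_MASK is widened to intpte_t while put_pte_flags() now masks its argument to the 12 flag bits. The before/after prototypes, from the xen/include/xen/mm.h hunk at the end:

    /* Old interface: byte sizes and unsigned-long addresses. */
    unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
    unsigned long alloc_xenheap_pages(unsigned int order);
    void free_xenheap_pages(unsigned long p, unsigned int order);

    /* New interface: frame units and typed pointers. */
    unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
    void *alloc_xenheap_pages(unsigned int order);
    void free_xenheap_pages(void *v, unsigned int order);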
line diff
     1.1 --- a/xen/arch/ia64/domain.c	Thu Jun 09 14:40:39 2005 +0000
     1.2 +++ b/xen/arch/ia64/domain.c	Thu Jun 09 16:08:20 2005 +0000
     1.3 @@ -680,7 +680,9 @@ void alloc_dom0(void)
     1.4        * Some old version linux, like 2.4, assumes physical memory existing
     1.5        * in 2nd 64M space.
     1.6        */
     1.7 -     dom0_start = alloc_boot_pages(dom0_size,dom0_align);
     1.8 +     dom0_start = alloc_boot_pages(
     1.9 +         dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
    1.10 +     dom0_start <<= PAGE_SHIFT;
    1.11  	if (!dom0_start) {
    1.12  	printf("construct_dom0: can't allocate contiguous memory size=%p\n",
    1.13  		dom0_size);
    1.14 @@ -698,7 +700,9 @@ void alloc_domU_staging(void)
    1.15  {
    1.16  	domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
    1.17  	printf("alloc_domU_staging: starting (initializing %d MB...)\n",domU_staging_size/(1024*1024));
    1.18 -	domU_staging_start= alloc_boot_pages(domU_staging_size,domU_staging_align);
    1.19 +	domU_staging_start = alloc_boot_pages(
    1.20 +            domU_staging_size >> PAGE_SHIFT, domU_staging_align >> PAGE_SHIFT);
    1.21 +        domU_staging_start <<= PAGE_SHIFT;
    1.22  	if (!domU_staging_size) {
    1.23  		printf("alloc_domU_staging: can't allocate, spinning...\n");
    1.24  		while(1);
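The pattern in the two hunks above recurs at every alloc_boot_pages() call site in this changeset: byte sizes become frame counts on the way in, and the returned page frame number is shifted back up into a physical address. Note that the old implementation rounded sizes up internally (see the round_pgup() lines removed from xen/common/page_alloc.c below), so callers must now pass page-aligned sizes for a plain shift to be safe. A minimal sketch of the conversion, using a hypothetical wrapper name:

    /* Sketch only: a byte-based request on top of the new frame-based
     * interface.  Assumes size and align are multiples of PAGE_SIZE,
     * as they are at the call sites in this changeset. */
    unsigned long alloc_boot_bytes(unsigned long size, unsigned long align)
    {
        unsigned long pfn;
        pfn = alloc_boot_pages(size >> PAGE_SHIFT, align >> PAGE_SHIFT);
        if ( pfn == 0 )
            return 0;                 /* allocation failed */
        return pfn << PAGE_SHIFT;     /* frame number -> physical address */
    }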
     2.1 --- a/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c	Thu Jun 09 14:40:39 2005 +0000
     2.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c	Thu Jun 09 16:08:20 2005 +0000
     2.3 @@ -204,7 +204,7 @@
     2.4  +
     2.5  +	/* Request continuous trunk from boot allocator, since HV
     2.6  +	 * address is identity mapped */
     2.7 -+	p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
     2.8 ++	p = alloc_boot_pages(frame_table_size>>PAGE_SHIFT, FT_ALIGN_SIZE>>PAGE_SHIFT) << PAGE_SHIFT;
     2.9  +	if (p == 0)
    2.10  +		panic("Not enough memory for frame table.\n");
    2.11  +
     3.1 --- a/xen/arch/ia64/xenmem.c	Thu Jun 09 14:40:39 2005 +0000
     3.2 +++ b/xen/arch/ia64/xenmem.c	Thu Jun 09 16:08:20 2005 +0000
     3.3 @@ -82,17 +82,18 @@ paging_init (void)
     3.4  #define FT_ALIGN_SIZE	(16UL << 20)
     3.5  void __init init_frametable(void)
     3.6  {
     3.7 -	unsigned long i, p;
     3.8 +	unsigned long i, pfn;
     3.9  	frame_table_size = max_page * sizeof(struct pfn_info);
    3.10  	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    3.11  
    3.12  	/* Request continuous trunk from boot allocator, since HV
    3.13  	 * address is identity mapped */
    3.14 -	p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
    3.15 -	if (p == 0)
    3.16 +	pfn = alloc_boot_pages(
    3.17 +            frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
    3.18 +	if (pfn == 0)
    3.19  		panic("Not enough memory for frame table.\n");
    3.20  
    3.21 -	frame_table = __va(p);
    3.22 +	frame_table = __va(pfn << PAGE_SHIFT);
    3.23  	memset(frame_table, 0, frame_table_size);
    3.24  	printk("size of frame_table: %lukB\n",
    3.25  		frame_table_size >> 10);
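The p -> pfn rename documents the unit change, and the shift back to a byte address must happen before the virtual-address translation, not after. Assuming __va() is the usual physical-to-virtual conversion for the identity-mapped hypervisor range:

    frame_table = __va(pfn << PAGE_SHIFT);   /* pfn -> paddr -> vaddr */
    /* not __va(pfn): that would translate a frame number as if it were
     * a byte address, landing a factor of PAGE_SIZE too low. */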
     4.1 --- a/xen/arch/x86/apic.c	Thu Jun 09 14:40:39 2005 +0000
     4.2 +++ b/xen/arch/x86/apic.c	Thu Jun 09 16:08:20 2005 +0000
     4.3 @@ -580,10 +580,9 @@ void __init init_apic_mappings(void)
     4.4       * zeroes page to simulate the local APIC and another
     4.5       * one for the IO-APIC.
     4.6       */
     4.7 -    if (!smp_found_config && detect_init_APIC()) {
     4.8 -        apic_phys = alloc_xenheap_page();
     4.9 -        apic_phys = __pa(apic_phys);
    4.10 -    } else
    4.11 +    if (!smp_found_config && detect_init_APIC())
    4.12 +        apic_phys = __pa(alloc_xenheap_page());
    4.13 +    else
    4.14          apic_phys = mp_lapic_addr;
    4.15  
    4.16      set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
    4.17 @@ -616,8 +615,7 @@ void __init init_apic_mappings(void)
    4.18                  }
    4.19              } else {
    4.20  fake_ioapic_page:
    4.21 -                ioapic_phys = alloc_xenheap_page();
    4.22 -                ioapic_phys = __pa(ioapic_phys);
    4.23 +                ioapic_phys = __pa(alloc_xenheap_page());
    4.24              }
    4.25              set_fixmap_nocache(idx, ioapic_phys);
    4.26              apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
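Because alloc_xenheap_page() now returns a pointer, the allocate-then-reassign dance through an unsigned long collapses into a single expression; the same simplification is applied to the fake IO-APIC page above. Sketch, where __pa() maps a xenheap virtual address to its physical address:

    apic_phys = __pa(alloc_xenheap_page());   /* vaddr -> paddr in one step */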
     5.1 --- a/xen/arch/x86/dom0_ops.c	Thu Jun 09 14:40:39 2005 +0000
     5.2 +++ b/xen/arch/x86/dom0_ops.c	Thu Jun 09 16:08:20 2005 +0000
     5.3 @@ -259,7 +259,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     5.4              break;
     5.5          }
     5.6  
     5.7 -        l_arr = (unsigned long *)alloc_xenheap_page();
     5.8 +        l_arr = alloc_xenheap_page();
     5.9   
    5.10          ret = 0;
    5.11          for( n = 0; n < num; )
    5.12 @@ -324,7 +324,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    5.13              n += j;
    5.14          }
    5.15  
    5.16 -        free_xenheap_page((unsigned long)l_arr);
    5.17 +        free_xenheap_page(l_arr);
    5.18  
    5.19          put_domain(d);
    5.20      }
     6.1 --- a/xen/arch/x86/domain.c	Thu Jun 09 14:40:39 2005 +0000
     6.2 +++ b/xen/arch/x86/domain.c	Thu Jun 09 16:08:20 2005 +0000
     6.3 @@ -222,10 +222,10 @@ void arch_free_vcpu_struct(struct vcpu *
     6.4  
     6.5  void free_perdomain_pt(struct domain *d)
     6.6  {
     6.7 -    free_xenheap_page((unsigned long)d->arch.mm_perdomain_pt);
     6.8 +    free_xenheap_page(d->arch.mm_perdomain_pt);
     6.9  #ifdef __x86_64__
    6.10 -    free_xenheap_page((unsigned long)d->arch.mm_perdomain_l2);
    6.11 -    free_xenheap_page((unsigned long)d->arch.mm_perdomain_l3);
    6.12 +    free_xenheap_page(d->arch.mm_perdomain_l2);
    6.13 +    free_xenheap_page(d->arch.mm_perdomain_l3);
    6.14  #endif
    6.15  }
    6.16  
    6.17 @@ -240,7 +240,7 @@ void arch_do_createdomain(struct vcpu *v
    6.18  
    6.19      v->arch.schedule_tail = continue_nonidle_task;
    6.20      
    6.21 -    d->shared_info = (void *)alloc_xenheap_page();
    6.22 +    d->shared_info = alloc_xenheap_page();
    6.23      memset(d->shared_info, 0, PAGE_SIZE);
    6.24      v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    6.25      v->cpumap = CPUMAP_RUNANYWHERE;
    6.26 @@ -248,7 +248,7 @@ void arch_do_createdomain(struct vcpu *v
    6.27      machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    6.28                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
    6.29      
    6.30 -    d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
    6.31 +    d->arch.mm_perdomain_pt = alloc_xenheap_page();
    6.32      memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
    6.33      machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
    6.34                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
    6.35 @@ -263,12 +263,12 @@ void arch_do_createdomain(struct vcpu *v
    6.36      v->arch.guest_vl3table = __linear_l3_table;
    6.37      v->arch.guest_vl4table = __linear_l4_table;
    6.38      
    6.39 -    d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
    6.40 +    d->arch.mm_perdomain_l2 = alloc_xenheap_page();
    6.41      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
    6.42      d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
    6.43          l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
    6.44                          __PAGE_HYPERVISOR);
    6.45 -    d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
    6.46 +    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
    6.47      memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
    6.48      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
    6.49          l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
     7.1 --- a/xen/arch/x86/mm.c	Thu Jun 09 14:40:39 2005 +0000
     7.2 +++ b/xen/arch/x86/mm.c	Thu Jun 09 16:08:20 2005 +0000
     7.3 @@ -145,31 +145,28 @@ static struct domain *dom_xen, *dom_io;
     7.4  
     7.5  /* Frame table and its size in pages. */
     7.6  struct pfn_info *frame_table;
     7.7 -unsigned long frame_table_size;
     7.8  unsigned long max_page;
     7.9  
    7.10  void __init init_frametable(void)
    7.11  {
    7.12 -    unsigned long i, p, step;
    7.13 -
    7.14 -    frame_table      = (struct pfn_info *)FRAMETABLE_VIRT_START;
    7.15 -    frame_table_size = max_page * sizeof(struct pfn_info);
    7.16 -    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    7.17 -
    7.18 -    step = (1 << L2_PAGETABLE_SHIFT);
    7.19 -    for ( i = 0; i < frame_table_size; i += step )
    7.20 +    unsigned long nr_pages, page_step, i, pfn;
    7.21 +
    7.22 +    frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
    7.23 +
    7.24 +    nr_pages  = PFN_UP(max_page * sizeof(*frame_table));
    7.25 +    page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
    7.26 +
    7.27 +    for ( i = 0; i < nr_pages; i += page_step )
    7.28      {
    7.29 -        p = alloc_boot_pages(min(frame_table_size - i, step), step);
    7.30 -        if ( p == 0 )
    7.31 +        pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
    7.32 +        if ( pfn == 0 )
    7.33              panic("Not enough memory for frame table\n");
    7.34          map_pages_to_xen(
    7.35 -            FRAMETABLE_VIRT_START + i,
    7.36 -            p >> PAGE_SHIFT,
    7.37 -            step >> PAGE_SHIFT,
    7.38 -            PAGE_HYPERVISOR);
    7.39 +            FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
    7.40 +            pfn, page_step, PAGE_HYPERVISOR);
    7.41      }
    7.42  
    7.43 -    memset(frame_table, 0, frame_table_size);
    7.44 +    memset(frame_table, 0, nr_pages << PAGE_SHIFT);
    7.45  }
    7.46  
    7.47  void arch_init_memory(void)
    7.48 @@ -2954,15 +2951,15 @@ int ptwr_do_page_fault(struct domain *d,
    7.49  
    7.50  int ptwr_init(struct domain *d)
    7.51  {
    7.52 -    void *x = (void *)alloc_xenheap_page();
    7.53 -    void *y = (void *)alloc_xenheap_page();
    7.54 +    void *x = alloc_xenheap_page();
    7.55 +    void *y = alloc_xenheap_page();
    7.56  
    7.57      if ( (x == NULL) || (y == NULL) )
    7.58      {
    7.59          if ( x != NULL )
    7.60 -            free_xenheap_page((unsigned long)x);
    7.61 +            free_xenheap_page(x);
    7.62          if ( y != NULL )
    7.63 -            free_xenheap_page((unsigned long)y);
    7.64 +            free_xenheap_page(y);
    7.65          return -ENOMEM;
    7.66      }
    7.67  
    7.68 @@ -2975,8 +2972,8 @@ int ptwr_init(struct domain *d)
    7.69  void ptwr_destroy(struct domain *d)
    7.70  {
    7.71      cleanup_writable_pagetable(d);
    7.72 -    free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_ACTIVE].page);
    7.73 -    free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page);
    7.74 +    free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page);
    7.75 +    free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page);
    7.76  }
    7.77  
    7.78  void cleanup_writable_pagetable(struct domain *d)
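init_frametable() now works entirely in frame units: PFN_UP() rounds the table's byte size up to whole frames, and the mapping stride becomes a frame count rather than a byte count. A worked example with illustrative numbers (conventional PFN_UP() definition assumed, L2_PAGETABLE_SHIFT == 21 as on PAE/x86-64, and a hypothetical 40-byte struct pfn_info):

    #define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)   /* assumed */

    /* max_page = 262144 frames (1GB of RAM, 4KB pages):
     *   nr_pages  = PFN_UP(262144 * 40)      = 2560 frames
     *   page_step = (1 << 21) >> PAGE_SHIFT  = 512 frames (one 2MB extent)
     * so the loop performs ceil(2560 / 512) = 5 allocate-and-map steps,
     * each allocation aligned to page_step so map_pages_to_xen() can use
     * a superpage mapping. */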
     8.1 --- a/xen/arch/x86/smpboot.c	Thu Jun 09 14:40:39 2005 +0000
     8.2 +++ b/xen/arch/x86/smpboot.c	Thu Jun 09 16:08:20 2005 +0000
     8.3 @@ -781,7 +781,7 @@ static int __init do_boot_cpu(int apicid
     8.4  	/* So we see what's up   */
     8.5  	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
     8.6  
     8.7 -	stack = (void *)alloc_xenheap_pages(STACK_ORDER);
     8.8 +	stack = alloc_xenheap_pages(STACK_ORDER);
     8.9  #if defined(__i386__)
    8.10  	stack_start.esp = (void *)__pa(stack);
    8.11  #elif defined(__x86_64__)
     9.1 --- a/xen/arch/x86/vmx_vmcs.c	Thu Jun 09 14:40:39 2005 +0000
     9.2 +++ b/xen/arch/x86/vmx_vmcs.c	Thu Jun 09 16:08:20 2005 +0000
     9.3 @@ -41,8 +41,8 @@ struct vmcs_struct *alloc_vmcs(void)
     9.4  
     9.5      rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
     9.6      vmcs_size = vmx_msr_high & 0x1fff;
     9.7 -    vmcs = (struct vmcs_struct *) alloc_xenheap_pages(get_order(vmcs_size)); 
     9.8 -    memset((char *) vmcs, 0, vmcs_size); /* don't remove this */
     9.9 +    vmcs = alloc_xenheap_pages(get_order(vmcs_size)); 
    9.10 +    memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
    9.11  
    9.12      vmcs->vmcs_revision_id = vmx_msr_low;
    9.13      return vmcs;
    9.14 @@ -53,7 +53,7 @@ void free_vmcs(struct vmcs_struct *vmcs)
    9.15      int order;
    9.16  
    9.17      order = (vmcs_size >> PAGE_SHIFT) - 1;
    9.18 -    free_xenheap_pages((unsigned long) vmcs, order);
    9.19 +    free_xenheap_pages(vmcs, order);
    9.20  }
    9.21  
    9.22  static inline int construct_vmcs_controls(void)
    10.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Jun 09 14:40:39 2005 +0000
    10.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu Jun 09 16:08:20 2005 +0000
    10.3 @@ -43,7 +43,7 @@ struct pfn_info *alloc_xen_pagetable(voi
    10.4  
    10.5      if ( !early_boot )
    10.6      {
    10.7 -        void *v = (void *)alloc_xenheap_page();
    10.8 +        void *v = alloc_xenheap_page();
    10.9          return ((v == NULL) ? NULL : virt_to_page(v));
   10.10      }
   10.11  
   10.12 @@ -54,7 +54,7 @@ struct pfn_info *alloc_xen_pagetable(voi
   10.13  
   10.14  void free_xen_pagetable(struct pfn_info *pg)
   10.15  {
   10.16 -    free_xenheap_page((unsigned long)page_to_virt(pg));
   10.17 +    free_xenheap_page(page_to_virt(pg));
   10.18  }
   10.19  
   10.20  l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
   10.21 @@ -113,7 +113,7 @@ void __init paging_init(void)
   10.22      /* Create page tables for ioremap(). */
   10.23      for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
   10.24      {
   10.25 -        ioremap_pt = (void *)alloc_xenheap_page();
   10.26 +        ioremap_pt = alloc_xenheap_page();
   10.27          clear_page(ioremap_pt);
   10.28          idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] =
   10.29              l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
   10.30 @@ -121,7 +121,7 @@ void __init paging_init(void)
   10.31  
   10.32      /* Set up mapping cache for domain pages. */
   10.33      mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
   10.34 -    mapcache = (l1_pgentry_t *)alloc_xenheap_pages(mapcache_order);
   10.35 +    mapcache = alloc_xenheap_pages(mapcache_order);
   10.36      memset(mapcache, 0, PAGE_SIZE << mapcache_order);
   10.37      for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
   10.38          idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] =
    11.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Jun 09 14:40:39 2005 +0000
    11.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Jun 09 16:08:20 2005 +0000
    11.3 @@ -32,13 +32,13 @@
    11.4  struct pfn_info *alloc_xen_pagetable(void)
    11.5  {
    11.6      extern int early_boot;
    11.7 -    unsigned long p;
    11.8 +    unsigned long pfn;
    11.9  
   11.10      if ( !early_boot )
   11.11          return alloc_domheap_page(NULL);
   11.12  
   11.13 -    p = alloc_boot_pages(PAGE_SIZE, PAGE_SIZE);
   11.14 -    return ((p == 0) ? NULL : phys_to_page(p));
   11.15 +    pfn = alloc_boot_pages(1, 1);
   11.16 +    return ((pfn == 0) ? NULL : pfn_to_page(pfn));
   11.17  }
   11.18  
   11.19  void free_xen_pagetable(struct pfn_info *pg)
   11.20 @@ -82,12 +82,12 @@ void __init paging_init(void)
   11.21      idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   11.22  
   11.23      /* Create user-accessible L2 directory to map the MPT for guests. */
   11.24 -    l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
   11.25 +    l3_ro_mpt = alloc_xenheap_page();
   11.26      clear_page(l3_ro_mpt);
   11.27      idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
   11.28          l4e_from_page(
   11.29              virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
   11.30 -    l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
   11.31 +    l2_ro_mpt = alloc_xenheap_page();
   11.32      clear_page(l2_ro_mpt);
   11.33      l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
   11.34          l3e_from_page(
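In the early-boot branch the request is now literal: one frame with an alignment of one frame (i.e. no constraint), and the result is translated with pfn_to_page() instead of detouring through a physical address. As before, a returned pfn of 0 is the failure sentinel, which relies on frame 0 never being a valid boot-allocator result:

    pfn = alloc_boot_pages(1, 1);   /* one frame, no alignment constraint */
    return (pfn == 0) ? NULL : pfn_to_page(pfn);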
    12.1 --- a/xen/common/domain.c	Thu Jun 09 14:40:39 2005 +0000
    12.2 +++ b/xen/common/domain.c	Thu Jun 09 16:08:20 2005 +0000
    12.3 @@ -266,7 +266,7 @@ void domain_destruct(struct domain *d)
    12.4      grant_table_destroy(d);
    12.5  
    12.6      free_perdomain_pt(d);
    12.7 -    free_xenheap_page((unsigned long)d->shared_info);
    12.8 +    free_xenheap_page(d->shared_info);
    12.9  
   12.10      free_domain_struct(d);
   12.11  
    13.1 --- a/xen/common/grant_table.c	Thu Jun 09 14:40:39 2005 +0000
    13.2 +++ b/xen/common/grant_table.c	Thu Jun 09 16:08:20 2005 +0000
    13.3 @@ -375,7 +375,7 @@ static int
    13.4          grant_table_t   *lgt      = ld->grant_table;
    13.5  
    13.6          /* Grow the maptrack table. */
    13.7 -        new_mt = (void *)alloc_xenheap_pages(lgt->maptrack_order + 1);
    13.8 +        new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
    13.9          if ( new_mt == NULL )
   13.10          {
   13.11              put_domain(rd);
   13.12 @@ -388,7 +388,7 @@ static int
   13.13          for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
   13.14              new_mt[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
   13.15  
   13.16 -        free_xenheap_pages((unsigned long)lgt->maptrack, lgt->maptrack_order);
   13.17 +        free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
   13.18          lgt->maptrack          = new_mt;
   13.19          lgt->maptrack_order   += 1;
   13.20          lgt->maptrack_limit  <<= 1;
   13.21 @@ -1095,7 +1095,7 @@ grant_table_create(
   13.22      memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
   13.23  
   13.24      /* Tracking of mapped foreign frames table */
   13.25 -    if ( (t->maptrack = (void *)alloc_xenheap_page()) == NULL )
   13.26 +    if ( (t->maptrack = alloc_xenheap_page()) == NULL )
   13.27          goto no_mem;
   13.28      t->maptrack_order = 0;
   13.29      t->maptrack_limit = PAGE_SIZE / sizeof(grant_mapping_t);
   13.30 @@ -1104,7 +1104,7 @@ grant_table_create(
   13.31          t->maptrack[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
   13.32  
   13.33      /* Shared grant table. */
   13.34 -    t->shared = (void *)alloc_xenheap_pages(ORDER_GRANT_FRAMES);
   13.35 +    t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
   13.36      if ( t->shared == NULL )
   13.37          goto no_mem;
   13.38      memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
   13.39 @@ -1127,7 +1127,7 @@ grant_table_create(
   13.40      {
   13.41          xfree(t->active);
   13.42          if ( t->maptrack != NULL )
   13.43 -            free_xenheap_page((unsigned long)t->maptrack);
   13.44 +            free_xenheap_page(t->maptrack);
   13.45          xfree(t);
   13.46      }
   13.47      return -ENOMEM;
   13.48 @@ -1216,8 +1216,8 @@ grant_table_destroy(
   13.49      {
   13.50          /* Free memory relating to this grant table. */
   13.51          d->grant_table = NULL;
   13.52 -        free_xenheap_pages((unsigned long)t->shared, ORDER_GRANT_FRAMES);
   13.53 -        free_xenheap_page((unsigned long)t->maptrack);
   13.54 +        free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
   13.55 +        free_xenheap_page(t->maptrack);
   13.56          xfree(t->active);
   13.57          xfree(t);
   13.58      }
    14.1 --- a/xen/common/page_alloc.c	Thu Jun 09 14:40:39 2005 +0000
    14.2 +++ b/xen/common/page_alloc.c	Thu Jun 09 16:08:20 2005 +0000
    14.3 @@ -133,7 +133,7 @@ static void map_free(unsigned long first
    14.4   */
    14.5  
    14.6  /* Initialise allocator to handle up to @max_page pages. */
    14.7 -unsigned long init_boot_allocator(unsigned long bitmap_start)
    14.8 +physaddr_t init_boot_allocator(physaddr_t bitmap_start)
    14.9  {
   14.10      bitmap_start = round_pgup(bitmap_start);
   14.11  
   14.12 @@ -148,7 +148,7 @@ unsigned long init_boot_allocator(unsign
   14.13      return bitmap_start + bitmap_size;
   14.14  }
   14.15  
   14.16 -void init_boot_pages(unsigned long ps, unsigned long pe)
   14.17 +void init_boot_pages(physaddr_t ps, physaddr_t pe)
   14.18  {
   14.19      unsigned long bad_pfn;
   14.20      char *p;
   14.21 @@ -179,23 +179,20 @@ void init_boot_pages(unsigned long ps, u
   14.22      }
   14.23  }
   14.24  
   14.25 -unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
   14.26 +unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
   14.27  {
   14.28      unsigned long pg, i;
   14.29  
   14.30 -    size  = round_pgup(size) >> PAGE_SHIFT;
   14.31 -    align = round_pgup(align) >> PAGE_SHIFT;
   14.32 -
   14.33 -    for ( pg = 0; (pg + size) < (bitmap_size*8); pg += align )
   14.34 +    for ( pg = 0; (pg + nr_pfns) < (bitmap_size*8); pg += pfn_align )
   14.35      {
   14.36 -        for ( i = 0; i < size; i++ )
   14.37 +        for ( i = 0; i < nr_pfns; i++ )
   14.38              if ( allocated_in_map(pg + i) )
   14.39                   break;
   14.40  
   14.41 -        if ( i == size )
   14.42 +        if ( i == nr_pfns )
   14.43          {
   14.44 -            map_alloc(pg, size);
   14.45 -            return pg << PAGE_SHIFT;
   14.46 +            map_alloc(pg, nr_pfns);
   14.47 +            return pg;
   14.48          }
   14.49      }
   14.50  
   14.51 @@ -402,14 +399,14 @@ void scrub_heap_pages(void)
   14.52   * XEN-HEAP SUB-ALLOCATOR
   14.53   */
   14.54  
   14.55 -void init_xenheap_pages(unsigned long ps, unsigned long pe)
   14.56 +void init_xenheap_pages(physaddr_t ps, physaddr_t pe)
   14.57  {
   14.58      unsigned long flags;
   14.59  
   14.60      ps = round_pgup(ps);
   14.61      pe = round_pgdown(pe);
   14.62  
   14.63 -    memguard_guard_range(__va(ps), pe - ps);
   14.64 +    memguard_guard_range(phys_to_virt(ps), pe - ps);
   14.65  
   14.66      /*
   14.67       * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
   14.68 @@ -424,7 +421,7 @@ void init_xenheap_pages(unsigned long ps
   14.69  }
   14.70  
   14.71  
   14.72 -unsigned long alloc_xenheap_pages(unsigned int order)
   14.73 +void *alloc_xenheap_pages(unsigned int order)
   14.74  {
   14.75      unsigned long flags;
   14.76      struct pfn_info *pg;
   14.77 @@ -446,22 +443,22 @@ unsigned long alloc_xenheap_pages(unsign
   14.78          pg[i].u.inuse.type_info = 0;
   14.79      }
   14.80  
   14.81 -    return (unsigned long)page_to_virt(pg);
   14.82 +    return page_to_virt(pg);
   14.83  
   14.84   no_memory:
   14.85      printk("Cannot handle page request order %d!\n", order);
   14.86 -    return 0;
   14.87 +    return NULL;
   14.88  }
   14.89  
   14.90  
   14.91 -void free_xenheap_pages(unsigned long p, unsigned int order)
   14.92 +void free_xenheap_pages(void *v, unsigned int order)
   14.93  {
   14.94      unsigned long flags;
   14.95  
   14.96 -    memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT));    
   14.97 +    memguard_guard_range(v, 1 << (order + PAGE_SHIFT));    
   14.98  
   14.99      local_irq_save(flags);
  14.100 -    free_heap_pages(MEMZONE_XEN, virt_to_page(p), order);
  14.101 +    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
  14.102      local_irq_restore(flags);
  14.103  }
  14.104  
  14.105 @@ -471,7 +468,7 @@ void free_xenheap_pages(unsigned long p,
  14.106   * DOMAIN-HEAP SUB-ALLOCATOR
  14.107   */
  14.108  
  14.109 -void init_domheap_pages(unsigned long ps, unsigned long pe)
  14.110 +void init_domheap_pages(physaddr_t ps, physaddr_t pe)
  14.111  {
  14.112      ASSERT(!in_irq());
  14.113  
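Taken together, the hunks above give the allocators a consistent unit discipline: the boot allocator deals purely in frame numbers, the xenheap interface deals purely in pointers, and physical byte addresses (physaddr_t) appear only at the initialisation boundaries. A short usage sketch against the new interface (hypothetical caller, illustrative sizes):

    /* Boot time: 16 frames, aligned to a 16-frame boundary. */
    unsigned long pfn = alloc_boot_pages(16, 16);
    if ( pfn == 0 )
        panic("out of boot memory\n");
    void *v = phys_to_virt((physaddr_t)pfn << PAGE_SHIFT);

    /* After bootstrap: an order-2 (four-page) xenheap buffer;
     * failure is now NULL rather than 0. */
    void *buf = alloc_xenheap_pages(2);
    if ( buf != NULL )
        free_xenheap_pages(buf, 2);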
    15.1 --- a/xen/common/trace.c	Thu Jun 09 14:40:39 2005 +0000
    15.2 +++ b/xen/common/trace.c	Thu Jun 09 16:08:20 2005 +0000
    15.3 @@ -68,7 +68,7 @@ void init_trace_bufs(void)
    15.4      nr_pages = num_online_cpus() * opt_tbuf_size;
    15.5      order    = get_order(nr_pages * PAGE_SIZE);
    15.6      
    15.7 -    if ( (rawbuf = (char *)alloc_xenheap_pages(order)) == NULL )
    15.8 +    if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    15.9      {
   15.10          printk("Xen trace buffers: memory allocation failed\n");
   15.11          return;
    16.1 --- a/xen/common/xmalloc.c	Thu Jun 09 14:40:39 2005 +0000
    16.2 +++ b/xen/common/xmalloc.c	Thu Jun 09 16:08:20 2005 +0000
    16.3 @@ -71,7 +71,7 @@ static void *xmalloc_new_page(size_t siz
    16.4      struct xmalloc_hdr *hdr;
    16.5      unsigned long flags;
    16.6  
    16.7 -    hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(0);
    16.8 +    hdr = alloc_xenheap_page();
    16.9      if ( hdr == NULL )
   16.10          return NULL;
   16.11  
   16.12 @@ -88,7 +88,7 @@ static void *xmalloc_whole_pages(size_t 
   16.13      struct xmalloc_hdr *hdr;
   16.14      unsigned int pageorder = get_order(size);
   16.15  
   16.16 -    hdr = (struct xmalloc_hdr *)alloc_xenheap_pages(pageorder);
   16.17 +    hdr = alloc_xenheap_pages(pageorder);
   16.18      if ( hdr == NULL )
   16.19          return NULL;
   16.20  
   16.21 @@ -157,7 +157,7 @@ void xfree(const void *p)
   16.22      /* Big allocs free directly. */
   16.23      if ( hdr->size >= PAGE_SIZE )
   16.24      {
   16.25 -        free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
   16.26 +        free_xenheap_pages(hdr, get_order(hdr->size));
   16.27          return;
   16.28      }
   16.29  
   16.30 @@ -192,7 +192,7 @@ void xfree(const void *p)
   16.31      if ( hdr->size == PAGE_SIZE )
   16.32      {
   16.33          BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
   16.34 -        free_xenheap_pages((unsigned long)hdr, 0);
   16.35 +        free_xenheap_pages(hdr, 0);
   16.36      }
   16.37      else
   16.38      {
    17.1 --- a/xen/drivers/char/console.c	Thu Jun 09 14:40:39 2005 +0000
    17.2 +++ b/xen/drivers/char/console.c	Thu Jun 09 16:08:20 2005 +0000
    17.3 @@ -628,7 +628,7 @@ static int __init debugtrace_init(void)
    17.4          return 0;
    17.5  
    17.6      order = get_order(bytes);
    17.7 -    debugtrace_buf = (char *)alloc_xenheap_pages(order);
    17.8 +    debugtrace_buf = alloc_xenheap_pages(order);
    17.9      ASSERT(debugtrace_buf != NULL);
   17.10  
   17.11      memset(debugtrace_buf, '\0', bytes);
    18.1 --- a/xen/drivers/char/serial.c	Thu Jun 09 14:40:39 2005 +0000
    18.2 +++ b/xen/drivers/char/serial.c	Thu Jun 09 16:08:20 2005 +0000
    18.3 @@ -363,7 +363,7 @@ void serial_async_transmit(struct serial
    18.4  {
    18.5      BUG_ON(!port->driver->tx_empty);
    18.6      if ( !port->txbuf )
    18.7 -        port->txbuf = (char *)alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
    18.8 +        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
    18.9  }
   18.10  
   18.11  /*
    19.1 --- a/xen/include/asm-x86/mm.h	Thu Jun 09 14:40:39 2005 +0000
    19.2 +++ b/xen/include/asm-x86/mm.h	Thu Jun 09 16:08:20 2005 +0000
    19.3 @@ -141,7 +141,6 @@ static inline u32 pickle_domptr(struct d
    19.4      } while ( 0 )
    19.5  
    19.6  extern struct pfn_info *frame_table;
    19.7 -extern unsigned long frame_table_size;
    19.8  extern unsigned long max_page;
    19.9  void init_frametable(void);
   19.10  
    20.1 --- a/xen/include/asm-x86/page.h	Thu Jun 09 14:40:39 2005 +0000
    20.2 +++ b/xen/include/asm-x86/page.h	Thu Jun 09 16:08:20 2005 +0000
    20.3 @@ -7,7 +7,7 @@
    20.4  #else
    20.5  #define PAGE_SIZE           (1 << PAGE_SHIFT)
    20.6  #endif
    20.7 -#define PAGE_MASK           (~(PAGE_SIZE-1))
    20.8 +#define PAGE_MASK           (~(intpte_t)(PAGE_SIZE-1))
    20.9  #define PAGE_FLAG_MASK      (~0U)
   20.10  
   20.11  #ifndef __ASSEMBLY__
    21.1 --- a/xen/include/asm-x86/x86_32/page-2level.h	Thu Jun 09 14:40:39 2005 +0000
    21.2 +++ b/xen/include/asm-x86/x86_32/page-2level.h	Thu Jun 09 16:08:20 2005 +0000
    21.3 @@ -48,7 +48,7 @@ typedef l2_pgentry_t root_pgentry_t;
    21.4  
    21.5  /* Extract flags into 12-bit integer, or turn 12-bit flags into a pte mask. */
    21.6  #define get_pte_flags(x) ((int)(x) & 0xFFF)
    21.7 -#define put_pte_flags(x) ((intpte_t)(x))
    21.8 +#define put_pte_flags(x) ((intpte_t)((x) & 0xFFF))
    21.9  
   21.10  #define L1_DISALLOW_MASK (0xFFFFF180U) /* PAT/GLOBAL */
   21.11  #define L2_DISALLOW_MASK (0xFFFFF180U) /* PSE/GLOBAL */
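Masking in put_pte_flags() makes it a true inverse of get_pte_flags() on the 12-bit flag field: bits above the flag positions in the argument can no longer leak into a constructed pte. An illustrative round-trip using only the two macros above:

    intpte_t pte = 0x12345000 | 0x027;   /* frame bits | flag bits */
    int      f   = get_pte_flags(pte);   /* 0x027 */
    intpte_t m   = put_pte_flags(f);     /* 0x027: frame bits stay out */
    /* With the old definition, put_pte_flags() applied to a value with
     * bits set above 0xFFF would have ORed those bits straight into the
     * pte built by the l?e_from_*() constructors. */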
    22.1 --- a/xen/include/xen/mm.h	Thu Jun 09 14:40:39 2005 +0000
    22.2 +++ b/xen/include/xen/mm.h	Thu Jun 09 16:08:20 2005 +0000
    22.3 @@ -3,6 +3,7 @@
    22.4  #define __XEN_MM_H__
    22.5  
    22.6  #include <xen/config.h>
    22.7 +#include <xen/types.h>
    22.8  #include <xen/list.h>
    22.9  #include <xen/spinlock.h>
   22.10  
   22.11 @@ -10,9 +11,9 @@ struct domain;
   22.12  struct pfn_info;
   22.13  
   22.14  /* Boot-time allocator. Turns into generic allocator after bootstrap. */
   22.15 -unsigned long init_boot_allocator(unsigned long bitmap_start);
   22.16 -void init_boot_pages(unsigned long ps, unsigned long pe);
   22.17 -unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
   22.18 +physaddr_t init_boot_allocator(physaddr_t bitmap_start);
   22.19 +void init_boot_pages(physaddr_t ps, physaddr_t pe);
   22.20 +unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
   22.21  void end_boot_allocator(void);
   22.22  
   22.23  /* Generic allocator. These functions are *not* interrupt-safe. */
   22.24 @@ -24,19 +25,19 @@ void free_heap_pages(
   22.25  void scrub_heap_pages(void);
   22.26  
   22.27  /* Xen suballocator. These functions are interrupt-safe. */
   22.28 -void init_xenheap_pages(unsigned long ps, unsigned long pe);
   22.29 -unsigned long alloc_xenheap_pages(unsigned int order);
   22.30 -void free_xenheap_pages(unsigned long p, unsigned int order);
   22.31 +void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
   22.32 +void *alloc_xenheap_pages(unsigned int order);
   22.33 +void free_xenheap_pages(void *v, unsigned int order);
   22.34  #define alloc_xenheap_page() (alloc_xenheap_pages(0))
   22.35 -#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
   22.36 +#define free_xenheap_page(v) (free_xenheap_pages(v,0))
   22.37  
   22.38  /* Domain suballocator. These functions are *not* interrupt-safe.*/
   22.39 -void init_domheap_pages(unsigned long ps, unsigned long pe);
   22.40 +void init_domheap_pages(physaddr_t ps, physaddr_t pe);
   22.41  struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
   22.42  void free_domheap_pages(struct pfn_info *pg, unsigned int order);
   22.43  unsigned long avail_domheap_pages(void);
   22.44 -#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
   22.45 -#define free_domheap_page(_p) (free_domheap_pages(_p,0))
   22.46 +#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
   22.47 +#define free_domheap_page(p)  (free_domheap_pages(p,0))
   22.48  
   22.49  /* Automatic page scrubbing for dead domains. */
   22.50  extern struct list_head page_scrub_list;