ia64/xen-unstable

changeset 4282:267e5b0b3880

bitkeeper revision 1.1236.25.32 (4241ed59XAbItk7cnA_BXQ0SPQdMHA)

switch back to Xen memory allocator code
author djm@djmnc4000.(none)
date Wed Mar 23 22:27:37 2005 +0000 (2005-03-23)
parents 2f89618be8d4
children c76dc28fbdeb
files .rootkeys xen/arch/ia64/Makefile xen/arch/ia64/domain.c xen/arch/ia64/irq.c xen/arch/ia64/mm_init.c xen/arch/ia64/patch/linux-2.6.7/mm_contig.c xen/arch/ia64/patch/linux-2.6.7/page.h xen/arch/ia64/patch/linux-2.6.7/pgalloc.h xen/arch/ia64/patch/linux-2.6.7/setup.c xen/arch/ia64/patch/linux-2.6.7/swiotlb.c xen/arch/ia64/smp.c xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vhpt.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/common/Makefile xen/include/asm-ia64/config.h xen/include/asm-ia64/domain_page.h xen/include/asm-ia64/flushtlb.h xen/include/asm-ia64/mm.h
line diff
     1.1 --- a/.rootkeys	Fri Mar 18 07:02:50 2005 +0000
     1.2 +++ b/.rootkeys	Wed Mar 23 22:27:37 2005 +0000
     1.3 @@ -932,11 +932,13 @@ 421098b53IVBoQPcDjFciZy86YEhRQ xen/arch/
     1.4  421098b5pZw41QuBTvhjvSol6aAHDw xen/arch/ia64/patch/linux-2.6.7/mmzone.h
     1.5  421098b5B_dClZDGuPYeY3IXo8Hlbw xen/arch/ia64/patch/linux-2.6.7/page.h
     1.6  421098b5saClfxPj36l47H9Um7h1Fw xen/arch/ia64/patch/linux-2.6.7/page_alloc.c
     1.7 +4241ed05l9ZdG7Aj0tygIxIwPRXhog xen/arch/ia64/patch/linux-2.6.7/pgalloc.h
     1.8  421098b5OkmcjMBq8gxs7ZrTa4Ao6g xen/arch/ia64/patch/linux-2.6.7/processor.h
     1.9  421098b51RLB6jWr6rIlpB2SNObxZg xen/arch/ia64/patch/linux-2.6.7/sal.h
    1.10  421098b5WFeRnwGtZnHkSvHVzA4blg xen/arch/ia64/patch/linux-2.6.7/setup.c
    1.11  421098b5Jm2i8abzb0mpT6mlEiKZDg xen/arch/ia64/patch/linux-2.6.7/slab.c
    1.12  421098b5w6MBnluEpQJAWDTBFrbWSQ xen/arch/ia64/patch/linux-2.6.7/slab.h
    1.13 +4241eb584dcZqssR_Uuz2-PgMJXZ5Q xen/arch/ia64/patch/linux-2.6.7/swiotlb.c
    1.14  421098b5Cg7nbIXm3RhUF-uG3SKaUA xen/arch/ia64/patch/linux-2.6.7/system.h
    1.15  421098b5XrkDYW_Nd9lg5CDgNzHLmg xen/arch/ia64/patch/linux-2.6.7/time.c
    1.16  421098b5_kFbvZIIPM3bdCES1Ocqnw xen/arch/ia64/patch/linux-2.6.7/tlb.c
    1.17 @@ -1099,6 +1101,8 @@ 421098b6Y3xqcv873Gvg1rQ5CChfFw xen/inclu
    1.18  421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
    1.19  421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
    1.20  421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
    1.21 +4241e879ry316Y_teC18DuK7mGKaQw xen/include/asm-ia64/domain_page.h
    1.22 +4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
    1.23  421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
    1.24  421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
    1.25  421098b7XC1A5PhA-lrU9pIO3sSSmA xen/include/asm-ia64/mm.h
     2.1 --- a/xen/arch/ia64/Makefile	Fri Mar 18 07:02:50 2005 +0000
     2.2 +++ b/xen/arch/ia64/Makefile	Wed Mar 23 22:27:37 2005 +0000
     2.3 @@ -6,7 +6,7 @@ OBJS = xensetup.o setup.o time.o irq.o i
     2.4  	xenmisc.o pdb-stub.o acpi.o hypercall.o \
     2.5  	machvec.o dom0_ops.o domain.o \
     2.6  	idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
     2.7 -	mm_bootmem.o sal.o cmdline.o mm_init.o tlb.o page_alloc.o slab.o \
     2.8 +	sal.o cmdline.o mm_init.o tlb.o \
     2.9  	extable.o linuxextable.o \
    2.10  	regionreg.o entry.o unaligned.o privop.o vcpu.o \
    2.11  	irq_ia64.o irq_lsapic.o hpsim_irq.o vhpt.o xenasm.o dom_fw.o
    2.12 @@ -32,9 +32,22 @@ default: $(OBJS) head.o ia64lib.o xen.ld
    2.13  	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
    2.14  #	$(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
    2.15  
    2.16 -asm-offsets.s: asm-offsets.c
    2.17 +asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
    2.18  	$(CC) $(CFLAGS) -S -o $@ $<
    2.19  
    2.20 +$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
    2.21 +# Need such symbol link to make linux headers available
    2.22 +	[ -e $(BASEDIR)/include/linux ] \
    2.23 +	 || ln -s $(BASEDIR)/include/xen $(BASEDIR)/include/linux
    2.24 +	[ -e $(BASEDIR)/include/asm-ia64/xen ] \
    2.25 +	 || ln -s $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
    2.26 +# Solve circular reference on asm-offsets.h
    2.27 +	[ -f $(BASEDIR)/include/asm-ia64/asm-offsets.h ] \
    2.28 +	 || echo "#define IA64_TASK_SIZE 0" > $(BASEDIR)/include/asm-ia64/asm-offsets.h
    2.29 +#Bad hack. Force asm-offsets.h out-of-date
    2.30 +	 sleep 1
    2.31 +	 touch $@
    2.32 +
    2.33  # I'm sure a Makefile wizard would know a better way to do this
    2.34  xen.lds.s: xen.lds.S
    2.35  	$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
    2.36 @@ -44,7 +57,7 @@ ia64lib.o:
    2.37  	$(MAKE) -C lib && cp lib/ia64lib.o .
    2.38  
    2.39  clean:
    2.40 -	rm -f *.o *~ core  xen.lds.s
    2.41 +	rm -f *.o *~ core  xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
    2.42  	$(MAKE) -C lib clean
    2.43  
    2.44  # setup.o contains bits of compile.h so it must be blown away
     3.1 --- a/xen/arch/ia64/domain.c	Fri Mar 18 07:02:50 2005 +0000
     3.2 +++ b/xen/arch/ia64/domain.c	Wed Mar 23 22:27:37 2005 +0000
     3.3 @@ -39,8 +39,6 @@ unsigned long dom0_size = 512*1024*1024;
     3.4  //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
     3.5  unsigned long dom0_align = 64*1024*1024;
     3.6  
     3.7 -extern kmem_cache_t *domain_struct_cachep;
     3.8 -
     3.9  // initialized by arch/ia64/setup.c:find_initrd()
    3.10  unsigned long initrd_start = 0, initrd_end = 0;
    3.11  
    3.12 @@ -136,22 +134,24 @@ void startup_cpu_idle_loop(void)
    3.13  
    3.14  struct domain *arch_alloc_domain_struct(void)
    3.15  {
    3.16 -	return xmem_cache_alloc(domain_struct_cachep);
    3.17 +	return xmalloc(struct domain);
    3.18  }
    3.19  
    3.20  void arch_free_domain_struct(struct domain *d)
    3.21  {
    3.22 -	xmem_cache_free(domain_struct_cachep,d);
    3.23 +	xfree(d);
    3.24  }
    3.25  
    3.26  struct exec_domain *arch_alloc_exec_domain_struct(void)
    3.27  {
    3.28 -	return alloc_task_struct();
    3.29 +	/* Per-vp stack is used here. So we need keep exec_domain
    3.30 +	 * same page as per-vp stack */
    3.31 +	return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
    3.32  }
    3.33  
    3.34  void arch_free_exec_domain_struct(struct exec_domain *ed)
    3.35  {
    3.36 -	free_task_struct(ed);
    3.37 +	free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
    3.38  }
    3.39  
    3.40  void arch_do_createdomain(struct exec_domain *ed)
    3.41 @@ -307,7 +307,7 @@ extern unsigned long vhpt_paddr, vhpt_pe
    3.42  		if (d == dom0) p = map_new_domain0_page(mpaddr);
    3.43  		else
    3.44  #endif
    3.45 -			p = alloc_page(GFP_KERNEL);
    3.46 +			p = alloc_domheap_page(d);
    3.47  		if (unlikely(!p)) {
    3.48  printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    3.49  			return(p);
    3.50 @@ -509,7 +509,13 @@ void alloc_dom0(void)
    3.51  	dom0_size = 128*1024*1024; //FIXME: Should be configurable
    3.52  	}
    3.53  	printf("alloc_dom0: starting (initializing %d MB...)\n",dom0_size/(1024*1024));
    3.54 -	dom0_start = __alloc_bootmem(dom0_size,dom0_align,__pa(MAX_DMA_ADDRESS));
    3.55 + 
    3.56 +     /* FIXME: The first trunk (say 256M) should always be assigned to
    3.57 +      * Dom0, since Dom0's physical == machine address for DMA purpose.
    3.58 +      * Some old version linux, like 2.4, assumes physical memory existing
    3.59 +      * in 2nd 64M space.
    3.60 +      */
    3.61 +     dom0_start = alloc_boot_pages(dom0_size,dom0_align);
    3.62  	if (!dom0_start) {
    3.63  	printf("construct_dom0: can't allocate contiguous memory size=%p\n",
    3.64  		dom0_size);
    3.65 @@ -611,7 +617,7 @@ int construct_dom0(struct domain *d,
    3.66  
    3.67  	// prepare domain0 pagetable (maps METAphysical to physical)
    3.68  	// following is roughly mm_init() in linux/kernel/fork.c
    3.69 -	d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
    3.70 +	d->arch.mm = xmalloc(struct mm_struct);
    3.71  	if (unlikely(!d->arch.mm)) {
    3.72  	    	printk("Can't allocate mm_struct for domain0\n");
    3.73  	    	return -ENOMEM;
    3.74 @@ -721,7 +727,7 @@ int construct_domN(struct domain *d,
    3.75  	printk("parsedomainelfimage returns %d\n",rc);
    3.76  	if ( rc != 0 ) return rc;
    3.77  
    3.78 -	d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
    3.79 +	d->arch.mm = xmalloc(struct mm_struct);
    3.80  	if (unlikely(!d->arch.mm)) {
    3.81  	    	printk("Can't allocate mm_struct for domain %d\n",d->id);
    3.82  	    	return -ENOMEM;
     4.1 --- a/xen/arch/ia64/irq.c	Fri Mar 18 07:02:50 2005 +0000
     4.2 +++ b/xen/arch/ia64/irq.c	Wed Mar 23 22:27:37 2005 +0000
     4.3 @@ -649,8 +649,7 @@ int request_irq(unsigned int irq,
     4.4  	if (!handler)
     4.5  		return -EINVAL;
     4.6  
     4.7 -	action = (struct irqaction *)
     4.8 -			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
     4.9 +	action = xmalloc(struct irqaction);
    4.10  	if (!action)
    4.11  		return -ENOMEM;
    4.12  
    4.13 @@ -667,7 +666,7 @@ int request_irq(unsigned int irq,
    4.14  
    4.15  	retval = setup_irq(irq, action);
    4.16  	if (retval)
    4.17 -		kfree(action);
    4.18 +		xfree(action);
    4.19  	return retval;
    4.20  }
    4.21  
    4.22 @@ -730,7 +729,7 @@ void free_irq(unsigned int irq, void *de
    4.23  
    4.24  			/* Wait to make sure it's not being used on another CPU */
    4.25  			synchronize_irq(irq);
    4.26 -			kfree(action);
    4.27 +			xfree(action);
    4.28  			return;
    4.29  		}
    4.30  		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
    4.31 @@ -1389,7 +1388,7 @@ int pirq_guest_bind(struct exec_domain *
    4.32              goto out;
    4.33          }
    4.34  
    4.35 -        action = xmalloc(sizeof(irq_guest_action_t));
    4.36 +        action = xmalloc(irq_guest_action_t);
    4.37          if ( (desc->action = (struct irqaction *)action) == NULL )
    4.38          {
    4.39              DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
     5.1 --- a/xen/arch/ia64/mm_init.c	Fri Mar 18 07:02:50 2005 +0000
     5.2 +++ b/xen/arch/ia64/mm_init.c	Wed Mar 23 22:27:37 2005 +0000
     5.3 @@ -163,24 +163,6 @@ out:
     5.4  #endif /* XEN */
     5.5  
     5.6  void
     5.7 -check_pgt_cache (void)
     5.8 -{
     5.9 -	int low, high;
    5.10 -
    5.11 -	low = pgt_cache_water[0];
    5.12 -	high = pgt_cache_water[1];
    5.13 -
    5.14 -	if (pgtable_cache_size > (u64) high) {
    5.15 -		do {
    5.16 -			if (pgd_quicklist)
    5.17 -				free_page((unsigned long)pgd_alloc_one_fast(0));
    5.18 -			if (pmd_quicklist)
    5.19 -				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
    5.20 -		} while (pgtable_cache_size > (u64) low);
    5.21 -	}
    5.22 -}
    5.23 -
    5.24 -void
    5.25  update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
    5.26  {
    5.27  	unsigned long addr;
    5.28 @@ -261,132 +243,9 @@ printf("ia64_init_addr_space: called, no
    5.29  #endif
    5.30  }
    5.31  
    5.32 -void
    5.33 -free_initmem (void)
    5.34 -{
    5.35 -	unsigned long addr, eaddr;
    5.36 -
    5.37 -	addr = (unsigned long) ia64_imva(__init_begin);
    5.38 -	eaddr = (unsigned long) ia64_imva(__init_end);
    5.39 -	while (addr < eaddr) {
    5.40 -		ClearPageReserved(virt_to_page(addr));
    5.41 -		set_page_count(virt_to_page(addr), 1);
    5.42 -		free_page(addr);
    5.43 -		++totalram_pages;
    5.44 -		addr += PAGE_SIZE;
    5.45 -	}
    5.46 -	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
    5.47 -	       (__init_end - __init_begin) >> 10);
    5.48 -}
    5.49 -
    5.50 -void
    5.51 -free_initrd_mem (unsigned long start, unsigned long end)
    5.52 -{
    5.53 -	struct page *page;
    5.54 -	/*
    5.55 -	 * EFI uses 4KB pages while the kernel can use 4KB  or bigger.
    5.56 -	 * Thus EFI and the kernel may have different page sizes. It is
    5.57 -	 * therefore possible to have the initrd share the same page as
    5.58 -	 * the end of the kernel (given current setup).
    5.59 -	 *
    5.60 -	 * To avoid freeing/using the wrong page (kernel sized) we:
    5.61 -	 *	- align up the beginning of initrd
    5.62 -	 *	- align down the end of initrd
    5.63 -	 *
    5.64 -	 *  |             |
    5.65 -	 *  |=============| a000
    5.66 -	 *  |             |
    5.67 -	 *  |             |
    5.68 -	 *  |             | 9000
    5.69 -	 *  |/////////////|
    5.70 -	 *  |/////////////|
    5.71 -	 *  |=============| 8000
    5.72 -	 *  |///INITRD////|
    5.73 -	 *  |/////////////|
    5.74 -	 *  |/////////////| 7000
    5.75 -	 *  |             |
    5.76 -	 *  |KKKKKKKKKKKKK|
    5.77 -	 *  |=============| 6000
    5.78 -	 *  |KKKKKKKKKKKKK|
    5.79 -	 *  |KKKKKKKKKKKKK|
    5.80 -	 *  K=kernel using 8KB pages
    5.81 -	 *
    5.82 -	 * In this example, we must free page 8000 ONLY. So we must align up
    5.83 -	 * initrd_start and keep initrd_end as is.
    5.84 -	 */
    5.85 -	start = PAGE_ALIGN(start);
    5.86 -	end = end & PAGE_MASK;
    5.87 -
    5.88 -	if (start < end)
    5.89 -		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
    5.90 -
    5.91 -	for (; start < end; start += PAGE_SIZE) {
    5.92 -		if (!virt_addr_valid(start))
    5.93 -			continue;
    5.94 -		page = virt_to_page(start);
    5.95 -		ClearPageReserved(page);
    5.96 -		set_page_count(page, 1);
    5.97 -		free_page(start);
    5.98 -		++totalram_pages;
    5.99 -	}
   5.100 -}
   5.101 -
   5.102 -/*
   5.103 - * This installs a clean page in the kernel's page table.
   5.104 - */
   5.105 -struct page *
   5.106 -put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
   5.107 -{
   5.108 -	pgd_t *pgd;
   5.109 -	pmd_t *pmd;
   5.110 -	pte_t *pte;
   5.111 -
   5.112 -	if (!PageReserved(page))
   5.113 -		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
   5.114 -		       page_address(page));
   5.115 -
   5.116 -	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */
   5.117 -
   5.118 -	spin_lock(&init_mm.page_table_lock);
   5.119 -	{
   5.120 -		pmd = pmd_alloc(&init_mm, pgd, address);
   5.121 -		if (!pmd)
   5.122 -			goto out;
   5.123 -		pte = pte_alloc_map(&init_mm, pmd, address);
   5.124 -		if (!pte)
   5.125 -			goto out;
   5.126 -		if (!pte_none(*pte)) {
   5.127 -			pte_unmap(pte);
   5.128 -			goto out;
   5.129 -		}
   5.130 -		set_pte(pte, mk_pte(page, pgprot));
   5.131 -		pte_unmap(pte);
   5.132 -	}
   5.133 -  out:	spin_unlock(&init_mm.page_table_lock);
   5.134 -	/* no need for flush_tlb */
   5.135 -	return page;
   5.136 -}
   5.137 -
   5.138 -static void
   5.139  setup_gate (void)
   5.140  {
   5.141 -#ifndef XEN
   5.142 -	struct page *page;
   5.143 -
   5.144 -	/*
   5.145 -	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
   5.146 -	 * execute-only page to enable privilege-promotion via "epc":
   5.147 -	 */
   5.148 -	page = virt_to_page(ia64_imva(__start_gate_section));
   5.149 -	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
   5.150 -#ifdef HAVE_BUGGY_SEGREL
   5.151 -	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
   5.152 -	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
   5.153 -#else
   5.154 -	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
   5.155 -#endif
   5.156 -	ia64_patch_gate();
   5.157 -#endif
   5.158 +	printk("setup_gate not-implemented.\n");
   5.159  }
   5.160  
   5.161  void __devinit
   5.162 @@ -441,8 +300,8 @@ ia64_mmu_init (void *my_cpu_data)
   5.163  
   5.164  #ifdef XEN
   5.165  	vhpt_init();
   5.166 -	alloc_dom0();
   5.167 -#else
   5.168 +#endif
   5.169 +#if 0
   5.170  	/* place the VMLPT at the end of each page-table mapped region: */
   5.171  	pta = POW2(61) - POW2(vmlpt_bits);
   5.172  
   5.173 @@ -457,7 +316,6 @@ ia64_mmu_init (void *my_cpu_data)
   5.174  	 */
   5.175  	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
   5.176  #endif
   5.177 -
   5.178  	ia64_tlb_init();
   5.179  
   5.180  #ifdef	CONFIG_HUGETLB_PAGE
   5.181 @@ -643,14 +501,6 @@ nolwsys_setup (char *s)
   5.182  void
   5.183  mem_init (void)
   5.184  {
   5.185 -	long reserved_pages, codesize, datasize, initsize;
   5.186 -	unsigned long num_pgt_pages;
   5.187 -	pg_data_t *pgdat;
   5.188 -	int i;
   5.189 -#ifndef XEN
   5.190 -	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
   5.191 -#endif
   5.192 -
   5.193  #ifdef CONFIG_PCI
   5.194  	/*
   5.195  	 * This needs to be called _after_ the command line has been parsed but _before_
   5.196 @@ -660,65 +510,4 @@ mem_init (void)
   5.197  	platform_dma_init();
   5.198  #endif
   5.199  
   5.200 -#ifndef CONFIG_DISCONTIGMEM
   5.201 -	if (!mem_map)
   5.202 -		BUG();
   5.203 -	max_mapnr = max_low_pfn;
   5.204 -#endif
   5.205 -
   5.206 -	high_memory = __va(max_low_pfn * PAGE_SIZE);
   5.207 -
   5.208 -#ifndef XEN
   5.209 -	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
   5.210 -	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
   5.211 -	kclist_add(&kcore_kernel, _stext, _end - _stext);
   5.212 -#endif
   5.213 -
   5.214 -	for_each_pgdat(pgdat)
   5.215 -		totalram_pages += free_all_bootmem_node(pgdat);
   5.216 -
   5.217 -	reserved_pages = 0;
   5.218 -	efi_memmap_walk(count_reserved_pages, &reserved_pages);
   5.219 -
   5.220 -	codesize =  (unsigned long) _etext - (unsigned long) _stext;
   5.221 -	datasize =  (unsigned long) _edata - (unsigned long) _etext;
   5.222 -	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
   5.223 -
   5.224 -	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
   5.225 -	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
   5.226 -	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
   5.227 -	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
   5.228 -
   5.229 -	/*
   5.230 -	 * Allow for enough (cached) page table pages so that we can map the entire memory
   5.231 -	 * at least once.  Each task also needs a couple of page tables pages, so add in a
   5.232 -	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
   5.233 -	 * Don't allow the cache to be more than 10% of total memory, though.
   5.234 -	 */
   5.235 -#	define NUM_TASKS	500	/* typical number of tasks */
   5.236 -	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
   5.237 -	if (num_pgt_pages > nr_free_pages() / 10)
   5.238 -		num_pgt_pages = nr_free_pages() / 10;
   5.239 -	if (num_pgt_pages > (u64) pgt_cache_water[1])
   5.240 -		pgt_cache_water[1] = num_pgt_pages;
   5.241 -
   5.242 -#ifndef XEN
   5.243 -	/*
   5.244 -	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
   5.245 -	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
   5.246 -	 * code can tell them apart.
   5.247 -	 */
   5.248 -	for (i = 0; i < NR_syscalls; ++i) {
   5.249 -		extern unsigned long fsyscall_table[NR_syscalls];
   5.250 -		extern unsigned long sys_call_table[NR_syscalls];
   5.251 -
   5.252 -		if (!fsyscall_table[i] || nolwsys)
   5.253 -			fsyscall_table[i] = sys_call_table[i] | 1;
   5.254 -	}
   5.255 -#endif
   5.256 -	setup_gate();	/* setup gate pages before we free up boot memory... */
   5.257 -
   5.258 -#ifdef CONFIG_IA32_SUPPORT
   5.259 -	ia32_boot_gdt_init();
   5.260 -#endif
   5.261  }
     6.1 --- a/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c	Fri Mar 18 07:02:50 2005 +0000
     6.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/mm_contig.c	Wed Mar 23 22:27:37 2005 +0000
     6.3 @@ -1,6 +1,6 @@
     6.4 ---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/contig.c	2004-06-15 23:19:12.000000000 -0600
     6.5 -+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_contig.c	2004-10-05 18:09:45.000000000 -0600
     6.6 -@@ -15,11 +15,23 @@
     6.7 +--- ../../linux-2.6.7/arch/ia64/mm/contig.c	2004-06-15 23:19:12.000000000 -0600
     6.8 ++++ arch/ia64/mm_contig.c	2005-03-23 14:54:06.000000000 -0700
     6.9 +@@ -15,11 +15,21 @@
    6.10    * memory.
    6.11    */
    6.12   #include <linux/config.h>
    6.13 @@ -14,8 +14,6 @@
    6.14   
    6.15  +#ifdef XEN
    6.16  +#undef reserve_bootmem
    6.17 -+unsigned long max_mapnr;
    6.18 -+unsigned long num_physpages;
    6.19  +extern struct page *zero_page_memmap_ptr;
    6.20  +struct page *mem_map;
    6.21  +#define MAX_DMA_ADDRESS ~0UL	// FIXME???
    6.22 @@ -24,7 +22,39 @@
    6.23   #include <asm/meminit.h>
    6.24   #include <asm/pgalloc.h>
    6.25   #include <asm/pgtable.h>
    6.26 -@@ -80,6 +92,9 @@
    6.27 +@@ -37,30 +47,7 @@
    6.28 + void
    6.29 + show_mem (void)
    6.30 + {
    6.31 +-	int i, total = 0, reserved = 0;
    6.32 +-	int shared = 0, cached = 0;
    6.33 +-
    6.34 +-	printk("Mem-info:\n");
    6.35 +-	show_free_areas();
    6.36 +-
    6.37 +-	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
    6.38 +-	i = max_mapnr;
    6.39 +-	while (i-- > 0) {
    6.40 +-		if (!pfn_valid(i))
    6.41 +-			continue;
    6.42 +-		total++;
    6.43 +-		if (PageReserved(mem_map+i))
    6.44 +-			reserved++;
    6.45 +-		else if (PageSwapCache(mem_map+i))
    6.46 +-			cached++;
    6.47 +-		else if (page_count(mem_map + i))
    6.48 +-			shared += page_count(mem_map + i) - 1;
    6.49 +-	}
    6.50 +-	printk("%d pages of RAM\n", total);
    6.51 +-	printk("%d reserved pages\n", reserved);
    6.52 +-	printk("%d pages shared\n", shared);
    6.53 +-	printk("%d pages swap cached\n", cached);
    6.54 +-	printk("%ld pages in page table cache\n", pgtable_cache_size);
    6.55 ++	printk("Dummy show_mem\n");
    6.56 + }
    6.57 + 
    6.58 + /* physical address where the bootmem map is located */
    6.59 +@@ -80,6 +67,9 @@
    6.60   {
    6.61   	unsigned long *max_pfnp = arg, pfn;
    6.62   
    6.63 @@ -34,58 +64,153 @@
    6.64   	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
    6.65   	if (pfn > *max_pfnp)
    6.66   		*max_pfnp = pfn;
    6.67 -@@ -149,6 +164,9 @@
    6.68 - 	/* first find highest page frame number */
    6.69 - 	max_pfn = 0;
    6.70 - 	efi_memmap_walk(find_max_pfn, &max_pfn);
    6.71 -+#ifdef XEN
    6.72 -+//printf("find_memory: efi_memmap_walk returns max_pfn=%lx\n",max_pfn);
    6.73 -+#endif
    6.74 +@@ -133,41 +123,6 @@
    6.75 + 	return 0;
    6.76 + }
    6.77   
    6.78 - 	/* how many bytes to cover all the pages */
    6.79 - 	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
    6.80 -@@ -242,6 +260,9 @@
    6.81 - 	efi_memmap_walk(count_pages, &num_physpages);
    6.82 +-/**
    6.83 +- * find_memory - setup memory map
    6.84 +- *
    6.85 +- * Walk the EFI memory map and find usable memory for the system, taking
    6.86 +- * into account reserved areas.
    6.87 +- */
    6.88 +-void
    6.89 +-find_memory (void)
    6.90 +-{
    6.91 +-	unsigned long bootmap_size;
    6.92 +-
    6.93 +-	reserve_memory();
    6.94 +-
    6.95 +-	/* first find highest page frame number */
    6.96 +-	max_pfn = 0;
    6.97 +-	efi_memmap_walk(find_max_pfn, &max_pfn);
    6.98 +-
    6.99 +-	/* how many bytes to cover all the pages */
   6.100 +-	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
   6.101 +-
   6.102 +-	/* look for a location to hold the bootmap */
   6.103 +-	bootmap_start = ~0UL;
   6.104 +-	efi_memmap_walk(find_bootmap_location, &bootmap_size);
   6.105 +-	if (bootmap_start == ~0UL)
   6.106 +-		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
   6.107 +-
   6.108 +-	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
   6.109 +-
   6.110 +-	/* Free all available memory, then mark bootmem-map as being in use. */
   6.111 +-	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
   6.112 +-	reserve_bootmem(bootmap_start, bootmap_size);
   6.113 +-
   6.114 +-	find_initrd();
   6.115 +-}
   6.116 +-
   6.117 + #ifdef CONFIG_SMP
   6.118 + /**
   6.119 +  * per_cpu_init - setup per-cpu variables
   6.120 +@@ -227,73 +182,42 @@
   6.121 + void
   6.122 + paging_init (void)
   6.123 + {
   6.124 +-	unsigned long max_dma;
   6.125 +-	unsigned long zones_size[MAX_NR_ZONES];
   6.126 +-#ifdef CONFIG_VIRTUAL_MEM_MAP
   6.127 +-	unsigned long zholes_size[MAX_NR_ZONES];
   6.128 +-	unsigned long max_gap;
   6.129 +-#endif
   6.130 +-
   6.131 +-	/* initialize mem_map[] */
   6.132 ++	struct pfn_info *pg;
   6.133 ++	/* Allocate and map the machine-to-phys table */
   6.134 ++	if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
   6.135 ++		panic("Not enough memory to bootstrap Xen.\n");
   6.136 ++	memset(page_to_virt(pg), 0x55, 16UL << 20);
   6.137 + 
   6.138 +-	memset(zones_size, 0, sizeof(zones_size));
   6.139 ++	/* Other mapping setup */
   6.140 + 
   6.141 +-	num_physpages = 0;
   6.142 +-	efi_memmap_walk(count_pages, &num_physpages);
   6.143   
   6.144 - 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
   6.145 -+#ifdef XEN
   6.146 -+//printf("paging_init: num_physpages=%lx, max_dma=%lx\n",num_physpages,max_dma);
   6.147 -+#endif
   6.148 - 
   6.149 - #ifdef CONFIG_VIRTUAL_MEM_MAP
   6.150 - 	memset(zholes_size, 0, sizeof(zholes_size));
   6.151 -@@ -265,7 +286,13 @@
   6.152 - 
   6.153 - 	max_gap = 0;
   6.154 - 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
   6.155 -+#ifdef XEN
   6.156 -+//printf("paging_init: max_gap=%lx\n",max_gap);
   6.157 +-	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
   6.158 +-
   6.159 +-#ifdef CONFIG_VIRTUAL_MEM_MAP
   6.160 +-	memset(zholes_size, 0, sizeof(zholes_size));
   6.161 +-
   6.162 +-	num_dma_physpages = 0;
   6.163 +-	efi_memmap_walk(count_dma_pages, &num_dma_physpages);
   6.164 +-
   6.165 +-	if (max_low_pfn < max_dma) {
   6.166 +-		zones_size[ZONE_DMA] = max_low_pfn;
   6.167 +-		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
   6.168 +-	} else {
   6.169 +-		zones_size[ZONE_DMA] = max_dma;
   6.170 +-		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
   6.171 +-		if (num_physpages > num_dma_physpages) {
   6.172 +-			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
   6.173 +-			zholes_size[ZONE_NORMAL] =
   6.174 +-				((max_low_pfn - max_dma) -
   6.175 +-				 (num_physpages - num_dma_physpages));
   6.176 +-		}
   6.177 +-	}
   6.178 +-
   6.179 +-	max_gap = 0;
   6.180 +-	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
   6.181 +-	if (max_gap < LARGE_GAP) {
   6.182 +-		vmem_map = (struct page *) 0;
   6.183 +-		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
   6.184 +-				    zholes_size);
   6.185 +-		mem_map = contig_page_data.node_mem_map;
   6.186 +-	} else {
   6.187 +-		unsigned long map_size;
   6.188 +-
   6.189 +-		/* allocate virtual_mem_map */
   6.190 +-
   6.191 +-		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
   6.192 +-		vmalloc_end -= map_size;
   6.193 +-		vmem_map = (struct page *) vmalloc_end;
   6.194 +-		efi_memmap_walk(create_mem_map_page_table, 0);
   6.195 +-
   6.196 +-		free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
   6.197 +-				    0, zholes_size);
   6.198 +-
   6.199 +-		mem_map = contig_page_data.node_mem_map;
   6.200 +-		printk("Virtual mem_map starts at 0x%p\n", mem_map);
   6.201 +-	}
   6.202 +-#else /* !CONFIG_VIRTUAL_MEM_MAP */
   6.203 +-	if (max_low_pfn < max_dma)
   6.204 +-		zones_size[ZONE_DMA] = max_low_pfn;
   6.205 +-	else {
   6.206 +-		zones_size[ZONE_DMA] = max_dma;
   6.207 +-		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
   6.208 +-	}
   6.209 +-	free_area_init(zones_size);
   6.210 +-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
   6.211 + 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
   6.212 + }
   6.213 ++
   6.214 ++struct pfn_info *frame_table;
   6.215 ++unsigned long frame_table_size;
   6.216 ++unsigned long max_page;
   6.217 ++
   6.218 ++/* FIXME: postpone support to machines with big holes between physical memorys.
   6.219 ++ * Current hack allows only efi memdesc upto 4G place. (See efi.c)
   6.220 ++ */ 
   6.221 ++#ifndef CONFIG_VIRTUAL_MEM_MAP
   6.222 ++#define FT_ALIGN_SIZE	(16UL << 20)
   6.223 ++void __init init_frametable(void)
   6.224 ++{
   6.225 ++	unsigned long i, p;
   6.226 ++	frame_table_size = max_page * sizeof(struct pfn_info);
   6.227 ++	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
   6.228 ++
   6.229 ++	/* Request continuous trunk from boot allocator, since HV
   6.230 ++	 * address is identity mapped */
   6.231 ++	p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
   6.232 ++	if (p == 0)
   6.233 ++		panic("Not enough memory for frame table.\n");
   6.234 ++
   6.235 ++	frame_table = __va(p);
   6.236 ++	memset(frame_table, 0, frame_table_size);
   6.237 ++	printk("size of frame_table: %lukB\n",
   6.238 ++		frame_table_size >> 10);
   6.239 ++}
   6.240  +#endif
   6.241 - 	if (max_gap < LARGE_GAP) {
   6.242 -+#ifdef XEN
   6.243 -+//printf("paging_init: no large gap\n");
   6.244 -+#endif
   6.245 - 		vmem_map = (struct page *) 0;
   6.246 - 		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
   6.247 - 				    zholes_size);
   6.248 -@@ -274,6 +301,9 @@
   6.249 - 		unsigned long map_size;
   6.250 - 
   6.251 - 		/* allocate virtual_mem_map */
   6.252 -+#ifdef XEN
   6.253 -+//printf("paging_init: large gap, allocating virtual_mem_map\n");
   6.254 -+#endif
   6.255 - 
   6.256 - 		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
   6.257 - 		vmalloc_end -= map_size;
   6.258 -@@ -293,6 +323,10 @@
   6.259 - 		zones_size[ZONE_DMA] = max_dma;
   6.260 - 		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
   6.261 - 	}
   6.262 -+#ifdef XEN
   6.263 -+//printf("paging_init: zones_size[ZONE_DMA]=%lx, zones_size[ZONE_NORMAL]=%lx, max_low_pfn=%lx\n",
   6.264 -+//zones_size[ZONE_DMA],zones_size[ZONE_NORMAL],max_low_pfn);
   6.265 -+#endif
   6.266 - 	free_area_init(zones_size);
   6.267 - #endif /* !CONFIG_VIRTUAL_MEM_MAP */
   6.268 - 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
     7.1 --- a/xen/arch/ia64/patch/linux-2.6.7/page.h	Fri Mar 18 07:02:50 2005 +0000
     7.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/page.h	Wed Mar 23 22:27:37 2005 +0000
     7.3 @@ -1,6 +1,16 @@
     7.4 ---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/page.h	2004-06-15 23:18:58.000000000 -0600
     7.5 -+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/page.h	2004-12-17 13:47:03.000000000 -0700
     7.6 -@@ -84,7 +84,11 @@
     7.7 +--- ../../linux-2.6.7/include/asm-ia64/page.h	2004-06-15 23:18:58.000000000 -0600
     7.8 ++++ include/asm-ia64/page.h	2005-03-23 14:54:11.000000000 -0700
     7.9 +@@ -12,6 +12,9 @@
    7.10 + #include <asm/intrinsics.h>
    7.11 + #include <asm/types.h>
    7.12 + 
    7.13 ++#ifndef __ASSEMBLY__
    7.14 ++#include <asm/flushtlb.h>
    7.15 ++#endif
    7.16 + /*
    7.17 +  * PAGE_SHIFT determines the actual kernel page size.
    7.18 +  */
    7.19 +@@ -84,14 +87,22 @@
    7.20   #endif
    7.21   
    7.22   #ifndef CONFIG_DISCONTIGMEM
    7.23 @@ -8,11 +18,25 @@
    7.24  +#define pfn_valid(pfn)		(0)
    7.25  +#else
    7.26   #define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
    7.27 +-#define page_to_pfn(page)	((unsigned long) (page - mem_map))
    7.28 +-#define pfn_to_page(pfn)	(mem_map + (pfn))
    7.29  +#endif
    7.30 - #define page_to_pfn(page)	((unsigned long) (page - mem_map))
    7.31 - #define pfn_to_page(pfn)	(mem_map + (pfn))
    7.32   #endif /* CONFIG_DISCONTIGMEM */
    7.33 -@@ -107,8 +111,25 @@
    7.34 + 
    7.35 +-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
    7.36 ++#define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))
    7.37 ++#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
    7.38 ++
    7.39 ++#define page_to_phys(_page)	(page_to_pfn(_page) << PAGE_SHIFT)
    7.40 + #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
    7.41 + 
    7.42 ++#define pfn_to_page(_pfn)   	(frame_table + (_pfn))
    7.43 ++#define phys_to_page(kaddr) 	pfn_to_page(((kaddr) >> PAGE_SHIFT))
    7.44 ++
    7.45 + typedef union ia64_va {
    7.46 + 	struct {
    7.47 + 		unsigned long off : 61;		/* intra-region offset */
    7.48 +@@ -107,8 +118,25 @@
    7.49    * expressed in this way to ensure they result in a single "dep"
    7.50    * instruction.
    7.51    */
    7.52 @@ -38,7 +62,7 @@
    7.53   
    7.54   #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
    7.55   #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
    7.56 -@@ -180,11 +201,19 @@
    7.57 +@@ -180,11 +208,19 @@
    7.58   # define __pgprot(x)	(x)
    7.59   #endif /* !STRICT_MM_TYPECHECKS */
    7.60   
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/pgalloc.h	Wed Mar 23 22:27:37 2005 +0000
     8.3 @@ -0,0 +1,65 @@
     8.4 +--- ../../linux-2.6.7/include/asm-ia64/pgalloc.h	2004-06-15 23:18:54.000000000 -0600
     8.5 ++++ include/asm-ia64/pgalloc.h	2005-03-23 14:54:11.000000000 -0700
     8.6 +@@ -34,6 +34,10 @@
     8.7 + #define pmd_quicklist		(local_cpu_data->pmd_quick)
     8.8 + #define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
     8.9 + 
    8.10 ++/* FIXME: Later 3 level page table should be over, to create 
    8.11 ++ * new interface upon xen memory allocator. To simplify first
    8.12 ++ * effort moving to xen allocator, use xenheap pages temporarily. 
    8.13 ++ */
    8.14 + static inline pgd_t*
    8.15 + pgd_alloc_one_fast (struct mm_struct *mm)
    8.16 + {
    8.17 +@@ -55,7 +59,7 @@
    8.18 + 	pgd_t *pgd = pgd_alloc_one_fast(mm);
    8.19 + 
    8.20 + 	if (unlikely(pgd == NULL)) {
    8.21 +-		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
    8.22 ++		pgd = (pgd_t *)alloc_xenheap_page();
    8.23 + 		if (likely(pgd != NULL))
    8.24 + 			clear_page(pgd);
    8.25 + 	}
    8.26 +@@ -93,7 +97,7 @@
    8.27 + static inline pmd_t*
    8.28 + pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
    8.29 + {
    8.30 +-	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
    8.31 ++	pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
    8.32 + 
    8.33 + 	if (likely(pmd != NULL))
    8.34 + 		clear_page(pmd);
    8.35 +@@ -125,7 +129,7 @@
    8.36 + static inline struct page *
    8.37 + pte_alloc_one (struct mm_struct *mm, unsigned long addr)
    8.38 + {
    8.39 +-	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
    8.40 ++	struct page *pte = alloc_xenheap_page();
    8.41 + 
    8.42 + 	if (likely(pte != NULL))
    8.43 + 		clear_page(page_address(pte));
    8.44 +@@ -135,7 +139,7 @@
    8.45 + static inline pte_t *
    8.46 + pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
    8.47 + {
    8.48 +-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
    8.49 ++	pte_t *pte = (pte_t *)alloc_xenheap_page();
    8.50 + 
    8.51 + 	if (likely(pte != NULL))
    8.52 + 		clear_page(pte);
    8.53 +@@ -145,13 +149,13 @@
    8.54 + static inline void
    8.55 + pte_free (struct page *pte)
    8.56 + {
    8.57 +-	__free_page(pte);
    8.58 ++	free_xenheap_page(pte);
    8.59 + }
    8.60 + 
    8.61 + static inline void
    8.62 + pte_free_kernel (pte_t *pte)
    8.63 + {
    8.64 +-	free_page((unsigned long) pte);
    8.65 ++	free_xenheap_page((unsigned long) pte);
    8.66 + }
    8.67 + 
    8.68 + #define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
     9.1 --- a/xen/arch/ia64/patch/linux-2.6.7/setup.c	Fri Mar 18 07:02:50 2005 +0000
     9.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/setup.c	Wed Mar 23 22:27:37 2005 +0000
     9.3 @@ -1,5 +1,5 @@
     9.4 ---- /home/djm/linux-2.6.7/arch/ia64/kernel/setup.c	2004-06-15 23:18:58.000000000 -0600
     9.5 -+++ arch/ia64/setup.c	2005-02-17 10:53:00.000000000 -0700
     9.6 +--- ../../linux-2.6.7/arch/ia64/kernel/setup.c	2004-06-15 23:18:58.000000000 -0600
     9.7 ++++ arch/ia64/setup.c	2005-03-23 14:54:06.000000000 -0700
     9.8  @@ -21,6 +21,9 @@
     9.9   #include <linux/init.h>
    9.10   
    9.11 @@ -58,36 +58,83 @@
    9.12   /*
    9.13    * Filter incoming memory segments based on the primitive map created from the boot
    9.14    * parameters. Segments contained in the map are removed from the memory ranges. A
    9.15 -@@ -280,23 +293,40 @@
    9.16 +@@ -128,9 +141,12 @@
    9.17 + 	for (i = 0; i < num_rsvd_regions; ++i) {
    9.18 + 		range_start = max(start, prev_start);
    9.19 + 		range_end   = min(end, rsvd_region[i].start);
    9.20 +-
    9.21 +-		if (range_start < range_end)
    9.22 +-			call_pernode_memory(__pa(range_start), range_end - range_start, func);
    9.23 ++		/* init_boot_pages requires "ps, pe" */
    9.24 ++		if (range_start < range_end) {
    9.25 ++			printk("Init boot pages: 0x%lx -> 0x%lx.\n",
    9.26 ++				__pa(range_start), __pa(range_end));
    9.27 ++			(*func)(__pa(range_start), __pa(range_end), 0);
    9.28 ++		}
    9.29 + 
    9.30 + 		/* nothing more available in this segment */
    9.31 + 		if (range_end == end) return 0;
    9.32 +@@ -187,17 +203,17 @@
    9.33 + 				+ strlen(__va(ia64_boot_param->command_line)) + 1);
    9.34 + 	n++;
    9.35 + 
    9.36 ++	/* Reserve xen image/bitmap/xen-heap */
    9.37 + 	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
    9.38 +-	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
    9.39 ++	rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
    9.40 + 	n++;
    9.41 + 
    9.42 +-#ifdef CONFIG_BLK_DEV_INITRD
    9.43 ++	/* This is actually dom0 image */
    9.44 + 	if (ia64_boot_param->initrd_start) {
    9.45 + 		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
    9.46 + 		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
    9.47 + 		n++;
    9.48 + 	}
    9.49 +-#endif
    9.50 + 
    9.51 + 	/* end of memory marker */
    9.52 + 	rsvd_region[n].start = ~0UL;
    9.53 +@@ -207,6 +223,16 @@
    9.54 + 	num_rsvd_regions = n;
    9.55 + 
    9.56 + 	sort_regions(rsvd_region, num_rsvd_regions);
    9.57 ++
    9.58 ++	{
    9.59 ++		int i;
    9.60 ++		printk("Reserved regions: \n");
    9.61 ++		for (i = 0; i < num_rsvd_regions; i++)
    9.62 ++			printk("  [%d] -> [0x%lx, 0x%lx]\n",
    9.63 ++				i,
    9.64 ++				rsvd_region[i].start,
    9.65 ++				rsvd_region[i].end);
    9.66 ++	}
    9.67 + }
    9.68 + 
    9.69 + /**
    9.70 +@@ -280,23 +306,26 @@
    9.71   }
    9.72   #endif
    9.73   
    9.74  +#ifdef XEN
    9.75 -+void __init
    9.76 -+early_setup_arch(void)
    9.77 -+{
    9.78 -+	efi_init();
    9.79 -+	io_port_init();
    9.80 -+}
    9.81 -+#endif
    9.82 -+
    9.83   void __init
    9.84 - setup_arch (char **cmdline_p)
    9.85 +-setup_arch (char **cmdline_p)
    9.86 ++early_setup_arch(char **cmdline_p)
    9.87   {
    9.88   	unw_init();
    9.89 - 
    9.90 -+#ifndef XEN
    9.91 - 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
    9.92 -+#endif
    9.93 - 
    9.94 +-
    9.95 +-	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
    9.96 +-
    9.97 ++	
    9.98   	*cmdline_p = __va(ia64_boot_param->command_line);
    9.99   	strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
   9.100 - 
   9.101 -+#ifndef XEN
   9.102 +-
   9.103 ++	cmdline_parse(*cmdline_p);
   9.104 ++	
   9.105   	efi_init();
   9.106 - 	io_port_init();
   9.107 -+#endif
   9.108 - 
   9.109 +-	io_port_init();
   9.110 +-
   9.111 ++	
   9.112   #ifdef CONFIG_IA64_GENERIC
   9.113   	machvec_init(acpi_get_sysname());
   9.114   #endif
   9.115 @@ -99,7 +146,31 @@
   9.116   #ifdef CONFIG_ACPI_BOOT
   9.117   	/* Initialize the ACPI boot-time table parser */
   9.118   	acpi_table_init();
   9.119 -@@ -413,6 +443,9 @@
   9.120 +@@ -308,9 +337,13 @@
   9.121 + 	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
   9.122 + # endif
   9.123 + #endif /* CONFIG_APCI_BOOT */
   9.124 ++	io_port_init();
   9.125 ++}
   9.126 ++#endif
   9.127 + 
   9.128 +-	find_memory();
   9.129 +-
   9.130 ++void __init
   9.131 ++setup_arch (void)
   9.132 ++{
   9.133 + 	/* process SAL system table: */
   9.134 + 	ia64_sal_init(efi.sal_systab);
   9.135 + 
   9.136 +@@ -353,7 +386,6 @@
   9.137 + 	/* enable IA-64 Machine Check Abort Handling */
   9.138 + 	ia64_mca_init();
   9.139 + 
   9.140 +-	platform_setup(cmdline_p);
   9.141 + 	paging_init();
   9.142 + }
   9.143 + 
   9.144 +@@ -413,6 +445,9 @@
   9.145   		sprintf(cp, " 0x%lx", mask);
   9.146   	}
   9.147   
   9.148 @@ -109,7 +180,7 @@
   9.149   	seq_printf(m,
   9.150   		   "processor  : %d\n"
   9.151   		   "vendor     : %s\n"
   9.152 -@@ -667,6 +700,8 @@
   9.153 +@@ -667,6 +702,8 @@
   9.154   void
   9.155   check_bugs (void)
   9.156   {
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/swiotlb.c	Wed Mar 23 22:27:37 2005 +0000
    10.3 @@ -0,0 +1,47 @@
    10.4 +--- ../../linux-2.6.7/arch/ia64/lib/swiotlb.c	2004-06-15 23:19:43.000000000 -0600
    10.5 ++++ arch/ia64/lib/swiotlb.c	2005-03-23 14:54:05.000000000 -0700
    10.6 +@@ -100,7 +100,11 @@
    10.7 + 	/*
    10.8 + 	 * Get IO TLB memory from the low pages
    10.9 + 	 */
   10.10 +-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
   10.11 ++	/* FIXME: Do we really need swiotlb in HV? If all memory trunks
   10.12 ++	 * presented to guest as <4G, are actually <4G in machine range,
    10.13 ++	 * no DMA intervention from HV...
   10.14 ++	 */
   10.15 ++	io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
   10.16 + 	if (!io_tlb_start)
   10.17 + 		BUG();
   10.18 + 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
   10.19 +@@ -110,11 +114,11 @@
   10.20 + 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
   10.21 + 	 * between io_tlb_start and io_tlb_end.
   10.22 + 	 */
   10.23 +-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
   10.24 ++	io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
   10.25 + 	for (i = 0; i < io_tlb_nslabs; i++)
   10.26 +  		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
   10.27 + 	io_tlb_index = 0;
   10.28 +-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
   10.29 ++	io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));
   10.30 + 
   10.31 + 	printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
   10.32 + 	       (void *) io_tlb_start, (void *) io_tlb_end);
   10.33 +@@ -279,7 +283,7 @@
   10.34 + 	/* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
   10.35 + 	flags |= GFP_DMA;
   10.36 + 
   10.37 +-	ret = (void *)__get_free_pages(flags, get_order(size));
   10.38 ++	ret = (void *)alloc_xenheap_pages(get_order(size));
   10.39 + 	if (!ret)
   10.40 + 		return NULL;
   10.41 + 
   10.42 +@@ -294,7 +298,7 @@
   10.43 + void
   10.44 + swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
   10.45 + {
   10.46 +-	free_pages((unsigned long) vaddr, get_order(size));
   10.47 ++	free_xenheap_pages((unsigned long) vaddr, get_order(size));
   10.48 + }
   10.49 + 
   10.50 + /*
    11.1 --- a/xen/arch/ia64/smp.c	Fri Mar 18 07:02:50 2005 +0000
    11.2 +++ b/xen/arch/ia64/smp.c	Wed Mar 23 22:27:37 2005 +0000
    11.3 @@ -18,6 +18,12 @@
    11.4  //#include <asm/smpboot.h>
    11.5  #include <asm/hardirq.h>
    11.6  
    11.7 +
    11.8 +//Huh? This seems to be used on ia64 even if !CONFIG_SMP
    11.9 +void flush_tlb_mask(unsigned long mask)
   11.10 +{
   11.11 +	dummy();
   11.12 +}
   11.13  //#if CONFIG_SMP || IA64
   11.14  #if CONFIG_SMP
   11.15  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
   11.16 @@ -27,11 +33,6 @@ void smp_send_event_check_mask(unsigned 
   11.17  	//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
   11.18  }
   11.19  
   11.20 -//Huh? This seems to be used on ia64 even if !CONFIG_SMP
   11.21 -void flush_tlb_mask(unsigned long mask)
   11.22 -{
   11.23 -	dummy();
   11.24 -}
   11.25  
   11.26  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
   11.27  int try_flush_tlb_mask(unsigned long mask)
    12.1 --- a/xen/arch/ia64/tools/mkbuildtree	Fri Mar 18 07:02:50 2005 +0000
    12.2 +++ b/xen/arch/ia64/tools/mkbuildtree	Wed Mar 23 22:27:37 2005 +0000
    12.3 @@ -94,9 +94,9 @@ cp_patch arch/ia64/kernel/time.c arch/ia
    12.4  cp_patch arch/ia64/kernel/unaligned.c arch/ia64/unaligned.c unaligned.c
    12.5  cp_patch arch/ia64/kernel/vmlinux.lds.S arch/ia64/xen.lds.S lds.S
    12.6  
    12.7 -cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
    12.8 -cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
    12.9 -cp_patch mm/slab.c arch/ia64/slab.c slab.c
   12.10 +#cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
   12.11 +#cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
   12.12 +#cp_patch mm/slab.c arch/ia64/slab.c slab.c
   12.13  
   12.14  # following renamed to avoid conflict
   12.15  softlink kernel/extable.c arch/ia64/linuxextable.c
   12.16 @@ -140,7 +140,8 @@ softlink arch/ia64/lib/strlen.S arch/ia6
   12.17  softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S
   12.18  softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S
   12.19  softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S
   12.20 -softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
   12.21 +#softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
   12.22 +cp_patch arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c swiotlb.c
   12.23  softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S
   12.24  
   12.25  softlink lib/cmdline.c arch/ia64/cmdline.c
   12.26 @@ -222,7 +223,8 @@ softlink include/asm-ia64/param.h includ
   12.27  softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
   12.28  softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
   12.29  softlink include/asm-ia64/percpu.h include/asm-ia64/percpu.h
   12.30 -softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
   12.31 +#softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
   12.32 +cp_patch include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h pgalloc.h
   12.33  softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
   12.34  softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
   12.35  softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
    13.1 --- a/xen/arch/ia64/vhpt.c	Fri Mar 18 07:02:50 2005 +0000
    13.2 +++ b/xen/arch/ia64/vhpt.c	Wed Mar 23 22:27:37 2005 +0000
    13.3 @@ -44,7 +44,6 @@ void vhpt_map(void)
    13.4  void vhpt_init(void)
    13.5  {
    13.6  	unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
    13.7 -	extern unsigned long __alloc_bootmem(unsigned long, unsigned long, unsigned long);
    13.8  #if !VHPT_ENABLED
    13.9  	return;
   13.10  #endif
   13.11 @@ -52,8 +51,12 @@ void vhpt_init(void)
   13.12  	vhpt_total_size = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   13.13  	vhpt_alignment = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   13.14  	printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
   13.15 -	vhpt_imva = __alloc_bootmem(vhpt_total_size,vhpt_alignment,
   13.16 -		__pa(MAX_DMA_ADDRESS));
   13.17 +	/* This allocation only holds true if vhpt table is unique for
    13.18 +	 * all domains. Otherwise a new vhpt table should later be allocated
   13.19 +	 * from domain heap when each domain is created. Assume xen buddy
   13.20 +	 * allocator can provide natural aligned page by order?
   13.21 +	 */
   13.22 +	vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
   13.23  	if (!vhpt_imva) {
   13.24  		printf("vhpt_init: can't allocate VHPT!\n");
   13.25  		while(1);
    14.1 --- a/xen/arch/ia64/xenmisc.c	Fri Mar 18 07:02:50 2005 +0000
    14.2 +++ b/xen/arch/ia64/xenmisc.c	Wed Mar 23 22:27:37 2005 +0000
    14.3 @@ -88,25 +88,10 @@ int reprogram_ac_timer(s_time_t timeout)
    14.4  }
    14.5  
    14.6  ///////////////////////////////
    14.7 -// from arch/x86/dompage.c
    14.8 +// from arch/ia64/page_alloc.c
    14.9  ///////////////////////////////
   14.10 -
   14.11 -struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
   14.12 -{
   14.13 -	printf("alloc_domheap_pages: called, not implemented\n");
   14.14 -}
   14.15 -
   14.16 -void free_domheap_pages(struct pfn_info *pg, unsigned int order)
   14.17 -{
   14.18 -	printf("free_domheap_pages: called, not implemented\n");
   14.19 -}
   14.20 -
   14.21 -
   14.22 -unsigned long avail_domheap_pages(void)
   14.23 -{
   14.24 -	printf("avail_domheap_pages: called, not implemented\n");
   14.25 -	return 0;
   14.26 -}
   14.27 +DEFINE_PER_CPU(struct page_state, page_states) = {0};
   14.28 +unsigned long totalram_pages;
   14.29  
   14.30  ///////////////////////////////
   14.31  // from arch/x86/flushtlb.c
    15.1 --- a/xen/arch/ia64/xensetup.c	Fri Mar 18 07:02:50 2005 +0000
    15.2 +++ b/xen/arch/ia64/xensetup.c	Wed Mar 23 22:27:37 2005 +0000
    15.3 @@ -25,34 +25,20 @@
    15.4  //#include <asm/uaccess.h>
    15.5  //#include <asm/domain_page.h>
    15.6  //#include <public/dom0_ops.h>
    15.7 +#include <asm/meminit.h>
    15.8 +#include <asm/page.h>
    15.9  
   15.10  unsigned long xenheap_phys_end;
   15.11  
   15.12  struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
   15.13  
   15.14 -xmem_cache_t *domain_struct_cachep;
   15.15 -#ifdef IA64
   15.16 -kmem_cache_t *mm_cachep;
   15.17 -kmem_cache_t *vm_area_cachep;
   15.18  #ifdef CLONE_DOMAIN0
   15.19  struct domain *clones[CLONE_DOMAIN0];
   15.20  #endif
   15.21 -#endif
   15.22  extern struct domain *dom0;
   15.23  extern unsigned long domain0_ready;
   15.24  
   15.25 -#ifndef IA64
   15.26 -vm_assist_info_t vm_assist_info[MAX_VMASST_TYPE + 1];
   15.27 -#endif
   15.28 -
   15.29 -#ifndef IA64
   15.30 -struct e820entry {
   15.31 -    unsigned long addr_lo, addr_hi;        /* start of memory segment */
   15.32 -    unsigned long size_lo, size_hi;        /* size of memory segment */
   15.33 -    unsigned long type;                    /* type of memory segment */
   15.34 -};
   15.35 -#endif
   15.36 -
   15.37 +int find_max_pfn (unsigned long, unsigned long, void *);
   15.38  void start_of_day(void);
   15.39  
   15.40  /* opt_console: comma-separated list of console outputs. */
   15.41 @@ -97,10 +83,17 @@ char opt_physdev_dom0_hide[200] = "";
   15.42  /* Example: 'leveltrigger=4,5,6,20 edgetrigger=21'. */
   15.43  char opt_leveltrigger[30] = "", opt_edgetrigger[30] = "";
   15.44  /*
   15.45 - * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
   15.46 - * pfn_info table and allocation bitmap.
   15.47 + * opt_xenheap_megabytes: Size of Xen heap in megabytes, including:
   15.48 + * 	xen image
   15.49 + *	bootmap bits
   15.50 + *	xen heap
   15.51 + * Note: To allow xenheap size configurable, the prerequisite is
    15.52 + * to configure elilo to allow relocation by default. Then since
   15.53 + * elilo chooses 256M as alignment when relocating, alignment issue
   15.54 + * on IPF can be addressed.
   15.55   */
   15.56  unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
   15.57 +unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
   15.58  /*
   15.59   * opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
   15.60   *  fatal:  Xen prints diagnostic message and then hangs.
   15.61 @@ -119,30 +112,55 @@ char opt_nmi[10] = "fatal";
   15.62  char opt_badpage[100] = "";
   15.63  
   15.64  extern long running_on_sim;
   15.65 +unsigned long xen_pstart;
   15.66 +
   15.67 +static int
   15.68 +xen_count_pages(u64 start, u64 end, void *arg)
   15.69 +{
   15.70 +    unsigned long *count = arg;
   15.71 +
    15.72 +    /* FIXME: do we need to consider the difference between DMA-usable memory and
   15.73 +     * normal memory? Seems that HV has no requirement to operate DMA which
   15.74 +     * is owned by Dom0? */
   15.75 +    *count += (end - start) >> PAGE_SHIFT;
   15.76 +    return 0;
   15.77 +}
   15.78 +
   15.79 +/* Find first hole after trunk for xen image */
   15.80 +static int
   15.81 +xen_find_first_hole(u64 start, u64 end, void *arg)
   15.82 +{
   15.83 +    unsigned long *first_hole = arg;
   15.84 +
   15.85 +    if ((*first_hole) == 0) {
   15.86 +	if ((start <= KERNEL_START) && (KERNEL_START < end))
   15.87 +	    *first_hole = __pa(end);
   15.88 +    }
   15.89 +
   15.90 +    return 0;
   15.91 +}
   15.92 +
   15.93 +
   15.94  
   15.95  void cmain(multiboot_info_t *mbi)
   15.96  {
   15.97 -    unsigned long max_page;
   15.98      unsigned char *cmdline;
   15.99      module_t *mod = (module_t *)__va(mbi->mods_addr);
  15.100      void *heap_start;
  15.101      int i;
  15.102 -    unsigned long max_mem;
  15.103 +    unsigned long max_mem, nr_pages, firsthole_start;
  15.104      unsigned long dom0_memory_start, dom0_memory_end;
  15.105      unsigned long initial_images_start, initial_images_end;
  15.106  
  15.107 -
  15.108      running_on_sim = is_platform_hp_ski();
  15.109 -
  15.110 -    /* Parse the command-line options. */
  15.111 -    cmdline = (unsigned char *)(mbi->cmdline ? __va(mbi->cmdline) : NULL);
  15.112 -    cmdline_parse(cmdline);
  15.113 +    /* Kernel may be relocated by EFI loader */
  15.114 +    xen_pstart = ia64_tpa(KERNEL_START);
  15.115  
  15.116      /* Must do this early -- e.g., spinlocks rely on get_current(). */
  15.117      set_current(&idle0_exec_domain);
  15.118      idle0_exec_domain.domain = &idle0_domain;
  15.119  
  15.120 -    early_setup_arch();
  15.121 +    early_setup_arch(&cmdline);
  15.122  
  15.123      /* We initialise the serial devices very early so we can get debugging. */
  15.124      serial_init_stage1();
  15.125 @@ -150,135 +168,69 @@ void cmain(multiboot_info_t *mbi)
  15.126      init_console(); 
  15.127      set_printk_prefix("(XEN) ");
  15.128  
  15.129 -#ifdef IA64
  15.130 -	//set_current(&idle0_exec_domain);
  15.131 -	{ char *cmdline;
  15.132 -	setup_arch(&cmdline);
  15.133 -	}
  15.134 -	setup_per_cpu_areas();
  15.135 -	build_all_zonelists();
  15.136 -	mem_init();
  15.137 -	//show_mem();	// call to dump lots of memory info for debug
  15.138 -#else
  15.139 -    /* We require memory and module information. */
  15.140 -    if ( (mbi->flags & 9) != 9 )
  15.141 -    {
  15.142 -        printk("FATAL ERROR: Bad flags passed by bootloader: 0x%x\n", 
  15.143 -               (unsigned)mbi->flags);
  15.144 -        for ( ; ; ) ;
  15.145 -    }
  15.146 +    /* xenheap should be in same TR-covered range with xen image */
  15.147 +    xenheap_phys_end = xen_pstart + xenheap_size;
  15.148 +    printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
  15.149 +	    xen_pstart, xenheap_phys_end);
  15.150  
  15.151 -    if ( mbi->mods_count == 0 )
  15.152 -    {
  15.153 -        printk("Require at least one Multiboot module!\n");
  15.154 -        for ( ; ; ) ;
  15.155 -    }
  15.156 +    /* Find next hole */
  15.157 +    firsthole_start = 0;
  15.158 +    efi_memmap_walk(xen_find_first_hole, &firsthole_start);
  15.159  
  15.160 -    if ( opt_xenheap_megabytes < 4 )
  15.161 -    {
  15.162 -        printk("Xen heap size is too small to safely continue!\n");
  15.163 -        for ( ; ; ) ;
  15.164 -    }
  15.165 -
  15.166 -    xenheap_phys_end = opt_xenheap_megabytes << 20;
  15.167 -
  15.168 -    max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
  15.169 -#endif
  15.170 -
  15.171 -#if defined(__i386__)
  15.172 +    initial_images_start = xenheap_phys_end;
  15.173 +    initial_images_end = initial_images_start + ia64_boot_param->initrd_size;
  15.174  
  15.175 -    initial_images_start = DIRECTMAP_PHYS_END;
  15.176 -    initial_images_end   = initial_images_start + 
  15.177 -        (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
  15.178 -    if ( initial_images_end > (max_page << PAGE_SHIFT) )
  15.179 -    {
  15.180 -        printk("Not enough memory to stash the DOM0 kernel image.\n");
  15.181 -        for ( ; ; ) ;
  15.182 -    }
  15.183 -    memmove((void *)initial_images_start,  /* use low mapping */
  15.184 -            (void *)mod[0].mod_start,      /* use low mapping */
  15.185 -            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
  15.186 -
  15.187 -    if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
  15.188 -    {
  15.189 -        printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
  15.190 -               XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
  15.191 -        for ( ; ; ) ;
  15.192 +    /* Later may find another memory trunk, even away from xen image... */
  15.193 +    if (initial_images_end > firsthole_start) {
  15.194 +	printk("Not enough memory to stash the DOM0 kernel image.\n");
  15.195 +	printk("First hole:0x%lx, relocation end: 0x%lx\n",
  15.196 +		firsthole_start, initial_images_end);
  15.197 +	for ( ; ; );
  15.198      }
  15.199  
  15.200 -    ASSERT((sizeof(struct pfn_info) << 20) <=
  15.201 -           (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
  15.202 -
  15.203 -    init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
  15.204 -
  15.205 -#elif defined(__x86_64__)
  15.206 -
  15.207 -    init_frametable(__va(xenheap_phys_end), max_page);
  15.208 -
  15.209 -    initial_images_start = __pa(frame_table) + frame_table_size;
  15.210 -    initial_images_end   = initial_images_start + 
  15.211 -        (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
  15.212 -    if ( initial_images_end > (max_page << PAGE_SHIFT) )
  15.213 -    {
  15.214 -        printk("Not enough memory to stash the DOM0 kernel image.\n");
  15.215 -        for ( ; ; ) ;
  15.216 -    }
  15.217 +    /* This copy is time consuming, but elilo may load Dom0 image
  15.218 +     * within xenheap range */
  15.219 +    printk("ready to move Dom0 to 0x%lx...", initial_images_start);
  15.220      memmove(__va(initial_images_start),
  15.221 -            __va(mod[0].mod_start),
  15.222 -            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
  15.223 -
  15.224 -#endif
  15.225 +	   __va(ia64_boot_param->initrd_start),
  15.226 +	   ia64_boot_param->initrd_size);
  15.227 +    ia64_boot_param->initrd_start = initial_images_start;
  15.228 +    printk("Done\n");
  15.229 +    
  15.230 +    /* first find highest page frame number */
  15.231 +    max_page = 0;
  15.232 +    efi_memmap_walk(find_max_pfn, &max_page);
  15.233 +    printf("find_memory: efi_memmap_walk returns max_page=%lx\n",max_page);
  15.234  
  15.235 -#ifndef IA64
  15.236 -    dom0_memory_start    = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
  15.237 -    dom0_memory_end      = dom0_memory_start + (opt_dom0_mem << 10);
  15.238 -    dom0_memory_end      = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
  15.239 -    
  15.240 -    /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
  15.241 -    if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
  15.242 -    {
  15.243 -        printk("Not enough memory for DOM0 memory reservation.\n");
  15.244 -        for ( ; ; ) ;
  15.245 -    }
  15.246 -#endif
  15.247 +    heap_start = memguard_init(&_end);
  15.248 +    printf("Before heap_start: 0x%lx\n", heap_start);
  15.249 +    heap_start = __va(init_boot_allocator(__pa(heap_start)));
  15.250 +    printf("After heap_start: 0x%lx\n", heap_start);
  15.251 +
  15.252 +    reserve_memory();
  15.253  
  15.254 -    printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
  15.255 -           max_page >> (20-PAGE_SHIFT), max_page,
  15.256 -	   max_mem  >> (20-PAGE_SHIFT));
  15.257 +    efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
  15.258 +    efi_memmap_walk(xen_count_pages, &nr_pages);
  15.259  
  15.260 -#ifndef IA64
  15.261 -    heap_start = memguard_init(&_end);
  15.262 -    heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
  15.263 - 
  15.264 -    init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
  15.265 -    printk("Xen heap size is %luKB\n", 
  15.266 -	   (xenheap_phys_end-__pa(heap_start))/1024 );
  15.267 +    printk("System RAM: %luMB (%lukB)\n", 
  15.268 +	nr_pages >> (20 - PAGE_SHIFT),
  15.269 +	nr_pages << (PAGE_SHIFT - 10));
  15.270  
  15.271 -    init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
  15.272 -#endif
  15.273 +    init_frametable();
  15.274 +
  15.275 +    alloc_dom0();
  15.276  
  15.277 -    /* Initialise the slab allocator. */
  15.278 -#ifdef IA64
  15.279 -    kmem_cache_init();
  15.280 -#else
  15.281 -    xmem_cache_init();
  15.282 -    xmem_cache_sizes_init(max_page);
  15.283 -#endif
  15.284 +    end_boot_allocator();
  15.285  
  15.286 -    domain_struct_cachep = xmem_cache_create(
  15.287 -        "domain_cache", sizeof(struct domain),
  15.288 -        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
  15.289 -    if ( domain_struct_cachep == NULL )
  15.290 -        panic("No slab cache for task structs.");
  15.291 +    init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
  15.292 +    printk("Xen heap: %luMB (%lukB)\n",
  15.293 +	(xenheap_phys_end-__pa(heap_start)) >> 20,
  15.294 +	(xenheap_phys_end-__pa(heap_start)) >> 10);
  15.295  
  15.296 -#ifdef IA64
  15.297 -    // following from proc_caches_init in linux/kernel/fork.c
  15.298 -    vm_area_cachep = kmem_cache_create("vm_area_struct",
  15.299 -			sizeof(struct vm_area_struct), 0,
  15.300 -			SLAB_PANIC, NULL, NULL);
  15.301 -    mm_cachep = kmem_cache_create("mm_struct",
  15.302 -			sizeof(struct mm_struct), 0,
  15.303 -			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
  15.304 +    setup_arch();
  15.305 +    setup_per_cpu_areas();
  15.306 +    mem_init();
  15.307 +
  15.308  printk("About to call scheduler_init()\n");
  15.309      scheduler_init();
  15.310      local_irq_disable();
  15.311 @@ -291,11 +243,6 @@ printk("About to call ac_timer_init()\n"
  15.312  // do_initcalls(); ???
  15.313  printk("About to call sort_main_extable()\n");
  15.314      sort_main_extable();
  15.315 -#else
  15.316 -    start_of_day();
  15.317 -
  15.318 -    grant_table_init();
  15.319 -#endif
  15.320  
  15.321      /* Create initial domain 0. */
  15.322  printk("About to call do_createdomain()\n");
  15.323 @@ -325,39 +272,15 @@ printk("About to call init_idle_task()\n
  15.324  //printk("About to call shadow_mode_init()\n");
  15.325  //    shadow_mode_init();
  15.326  
  15.327 -    /* Grab the DOM0 command line. Skip past the image name. */
  15.328 -printk("About to  process command line\n");
  15.329 -#ifndef IA64
  15.330 -    cmdline = (unsigned char *)(mod[0].string ? __va(mod[0].string) : NULL);
  15.331 -    if ( cmdline != NULL )
  15.332 -    {
  15.333 -        while ( *cmdline == ' ' ) cmdline++;
  15.334 -        if ( (cmdline = strchr(cmdline, ' ')) != NULL )
  15.335 -            while ( *cmdline == ' ' ) cmdline++;
  15.336 -    }
  15.337 -#endif
  15.338 -
  15.339      /*
  15.340       * We're going to setup domain0 using the module(s) that we stashed safely
  15.341       * above our heap. The second module, if present, is an initrd ramdisk.
  15.342       */
  15.343 -#ifdef IA64
  15.344  printk("About to call construct_dom0()\n");
  15.345      if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
  15.346  			0,
  15.347                          0,
  15.348  			0) != 0)
  15.349 -#else
  15.350 -    if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
  15.351 -                        (char *)initial_images_start, 
  15.352 -                        mod[0].mod_end-mod[0].mod_start,
  15.353 -                        (mbi->mods_count == 1) ? 0 :
  15.354 -                        (char *)initial_images_start + 
  15.355 -                        (mod[1].mod_start-mod[0].mod_start),
  15.356 -                        (mbi->mods_count == 1) ? 0 :
  15.357 -                        mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
  15.358 -                        cmdline) != 0)
  15.359 -#endif
  15.360          panic("Could not set up DOM0 guest OS\n");
  15.361  #ifdef CLONE_DOMAIN0
  15.362      {
  15.363 @@ -376,12 +299,9 @@ printk("CONSTRUCTING DOMAIN0 CLONE #%d\n
  15.364  #endif
  15.365  
  15.366      /* The stash space for the initial kernel image can now be freed up. */
  15.367 -#ifndef IA64
  15.368 -    init_domheap_pages(__pa(frame_table) + frame_table_size,
  15.369 -                       dom0_memory_start);
  15.370 -
  15.371 +    init_domheap_pages(ia64_boot_param->initrd_start,
  15.372 +		       ia64_boot_param->initrd_start + ia64_boot_param->initrd_size);
  15.373      scrub_heap_pages();
  15.374 -#endif
  15.375  
  15.376  printk("About to call init_trace_bufs()\n");
  15.377      init_trace_bufs();
    16.1 --- a/xen/common/Makefile	Fri Mar 18 07:02:50 2005 +0000
    16.2 +++ b/xen/common/Makefile	Wed Mar 23 22:27:37 2005 +0000
    16.3 @@ -4,9 +4,9 @@ include $(BASEDIR)/Rules.mk
    16.4  ifeq ($(TARGET_ARCH),ia64) 
    16.5  OBJS := $(subst dom_mem_ops.o,,$(OBJS))
    16.6  OBJS := $(subst grant_table.o,,$(OBJS))
    16.7 -OBJS := $(subst page_alloc.o,,$(OBJS))
    16.8 +#OBJS := $(subst page_alloc.o,,$(OBJS))
    16.9  OBJS := $(subst physdev.o,,$(OBJS))
   16.10 -OBJS := $(subst xmalloc.o,,$(OBJS))
   16.11 +#OBJS := $(subst xmalloc.o,,$(OBJS))
   16.12  endif
   16.13  
   16.14  ifneq ($(perfc),y)
    17.1 --- a/xen/include/asm-ia64/config.h	Fri Mar 18 07:02:50 2005 +0000
    17.2 +++ b/xen/include/asm-ia64/config.h	Wed Mar 23 22:27:37 2005 +0000
    17.3 @@ -25,13 +25,10 @@ typedef int pid_t;
    17.4  
    17.5  //////////////////////////////////////
    17.6  
    17.7 -// FIXME: generated automatically into offsets.h??
    17.8 -#define IA64_TASK_SIZE 0 // this probably needs to be fixed
    17.9 -//#define IA64_TASK_SIZE sizeof(struct task_struct)
   17.10 -
   17.11  #define FASTCALL(x) x	// see linux/include/linux/linkage.h
   17.12  #define fastcall	// " "
   17.13  
   17.14 +#define touch_nmi_watchdog()
   17.15  // from linux/include/linux/types.h
   17.16  #define BITS_TO_LONGS(bits) \
   17.17  	(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
   17.18 @@ -46,6 +43,10 @@ typedef int pid_t;
   17.19  // FIXME?: x86-ism used in xen/mm.h
   17.20  #define LOCK_PREFIX
   17.21  
   17.22 +extern unsigned long xenheap_phys_end;
   17.23 +extern unsigned long xen_pstart;
   17.24 +extern unsigned long xenheap_size;
   17.25 +
   17.26  // from linux/include/linux/mm.h
   17.27  extern struct page *mem_map;
   17.28  
   17.29 @@ -72,7 +73,9 @@ extern char _end[]; /* standard ELF symb
   17.30  
   17.31  ///////////////////////////////////////////////////////////////
   17.32  // xen/include/asm/config.h
   17.33 -#define XENHEAP_DEFAULT_MB (16)
   17.34 +// Natural boundary upon TR size to define xenheap space
   17.35 +#define XENHEAP_DEFAULT_MB (1 << (KERNEL_TR_PAGE_SHIFT - 20))
   17.36 +#define XENHEAP_DEFAULT_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
   17.37  #define	ELFSIZE	64
   17.38  
   17.39  ///////////////////////////////////////////////////////////////
   17.40 @@ -184,15 +187,6 @@ void sort_main_extable(void);
   17.41  
   17.42  #define printk printf
   17.43  
   17.44 -#define __ARCH_HAS_SLAB_ALLOCATOR  // see include/xen/slab.h
   17.45 -#define xmem_cache_t kmem_cache_t
   17.46 -#define	xmem_cache_alloc(a)	kmem_cache_alloc(a,GFP_KERNEL)
   17.47 -#define	xmem_cache_free(a,b)	kmem_cache_free(a,b)
   17.48 -#define	xmem_cache_create	kmem_cache_create
   17.49 -#define	xmalloc(_type)		kmalloc(sizeof(_type),GFP_KERNEL)
   17.50 -#define	xmalloc_array(_type,_num)	kmalloc(sizeof(_type)*_num,GFP_KERNEL)
   17.51 -#define	xfree(a)		kfree(a)
   17.52 -
   17.53  #undef  __ARCH_IRQ_STAT
   17.54  
   17.55  #define find_first_set_bit(x)	(ffs(x)-1)	// FIXME: Is this right???
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xen/include/asm-ia64/domain_page.h	Wed Mar 23 22:27:37 2005 +0000
    18.3 @@ -0,0 +1,14 @@
    18.4 +/******************************************************************************
    18.5 + * domain_page.h
    18.6 + * 
    18.7 + * This is a trivial no-op on ia64, where we can 1:1 map all RAM.
    18.8 + */
    18.9 +
   18.10 +#ifndef __ASM_DOMAIN_PAGE_H__
   18.11 +#define __ASM_DOMAIN_PAGE_H__
   18.12 +
   18.13 +#define map_domain_mem(_pa)   phys_to_virt(_pa)
   18.14 +#define unmap_domain_mem(_va) ((void)(_va))
   18.15 +
   18.16 +#endif /* __ASM_DOMAIN_PAGE_H__ */
   18.17 +
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/include/asm-ia64/flushtlb.h	Wed Mar 23 22:27:37 2005 +0000
    19.3 @@ -0,0 +1,13 @@
    19.4 +#ifndef __FLUSHTLB_H__
    19.5 +#define __FLUSHTLB_H__
    19.6 +
    19.7 +/* The current time as shown by the virtual TLB clock. */
    19.8 +extern u32 tlbflush_clock;
    19.9 +
   19.10 +/* Time at which each CPU's TLB was last flushed. */
   19.11 +extern u32 tlbflush_time[NR_CPUS];
   19.12 +
   19.13 +#define tlbflush_current_time() tlbflush_clock
   19.14 +#define NEED_FLUSH(x, y) (0)
   19.15 +
   19.16 +#endif
    20.1 --- a/xen/include/asm-ia64/mm.h	Fri Mar 18 07:02:50 2005 +0000
    20.2 +++ b/xen/include/asm-ia64/mm.h	Wed Mar 23 22:27:37 2005 +0000
    20.3 @@ -23,21 +23,8 @@
    20.4   * The following is for page_alloc.c.
    20.5   */
    20.6  
    20.7 -//void init_page_allocator(unsigned long min, unsigned long max);
    20.8 -//unsigned long __get_free_pages(int order);
    20.9 -unsigned long __get_free_pages(unsigned int flags, unsigned int order);
   20.10 -//void __free_pages(unsigned long p, int order);
   20.11 -#define get_free_page()   (__get_free_pages(GFP_KERNEL,0))
   20.12 -//#define __get_free_page() (__get_free_pages(0))
   20.13 -//#define free_pages(_p,_o) (__free_pages(_p,_o))
   20.14 -#define free_xenheap_page(_p)     (__free_pages(_p,0))
   20.15 -#define	free_xenheap_pages(a,b)	(__free_pages(a,b))
   20.16 -#define	alloc_xenheap_page()	(__get_free_pages(GFP_KERNEL,0))
   20.17 -
   20.18  typedef unsigned long page_flags_t;
   20.19  
   20.20 -#define xmem_cache_t	kmem_cache_t
   20.21 -
   20.22  // from linux/include/linux/mm.h
   20.23  
   20.24  extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
   20.25 @@ -101,15 +88,20 @@ struct page
   20.26  {
   20.27      /* Each frame can be threaded onto a doubly-linked list. */
   20.28      struct list_head list;
   20.29 +
   20.30 +    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
   20.31 +    u32 tlbflush_timestamp;
   20.32 +
   20.33 +    /* Reference count and various PGC_xxx flags and fields. */
   20.34 +    u32 count_info;
   20.35 +
   20.36      /* Context-dependent fields follow... */
   20.37      union {
   20.38  
   20.39          /* Page is in use by a domain. */
   20.40          struct {
   20.41              /* Owner of this page. */
   20.42 -            struct domain *domain;
   20.43 -            /* Reference count and various PGC_xxx flags and fields. */
   20.44 -            u32 count_info;
   20.45 +            u64	_domain;
   20.46              /* Type reference count and various PGT_xxx flags and fields. */
   20.47              u32 type_info;
   20.48          } inuse;
   20.49 @@ -117,16 +109,12 @@ struct page
   20.50          /* Page is on a free list. */
   20.51          struct {
   20.52              /* Mask of possibly-tainted TLBs. */
   20.53 -            unsigned long cpu_mask;
   20.54 -            /* Must be at same offset as 'u.inuse.count_flags'. */
   20.55 -            u32 __unavailable;
   20.56 +            u64 cpu_mask;
   20.57              /* Order-size of the free chunk this page is the head of. */
   20.58              u8 order;
   20.59          } free;
   20.60  
   20.61      } u;
   20.62 -    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
   20.63 -    u32 tlbflush_timestamp;
   20.64  // following added for Linux compiling
   20.65      page_flags_t flags;
   20.66      atomic_t _count;
   20.67 @@ -152,13 +140,32 @@ struct page
   20.68   /* 28-bit count of uses of this frame as its current type. */
   20.69  #define PGT_count_mask      ((1<<28)-1)
   20.70  
   20.71 +/* Cleared when the owning guest 'frees' this page. */
   20.72 +#define _PGC_allocated      31
   20.73 +#define PGC_allocated       (1U<<_PGC_allocated)
   20.74 +#define PFN_ORDER(_pfn)	((_pfn)->u.free.order)
   20.75 +
   20.76 +#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
   20.77 +				 && (page_to_phys(_pfn) >= xen_pstart))
   20.78 +
   20.79 +#define pickle_domptr(_d)	((u64)(_d))
   20.80 +#define unpickle_domptr(_d)	((struct domain*)(_d))
   20.81 +
   20.82 +#define page_get_owner(_p)	(unpickle_domptr((_p)->u.inuse._domain))
   20.83 +#define page_set_owner(_p, _d)	((_p)->u.inuse._domain = pickle_domptr(_d))
   20.84 +
   20.85  extern struct pfn_info *frame_table;
   20.86  extern unsigned long frame_table_size;
   20.87  extern struct list_head free_list;
   20.88  extern spinlock_t free_list_lock;
   20.89  extern unsigned int free_pfns;
   20.90  extern unsigned long max_page;
   20.91 -void init_frametable(void *frametable_vstart, unsigned long nr_pages);
   20.92 +
   20.93 +#ifdef CONFIG_VIRTUAL_MEM_MAP
   20.94 +void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
   20.95 +#else
   20.96 +extern void __init init_frametable(void);
   20.97 +#endif
   20.98  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
   20.99  
  20.100  static inline void put_page(struct pfn_info *page)
  20.101 @@ -176,6 +183,18 @@ static inline int get_page(struct pfn_in
  20.102  // see alloc_new_dom_mem() in common/domain.c
  20.103  #define	set_machinetophys(_mfn, _pfn) do { } while(0);
  20.104  
  20.105 +#ifdef MEMORY_GUARD
  20.106 +void *memguard_init(void *heap_start);
  20.107 +void memguard_guard_stack(void *p);
  20.108 +void memguard_guard_range(void *p, unsigned long l);
  20.109 +void memguard_unguard_range(void *p, unsigned long l);
  20.110 +#else
  20.111 +#define memguard_init(_s)              (_s)
  20.112 +#define memguard_guard_stack(_p)       ((void)0)
  20.113 +#define memguard_guard_range(_p,_l)    ((void)0)
  20.114 +#define memguard_unguard_range(_p,_l)  ((void)0)
  20.115 +#endif
  20.116 +
  20.117  // FOLLOWING FROM linux-2.6.7/include/mm.h
  20.118  
  20.119  /*