ia64/xen-unstable

changeset 8726:0c94043f5c5b

Rename physical-address-related variables and functions
to follow a new, consistent naming scheme.

gpfn is a guest pseudophysical frame number.
gmfn is a machine frame number (from the guest's point of view).
mfn is a real, bona fide machine frame number.
pfn is an arbitrary frame number (used in general-purpose
'polymorphic' functions).

struct pfn_info is now called struct page_info.
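
For orientation, the sketch below is illustrative only and is not part of
this changeset: it strings the renamed helpers together as they are used
in the diff that follows. The helper names and the old names in the
comments come from the patch; the function name and local variables are
hypothetical, and error handling is elided.

/*
 * Illustrative sketch only -- not part of this changeset.
 * 'example_lookup' and its locals are hypothetical; the helpers and the
 * old names noted in the comments are those renamed by this patch.
 */
static struct page_info *example_lookup(struct domain *d, unsigned long gmfn)
{
    unsigned long mfn = gmfn_to_mfn(d, gmfn);    /* was __gpfn_to_mfn()     */
    struct page_info *page = mfn_to_page(mfn);   /* was pfn_to_page(); the
                                                    struct was pfn_info     */

    ASSERT(page_to_mfn(page) == mfn);            /* was page_to_pfn()       */
    ASSERT(maddr_to_page(page_to_maddr(page)) == page);
                                                 /* were phys_to_page() and
                                                    page_to_phys()          */
    return page;
}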

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Feb 01 16:28:50 2006 +0100 (2006-02-01)
parents a12e08eb0209
children 2c5873f99fe5
files xen/arch/ia64/linux-xen/mm_contig.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/dom_fw.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/mm_init.c xen/arch/ia64/xen/xenmem.c xen/arch/ia64/xen/xenmisc.c xen/arch/x86/audit.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm.c xen/arch/x86/mpparse.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c xen/common/xmalloc.c xen/include/asm-ia64/config.h xen/include/asm-ia64/grant_table.h xen/include/asm-ia64/linux-xen/asm/io.h xen/include/asm-ia64/linux-xen/asm/page.h xen/include/asm-ia64/linux-xen/asm/pgalloc.h xen/include/asm-ia64/linux-xen/asm/pgtable.h xen/include/asm-ia64/linux-xen/asm/uaccess.h xen/include/asm-ia64/linux/mmzone.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/xenpage.h xen/include/asm-x86/grant_table.h xen/include/asm-x86/io.h xen/include/asm-x86/mach-default/bios_ebda.h xen/include/asm-x86/mach-default/mach_wakecpu.h xen/include/asm-x86/mach-es7000/mach_wakecpu.h xen/include/asm-x86/mm.h xen/include/asm-x86/page-guest32.h xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_public.h xen/include/asm-x86/types.h xen/include/xen/domain_page.h xen/include/xen/mm.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/mm_contig.c	Wed Feb 01 15:01:04 2006 +0000
     1.2 +++ b/xen/arch/ia64/linux-xen/mm_contig.c	Wed Feb 01 16:28:50 2006 +0100
     1.3 @@ -48,7 +48,7 @@ show_mem (void)
     1.4  	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
     1.5  	i = max_mapnr;
     1.6  	while (i-- > 0) {
     1.7 -		if (!pfn_valid(i))
     1.8 +		if (!mfn_valid(i))
     1.9  			continue;
    1.10  		total++;
    1.11  		if (PageReserved(mem_map+i))
    1.12 @@ -253,7 +253,7 @@ paging_init (void)
    1.13  	num_physpages = 0;
    1.14  	efi_memmap_walk(count_pages, &num_physpages);
    1.15  
    1.16 -	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
    1.17 +	max_dma = virt_to_maddr((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
    1.18  
    1.19  #ifdef CONFIG_VIRTUAL_MEM_MAP
    1.20  	memset(zholes_size, 0, sizeof(zholes_size));
     2.1 --- a/xen/arch/ia64/vmx/vmmu.c	Wed Feb 01 15:01:04 2006 +0000
     2.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Wed Feb 01 16:28:50 2006 +0100
     2.3 @@ -64,10 +64,10 @@ u64 get_mfn(domid_t domid, u64 gpfn, u64
     2.4          d = find_domain_by_id(domid);
     2.5      }
     2.6      xen_gppn = arch_ppn_to_xen_ppn(gpfn);
     2.7 -    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
     2.8 +    xen_mppn = gmfn_to_mfn(d, xen_gppn);
     2.9  /*
    2.10      for (i=0; i<pages; i++) {
    2.11 -        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
    2.12 +        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
    2.13              return INVALID_MFN;
    2.14          }
    2.15      }
    2.16 @@ -143,7 +143,7 @@ purge_machine_tc_by_domid(domid_t domid)
    2.17  
    2.18  static thash_cb_t *init_domain_vhpt(struct vcpu *d)
    2.19  {
    2.20 -    struct pfn_info *page;
    2.21 +    struct page_info *page;
    2.22      void   *vbase,*vcur;
    2.23      vhpt_special *vs;
    2.24      thash_cb_t  *vhpt;
    2.25 @@ -188,7 +188,7 @@ static thash_cb_t *init_domain_vhpt(stru
    2.26  
    2.27  thash_cb_t *init_domain_tlb(struct vcpu *d)
    2.28  {
    2.29 -    struct pfn_info *page;
    2.30 +    struct page_info *page;
    2.31      void    *vbase,*vcur;
    2.32      tlb_special_t  *ts;
    2.33      thash_cb_t  *tlb;
    2.34 @@ -228,7 +228,7 @@ thash_cb_t *init_domain_tlb(struct vcpu 
    2.35  void
    2.36  alloc_pmt(struct domain *d)
    2.37  {
    2.38 -    struct pfn_info *page;
    2.39 +    struct page_info *page;
    2.40  
    2.41      /* Only called once */
    2.42      ASSERT(d->arch.pmt);
    2.43 @@ -392,7 +392,7 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
    2.44          if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
    2.45          gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
    2.46      }
    2.47 -    mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    2.48 +    mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    2.49      if ( mfn == INVALID_MFN ) return 0;
    2.50   
    2.51      mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
    2.52 @@ -789,7 +789,7 @@ long
    2.53      gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
    2.54      gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT)); 
    2.55  
    2.56 -    mpfn = __gpfn_to_mfn(v->domain, gpfn);
    2.57 +    mpfn = gmfn_to_mfn(v->domain, gpfn);
    2.58      m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    2.59      /* machine address may be not continuous */
    2.60      end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
     3.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Wed Feb 01 15:01:04 2006 +0000
     3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Feb 01 16:28:50 2006 +0100
     3.3 @@ -307,7 +307,7 @@ int vmx_alloc_contig_pages(struct domain
     3.4  {
     3.5  	unsigned int order;
     3.6  	unsigned long i, j, start, end, pgnr, conf_nr;
     3.7 -	struct pfn_info *page;
     3.8 +	struct page_info *page;
     3.9  	struct vcpu *v = d->vcpu[0];
    3.10  
    3.11  	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
    3.12 @@ -329,7 +329,7 @@ int vmx_alloc_contig_pages(struct domain
    3.13  	}
    3.14  
    3.15  	/* Map normal memory below 3G */
    3.16 -	pgnr = page_to_pfn(page);
    3.17 +	pgnr = page_to_mfn(page);
    3.18  	end = conf_nr << PAGE_SHIFT;
    3.19  	for (i = 0;
    3.20  	     i < (end < MMIO_START ? end : MMIO_START);
    3.21 @@ -354,7 +354,7 @@ int vmx_alloc_contig_pages(struct domain
    3.22  	}
    3.23  
    3.24  	/* Map guest firmware */
    3.25 -	pgnr = page_to_pfn(page);
    3.26 +	pgnr = page_to_mfn(page);
    3.27  	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
    3.28  	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    3.29  
    3.30 @@ -364,7 +364,7 @@ int vmx_alloc_contig_pages(struct domain
    3.31  	}
    3.32  
    3.33  	/* Map for shared I/O page and xenstore */
    3.34 -	pgnr = page_to_pfn(page);
    3.35 +	pgnr = page_to_mfn(page);
    3.36  	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    3.37  	pgnr++;
    3.38  	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
     4.1 --- a/xen/arch/ia64/vmx/vtlb.c	Wed Feb 01 15:01:04 2006 +0000
     4.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Wed Feb 01 16:28:50 2006 +0100
     4.3 @@ -988,7 +988,7 @@ u64 vtlb_chain_sanity(thash_cb_t *vtlb, 
     4.4  
     4.5  void check_vtlb_sanity(thash_cb_t *vtlb)
     4.6  {
     4.7 -//    struct pfn_info *page;
     4.8 +//    struct page_info *page;
     4.9      u64  hash_num, i, psr;
    4.10      static u64 check_ok_num, check_fail_num,check_invalid;
    4.11  //  void *vb1, *vb2;
     5.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Wed Feb 01 15:01:04 2006 +0000
     5.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Wed Feb 01 16:28:50 2006 +0100
     5.3 @@ -29,7 +29,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     5.4      {
     5.5      case DOM0_GETPAGEFRAMEINFO:
     5.6      {
     5.7 -        struct pfn_info *page;
     5.8 +        struct page_info *page;
     5.9          unsigned long pfn = op->u.getpageframeinfo.pfn;
    5.10          domid_t dom = op->u.getpageframeinfo.domain;
    5.11          struct domain *d;
    5.12 @@ -102,7 +102,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    5.13       
    5.14              for( j = 0; j < k; j++ )
    5.15              {      
    5.16 -                struct pfn_info *page;
    5.17 +                struct page_info *page;
    5.18                  unsigned long mfn = l_arr[j];
    5.19  
    5.20                  if ( unlikely(mfn >= max_page) )
    5.21 @@ -177,7 +177,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    5.22  
    5.23              for ( i = start_page; i < (start_page + nr_pages); i++ )
    5.24              {
    5.25 -		pfn = __gpfn_to_mfn_foreign(d, i);
    5.26 +		pfn = gmfn_to_mfn_foreign(d, i);
    5.27  
    5.28                  if ( put_user(pfn, buffer) )
    5.29                  {
     6.1 --- a/xen/arch/ia64/xen/dom_fw.c	Wed Feb 01 15:01:04 2006 +0000
     6.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Wed Feb 01 16:28:50 2006 +0100
     6.3 @@ -807,7 +807,7 @@ dom_fw_init (struct domain *d, char *arg
     6.4  		/*
     6.5  		 * This is a bad hack.  Dom0 may share other domains' memory
     6.6  		 * through a dom0 physical address.  Unfortunately, this
     6.7 -		 * address may be used in phys_to_page (e.g. in the loopback
     6.8 +		 * address may be used in maddr_to_page (e.g. in the loopback
     6.9  		 * driver) but when Linux initializes memory it only creates
    6.10  		 * page structs for the physical memory it knows about.  And
    6.11  		 * on ia64, only for full writeback granules.  So, we reserve
     7.1 --- a/xen/arch/ia64/xen/domain.c	Wed Feb 01 15:01:04 2006 +0000
     7.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Feb 01 16:28:50 2006 +0100
     7.3 @@ -385,7 +385,7 @@ static struct page * map_new_domain0_pag
     7.4  printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
     7.5  		while(1);
     7.6  	}
     7.7 -	return pfn_to_page((mpaddr >> PAGE_SHIFT));
     7.8 +	return mfn_to_page((mpaddr >> PAGE_SHIFT));
     7.9  }
    7.10  
    7.11  /* allocate new page for domain and map it to the specified metaphysical addr */
    7.12 @@ -425,16 +425,16 @@ extern unsigned long vhpt_paddr, vhpt_pe
    7.13  		{
    7.14  			p = alloc_domheap_page(d);
    7.15  			// zero out pages for security reasons
    7.16 -			if (p) memset(__va(page_to_phys(p)),0,PAGE_SIZE);
    7.17 +			if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
    7.18  		}
    7.19  		if (unlikely(!p)) {
    7.20  printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    7.21  			return(p);
    7.22  		}
    7.23 -if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
    7.24 -  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
    7.25 +if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
    7.26 +  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
    7.27  }
    7.28 -		set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
    7.29 +		set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
    7.30  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    7.31  	}
    7.32  	else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    7.33 @@ -662,7 +662,7 @@ void loaddomainelfimage(struct domain *d
    7.34  #else
    7.35  		p = map_new_domain_page(d,dom_mpaddr);
    7.36  		if (unlikely(!p)) BUG();
    7.37 -		dom_imva = __va(page_to_phys(p));
    7.38 +		dom_imva = __va(page_to_maddr(p));
    7.39  #endif
    7.40  		if (filesz > 0) {
    7.41  			if (filesz >= PAGE_SIZE)
    7.42 @@ -778,7 +778,7 @@ int construct_dom0(struct domain *d,
    7.43  	unsigned long nr_pt_pages;
    7.44  	unsigned long count;
    7.45  	unsigned long alloc_start, alloc_end;
    7.46 -	struct pfn_info *page = NULL;
    7.47 +	struct page_info *page = NULL;
    7.48  	start_info_t *si;
    7.49  	struct vcpu *v = d->vcpu[0];
    7.50  
    7.51 @@ -915,7 +915,7 @@ int construct_dom0(struct domain *d,
    7.52  	si->nr_pages     = d->tot_pages;
    7.53  
    7.54  #if 0
    7.55 -	si->shared_info  = virt_to_phys(d->shared_info);
    7.56 +	si->shared_info  = virt_to_maddr(d->shared_info);
    7.57  	si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
    7.58  	//si->pt_base      = vpt_start;
    7.59  	//si->nr_pt_frames = nr_pt_pages;
     8.1 --- a/xen/arch/ia64/xen/mm_init.c	Wed Feb 01 15:01:04 2006 +0000
     8.2 +++ b/xen/arch/ia64/xen/mm_init.c	Wed Feb 01 16:28:50 2006 +0100
     8.3 @@ -446,7 +446,7 @@ virtual_memmap_init (u64 start, u64 end,
     8.4  
     8.5  	if (map_start < map_end)
     8.6  		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
     8.7 -				 args->nid, args->zone, page_to_pfn(map_start));
     8.8 +				 args->nid, args->zone, page_to_mfn(map_start));
     8.9  	return 0;
    8.10  }
    8.11  
    8.12 @@ -469,16 +469,16 @@ memmap_init (struct page *start, unsigne
    8.13  }
    8.14  
    8.15  int
    8.16 -ia64_pfn_valid (unsigned long pfn)
    8.17 +ia64_mfn_valid (unsigned long pfn)
    8.18  {
    8.19  	char byte;
    8.20 -	struct page *pg = pfn_to_page(pfn);
    8.21 +	struct page *pg = mfn_to_page(pfn);
    8.22  
    8.23  	return     (__get_user(byte, (char *) pg) == 0)
    8.24  		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
    8.25  			|| (__get_user(byte, (char *) (pg + 1) - 1) == 0));
    8.26  }
    8.27 -EXPORT_SYMBOL(ia64_pfn_valid);
    8.28 +EXPORT_SYMBOL(ia64_mfn_valid);
    8.29  
    8.30  int
    8.31  find_largest_hole (u64 start, u64 end, void *arg)
     9.1 --- a/xen/arch/ia64/xen/xenmem.c	Wed Feb 01 15:01:04 2006 +0000
     9.2 +++ b/xen/arch/ia64/xen/xenmem.c	Wed Feb 01 16:28:50 2006 +0100
     9.3 @@ -14,7 +14,7 @@
     9.4  #include <xen/mm.h>
     9.5  
     9.6  extern struct page *zero_page_memmap_ptr;
     9.7 -struct pfn_info *frame_table;
     9.8 +struct page_info *frame_table;
     9.9  unsigned long frame_table_size;
    9.10  unsigned long max_page;
    9.11  
    9.12 @@ -34,7 +34,7 @@ unsigned long mpt_table_size;
    9.13  void
    9.14  paging_init (void)
    9.15  {
    9.16 -	struct pfn_info *pg;
    9.17 +	struct page_info *pg;
    9.18  	unsigned int mpt_order;
    9.19  	/* Create machine to physical mapping table
    9.20  	 * NOTE: similar to frame table, later we may need virtually
    9.21 @@ -62,7 +62,7 @@ paging_init (void)
    9.22  void __init init_frametable(void)
    9.23  {
    9.24  	unsigned long i, pfn;
    9.25 -	frame_table_size = max_page * sizeof(struct pfn_info);
    9.26 +	frame_table_size = max_page * sizeof(struct page_info);
    9.27  	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    9.28  
    9.29  	/* Request continuous trunk from boot allocator, since HV
    10.1 --- a/xen/arch/ia64/xen/xenmisc.c	Wed Feb 01 15:01:04 2006 +0000
    10.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Wed Feb 01 16:28:50 2006 +0100
    10.3 @@ -80,14 +80,14 @@ void raise_actimer_softirq(void)
    10.4  }
    10.5  
    10.6  unsigned long
    10.7 -__gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    10.8 +gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    10.9  {
   10.10  	if (d == dom0)
   10.11  		return(gpfn);
   10.12  	else {
   10.13  		unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
   10.14  		if (!pte) {
   10.15 -printk("__gpfn_to_mfn_foreign: bad gpfn. spinning...\n");
   10.16 +printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
   10.17  while(1);
   10.18  			return 0;
   10.19  		}
   10.20 @@ -96,11 +96,11 @@ while(1);
   10.21  }
   10.22  #if 0
   10.23  u32
   10.24 -__mfn_to_gpfn(struct domain *d, unsigned long frame)
   10.25 +mfn_to_gmfn(struct domain *d, unsigned long frame)
   10.26  {
   10.27  	// FIXME: is this right?
   10.28  if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
   10.29 -printk("__mfn_to_gpfn: bad frame. spinning...\n");
   10.30 +printk("mfn_to_gmfn: bad frame. spinning...\n");
   10.31  while(1);
   10.32  }
   10.33  	return frame;
   10.34 @@ -142,7 +142,7 @@ void init_percpu_info(void)
   10.35  }
   10.36  
   10.37  #if 0
   10.38 -void free_page_type(struct pfn_info *page, unsigned int type)
   10.39 +void free_page_type(struct page_info *page, unsigned int type)
   10.40  {
   10.41  	dummy();
   10.42  }
    11.1 --- a/xen/arch/x86/audit.c	Wed Feb 01 15:01:04 2006 +0000
    11.2 +++ b/xen/arch/x86/audit.c	Wed Feb 01 16:28:50 2006 +0100
    11.3 @@ -61,7 +61,7 @@ int audit_adjust_pgtables(struct domain 
    11.4  #ifdef __i386__
    11.5  #ifdef CONFIG_X86_PAE
    11.6          /* 32b PAE */
    11.7 -        if ( (( pfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
    11.8 +        if ( (( mfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
    11.9  	    >> PGT_va_shift) == 3 )
   11.10              return l2_table_offset(HYPERVISOR_VIRT_START); 
   11.11          else
   11.12 @@ -76,7 +76,7 @@ int audit_adjust_pgtables(struct domain 
   11.13  #endif
   11.14      }
   11.15  
   11.16 -    void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
   11.17 +    void _adjust(struct page_info *page, int adjtype ADJUST_EXTRA_ARGS)
   11.18      {
   11.19          int count;
   11.20  
   11.21 @@ -90,7 +90,7 @@ int audit_adjust_pgtables(struct domain 
   11.22              if ( page_get_owner(page) == NULL )
   11.23              {
   11.24                  APRINTK("adjust(mfn=%lx, dir=%d, adjtype=%d) owner=NULL",
   11.25 -                        page_to_pfn(page), dir, adjtype);
   11.26 +                        page_to_mfn(page), dir, adjtype);
   11.27                  errors++;
   11.28              }
   11.29  
   11.30 @@ -98,7 +98,7 @@ int audit_adjust_pgtables(struct domain 
   11.31              {
   11.32                  APRINTK("Audit %d: type count went below zero "
   11.33                          "mfn=%lx t=%" PRtype_info " ot=%x",
   11.34 -                        d->domain_id, page_to_pfn(page),
   11.35 +                        d->domain_id, page_to_mfn(page),
   11.36                          page->u.inuse.type_info,
   11.37                          page->tlbflush_timestamp);
   11.38                  errors++;
   11.39 @@ -107,7 +107,7 @@ int audit_adjust_pgtables(struct domain 
   11.40              {
   11.41                  APRINTK("Audit %d: type count overflowed "
   11.42                          "mfn=%lx t=%" PRtype_info " ot=%x",
   11.43 -                        d->domain_id, page_to_pfn(page),
   11.44 +                        d->domain_id, page_to_mfn(page),
   11.45                          page->u.inuse.type_info,
   11.46                          page->tlbflush_timestamp);
   11.47                  errors++;
   11.48 @@ -124,7 +124,7 @@ int audit_adjust_pgtables(struct domain 
   11.49          {
   11.50              APRINTK("Audit %d: general count went below zero "
   11.51                      "mfn=%lx t=%" PRtype_info " ot=%x",
   11.52 -                    d->domain_id, page_to_pfn(page),
   11.53 +                    d->domain_id, page_to_mfn(page),
   11.54                      page->u.inuse.type_info,
   11.55                      page->tlbflush_timestamp);
   11.56              errors++;
   11.57 @@ -133,7 +133,7 @@ int audit_adjust_pgtables(struct domain 
   11.58          {
   11.59              APRINTK("Audit %d: general count overflowed "
   11.60                      "mfn=%lx t=%" PRtype_info " ot=%x",
   11.61 -                    d->domain_id, page_to_pfn(page),
   11.62 +                    d->domain_id, page_to_mfn(page),
   11.63                      page->u.inuse.type_info,
   11.64                      page->tlbflush_timestamp);
   11.65              errors++;
   11.66 @@ -153,7 +153,7 @@ int audit_adjust_pgtables(struct domain 
   11.67              if ( l2e_get_flags(pt[i]) & _PAGE_PRESENT )
   11.68              {
   11.69  	        unsigned long l1mfn = l2e_get_pfn(pt[i]);
   11.70 -                struct pfn_info *l1page = pfn_to_page(l1mfn);
   11.71 +                struct page_info *l1page = mfn_to_page(l1mfn);
   11.72  
   11.73                  if ( noisy )
   11.74                  {
   11.75 @@ -223,7 +223,7 @@ int audit_adjust_pgtables(struct domain 
   11.76          {
   11.77              unsigned long hl2mfn =
   11.78                  l2e_get_pfn(pt[l2_table_offset(LINEAR_PT_VIRT_START)]);
   11.79 -            struct pfn_info *hl2page = pfn_to_page(hl2mfn);
   11.80 +            struct page_info *hl2page = mfn_to_page(hl2mfn);
   11.81              adjust(hl2page, 0);
   11.82          }
   11.83  
   11.84 @@ -240,7 +240,7 @@ int audit_adjust_pgtables(struct domain 
   11.85              if ( l2e_get_flags(pt[i]) & _PAGE_PRESENT )
   11.86              {
   11.87                  unsigned long gmfn = l2e_get_pfn(pt[i]);
   11.88 -                struct pfn_info *gpage = pfn_to_page(gmfn);
   11.89 +                struct page_info *gpage = mfn_to_page(gmfn);
   11.90  
   11.91                  if ( gmfn < 0x100 )
   11.92                  {
   11.93 @@ -287,7 +287,7 @@ int audit_adjust_pgtables(struct domain 
   11.94              if ( l1e_get_flags(pt[i]) & _PAGE_PRESENT )
   11.95              {
   11.96                  unsigned long gmfn = l1e_get_pfn(pt[i]);
   11.97 -                struct pfn_info *gpage = pfn_to_page(gmfn);
   11.98 +                struct page_info *gpage = mfn_to_page(gmfn);
   11.99  
  11.100                  if ( gmfn < 0x100 )
  11.101                  {
  11.102 @@ -354,7 +354,7 @@ int audit_adjust_pgtables(struct domain 
  11.103      {
  11.104          struct shadow_status *a;
  11.105          unsigned long smfn, gmfn;
  11.106 -        struct pfn_info *page;
  11.107 +        struct page_info *page;
  11.108          int i;
  11.109  
  11.110          for ( i = 0; i < shadow_ht_buckets; i++ )
  11.111 @@ -362,32 +362,32 @@ int audit_adjust_pgtables(struct domain 
  11.112              a = &d->arch.shadow_ht[i];
  11.113              while ( a && a->gpfn_and_flags )
  11.114              {
  11.115 -                gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  11.116 +                gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  11.117                  smfn = a->smfn;
  11.118 -                page = pfn_to_page(smfn);
  11.119 +                page = mfn_to_page(smfn);
  11.120  
  11.121                  switch ( a->gpfn_and_flags & PGT_type_mask ) {
  11.122                  case PGT_writable_pred:
  11.123                      break;
  11.124                  case PGT_snapshot:
  11.125 -                    adjust(pfn_to_page(gmfn), 0);
  11.126 +                    adjust(mfn_to_page(gmfn), 0);
  11.127                      break;
  11.128                  case PGT_l1_shadow:
  11.129 -                    adjust(pfn_to_page(gmfn), 0);
  11.130 +                    adjust(mfn_to_page(gmfn), 0);
  11.131                      if ( shadow_refcounts )
  11.132                          adjust_l1_page(smfn);
  11.133                      if ( page->u.inuse.type_info & PGT_pinned )
  11.134                          adjust(page, 0);
  11.135                      break;
  11.136                  case PGT_hl2_shadow:
  11.137 -                    adjust(pfn_to_page(gmfn), 0);
  11.138 +                    adjust(mfn_to_page(gmfn), 0);
  11.139                      if ( shadow_refcounts )
  11.140                          adjust_hl2_page(smfn);
  11.141                      if ( page->u.inuse.type_info & PGT_pinned )
  11.142                          adjust(page, 0);
  11.143                      break;
  11.144                  case PGT_l2_shadow:
  11.145 -                    adjust(pfn_to_page(gmfn), 0);
  11.146 +                    adjust(mfn_to_page(gmfn), 0);
  11.147                      adjust_l2_page(smfn, 1);
  11.148                      if ( page->u.inuse.type_info & PGT_pinned )
  11.149                          adjust(page, 0);
  11.150 @@ -411,15 +411,15 @@ int audit_adjust_pgtables(struct domain 
  11.151  
  11.152          while ( oos )
  11.153          {
  11.154 -            adjust(pfn_to_page(oos->gmfn), 0);
  11.155 +            adjust(mfn_to_page(oos->gmfn), 0);
  11.156  
  11.157              // Only use entries that have low bits clear...
  11.158              //
  11.159              if ( !(oos->writable_pl1e & (sizeof(l1_pgentry_t)-1)) )
  11.160 -                adjust(pfn_to_page(oos->writable_pl1e >> PAGE_SHIFT), 0);
  11.161 +                adjust(mfn_to_page(oos->writable_pl1e >> PAGE_SHIFT), 0);
  11.162  
  11.163              if ( oos->snapshot_mfn != SHADOW_SNAPSHOT_ELSEWHERE )
  11.164 -                adjust(pfn_to_page(oos->snapshot_mfn), 0);
  11.165 +                adjust(mfn_to_page(oos->snapshot_mfn), 0);
  11.166  
  11.167              oos = oos->next;
  11.168              oos_count++;
  11.169 @@ -433,28 +433,28 @@ int audit_adjust_pgtables(struct domain 
  11.170          for_each_vcpu(d, v)
  11.171          {
  11.172              if ( pagetable_get_paddr(v->arch.guest_table) )
  11.173 -                adjust(pfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
  11.174 +                adjust(mfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
  11.175                         !shadow_mode_refcounts(d));
  11.176              if ( pagetable_get_paddr(v->arch.shadow_table) )
  11.177 -                adjust(pfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
  11.178 +                adjust(mfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
  11.179                         0);
  11.180              if ( v->arch.monitor_shadow_ref )
  11.181 -                adjust(pfn_to_page(v->arch.monitor_shadow_ref), 0);
  11.182 +                adjust(mfn_to_page(v->arch.monitor_shadow_ref), 0);
  11.183          }
  11.184      }
  11.185  
  11.186      void adjust_guest_pages()
  11.187      {
  11.188          struct list_head *list_ent = d->page_list.next;
  11.189 -        struct pfn_info *page;
  11.190 +        struct page_info *page;
  11.191          unsigned long mfn, snapshot_mfn;
  11.192  
  11.193          while ( list_ent != &d->page_list )
  11.194          {
  11.195              u32 page_type;
  11.196  
  11.197 -            page = list_entry(list_ent, struct pfn_info, list);
  11.198 -            snapshot_mfn = mfn = page_to_pfn(page);
  11.199 +            page = list_entry(list_ent, struct page_info, list);
  11.200 +            snapshot_mfn = mfn = page_to_mfn(page);
  11.201              page_type = page->u.inuse.type_info & PGT_type_mask;
  11.202  
  11.203              BUG_ON(page_get_owner(page) != d);
  11.204 @@ -464,7 +464,7 @@ int audit_adjust_pgtables(struct domain 
  11.205              if ( shadow_enabled && !shadow_refcounts &&
  11.206                   page_out_of_sync(page) )
  11.207              {
  11.208 -                unsigned long gpfn = __mfn_to_gpfn(d, mfn);
  11.209 +                unsigned long gpfn = mfn_to_gmfn(d, mfn);
  11.210                  ASSERT( VALID_M2P(gpfn) );
  11.211                  snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
  11.212                  ASSERT( snapshot_mfn );
  11.213 @@ -619,7 +619,7 @@ void _audit_domain(struct domain *d, int
  11.214      void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
  11.215                               unsigned long mfn)
  11.216      {
  11.217 -        struct pfn_info *page = pfn_to_page(mfn);
  11.218 +        struct page_info *page = mfn_to_page(mfn);
  11.219          l1_pgentry_t *pt = map_domain_page(mfn);
  11.220          int i;
  11.221  
  11.222 @@ -662,17 +662,17 @@ void _audit_domain(struct domain *d, int
  11.223          if ( !shadow_mode_enabled(d) )
  11.224          {
  11.225              struct list_head *list_ent = d->page_list.next;
  11.226 -            struct pfn_info *page;
  11.227 +            struct page_info *page;
  11.228  
  11.229              while ( list_ent != &d->page_list )
  11.230              {
  11.231 -                page = list_entry(list_ent, struct pfn_info, list);
  11.232 +                page = list_entry(list_ent, struct page_info, list);
  11.233  
  11.234                  switch ( page->u.inuse.type_info & PGT_type_mask )
  11.235                  {
  11.236                  case PGT_l1_page_table:
  11.237                  case PGT_l2_page_table:
  11.238 -                    scan_for_pfn_in_mfn(d, xmfn, page_to_pfn(page));
  11.239 +                    scan_for_pfn_in_mfn(d, xmfn, page_to_mfn(page));
  11.240                      break;
  11.241                  default:
  11.242                      break;
  11.243 @@ -720,7 +720,7 @@ void _audit_domain(struct domain *d, int
  11.244  
  11.245      unsigned long mfn;
  11.246      struct list_head *list_ent;
  11.247 -    struct pfn_info *page;
  11.248 +    struct page_info *page;
  11.249      int errors = 0;
  11.250  
  11.251      if ( (d != current->domain) && shadow_mode_translate(d) )
  11.252 @@ -751,8 +751,8 @@ void _audit_domain(struct domain *d, int
  11.253          u32 page_type;
  11.254          unsigned long pfn;
  11.255  
  11.256 -        page = list_entry(list_ent, struct pfn_info, list);
  11.257 -        mfn = page_to_pfn(page);
  11.258 +        page = list_entry(list_ent, struct page_info, list);
  11.259 +        mfn = page_to_mfn(page);
  11.260          page_type = page->u.inuse.type_info & PGT_type_mask;
  11.261  
  11.262          BUG_ON(page_get_owner(page) != d);
  11.263 @@ -806,7 +806,7 @@ void _audit_domain(struct domain *d, int
  11.264                  printk("out of sync page mfn=%lx is not a page table\n", mfn);
  11.265                  errors++;
  11.266              }
  11.267 -            pfn = __mfn_to_gpfn(d, mfn);
  11.268 +            pfn = mfn_to_gmfn(d, mfn);
  11.269              if ( !__shadow_status(d, pfn, PGT_snapshot) )
  11.270              {
  11.271                  printk("out of sync page mfn=%lx doesn't have a snapshot\n",
  11.272 @@ -845,8 +845,8 @@ void _audit_domain(struct domain *d, int
  11.273      list_ent = d->page_list.next;
  11.274      while ( list_ent != &d->page_list )
  11.275      {
  11.276 -        page = list_entry(list_ent, struct pfn_info, list);
  11.277 -        mfn = page_to_pfn(page);
  11.278 +        page = list_entry(list_ent, struct page_info, list);
  11.279 +        mfn = page_to_mfn(page);
  11.280  
  11.281          switch ( page->u.inuse.type_info & PGT_type_mask)
  11.282          {
  11.283 @@ -898,7 +898,7 @@ void _audit_domain(struct domain *d, int
  11.284      if ( shadow_mode_enabled(d) )
  11.285      {
  11.286          struct shadow_status *a;
  11.287 -        struct pfn_info *page;
  11.288 +        struct page_info *page;
  11.289          u32 page_type;
  11.290          int i;
  11.291  
  11.292 @@ -907,7 +907,7 @@ void _audit_domain(struct domain *d, int
  11.293              a = &d->arch.shadow_ht[i];
  11.294              while ( a && a->gpfn_and_flags )
  11.295              {
  11.296 -                page = pfn_to_page(a->smfn);
  11.297 +                page = mfn_to_page(a->smfn);
  11.298                  page_type = a->gpfn_and_flags & PGT_type_mask;
  11.299  
  11.300                  switch ( page_type ) {
  11.301 @@ -920,7 +920,7 @@ void _audit_domain(struct domain *d, int
  11.302                      {
  11.303                          printk("Audit %d: shadow page counts wrong "
  11.304                                 "mfn=%lx t=%" PRtype_info " c=%08x\n",
  11.305 -                               d->domain_id, page_to_pfn(page),
  11.306 +                               d->domain_id, page_to_mfn(page),
  11.307                                 page->u.inuse.type_info,
  11.308                                 page->count_info);
  11.309                          printk("a->gpfn_and_flags=%p\n",
    12.1 --- a/xen/arch/x86/dom0_ops.c	Wed Feb 01 15:01:04 2006 +0000
    12.2 +++ b/xen/arch/x86/dom0_ops.c	Wed Feb 01 16:28:50 2006 +0100
    12.3 @@ -199,7 +199,7 @@ long arch_do_dom0_op(struct dom0_op *op,
    12.4      
    12.5      case DOM0_GETPAGEFRAMEINFO:
    12.6      {
    12.7 -        struct pfn_info *page;
    12.8 +        struct page_info *page;
    12.9          unsigned long pfn = op->u.getpageframeinfo.pfn;
   12.10          domid_t dom = op->u.getpageframeinfo.domain;
   12.11          struct domain *d;
   12.12 @@ -210,7 +210,7 @@ long arch_do_dom0_op(struct dom0_op *op,
   12.13               unlikely((d = find_domain_by_id(dom)) == NULL) )
   12.14              break;
   12.15  
   12.16 -        page = pfn_to_page(pfn);
   12.17 +        page = mfn_to_page(pfn);
   12.18  
   12.19          if ( likely(get_page(page, d)) )
   12.20          {
   12.21 @@ -282,12 +282,12 @@ long arch_do_dom0_op(struct dom0_op *op,
   12.22       
   12.23              for( j = 0; j < k; j++ )
   12.24              {      
   12.25 -                struct pfn_info *page;
   12.26 +                struct page_info *page;
   12.27                  unsigned long mfn = l_arr[j];
   12.28  
   12.29 -                page = pfn_to_page(mfn);
   12.30 +                page = mfn_to_page(mfn);
   12.31  
   12.32 -                if ( likely(pfn_valid(mfn) && get_page(page, d)) ) 
   12.33 +                if ( likely(mfn_valid(mfn) && get_page(page, d)) ) 
   12.34                  {
   12.35                      unsigned long type = 0;
   12.36  
   12.37 @@ -350,14 +350,14 @@ long arch_do_dom0_op(struct dom0_op *op,
   12.38              list_ent = d->page_list.next;
   12.39              for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
   12.40              {
   12.41 -                pfn = page_to_pfn(list_entry(list_ent, struct pfn_info, list));
   12.42 +                pfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
   12.43                  if ( put_user(pfn, buffer) )
   12.44                  {
   12.45                      ret = -EFAULT;
   12.46                      break;
   12.47                  }
   12.48                  buffer++;
   12.49 -                list_ent = pfn_to_page(pfn)->list.next;
   12.50 +                list_ent = mfn_to_page(pfn)->list.next;
   12.51              }
   12.52              spin_unlock(&d->page_alloc_lock);
   12.53  
   12.54 @@ -420,8 +420,8 @@ long arch_do_dom0_op(struct dom0_op *op,
   12.55              break;
   12.56  
   12.57          ret = -EACCES;
   12.58 -        if ( !pfn_valid(mfn) ||
   12.59 -             !get_page_and_type(pfn_to_page(mfn), d, PGT_writable_page) )
   12.60 +        if ( !mfn_valid(mfn) ||
   12.61 +             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
   12.62          {
   12.63              put_domain(d);
   12.64              break;
   12.65 @@ -433,7 +433,7 @@ long arch_do_dom0_op(struct dom0_op *op,
   12.66          hypercall_page_initialise(hypercall_page);
   12.67          unmap_domain_page(hypercall_page);
   12.68  
   12.69 -        put_page_and_type(pfn_to_page(mfn));
   12.70 +        put_page_and_type(mfn_to_page(mfn));
   12.71  
   12.72          put_domain(d);
   12.73      }
    13.1 --- a/xen/arch/x86/domain.c	Wed Feb 01 15:01:04 2006 +0000
    13.2 +++ b/xen/arch/x86/domain.c	Wed Feb 01 16:28:50 2006 +0100
    13.3 @@ -179,7 +179,7 @@ void machine_restart(char * __unused)
    13.4  
    13.5  void dump_pageframe_info(struct domain *d)
    13.6  {
    13.7 -    struct pfn_info *page;
    13.8 +    struct page_info *page;
    13.9  
   13.10      printk("Memory pages belonging to domain %u:\n", d->domain_id);
   13.11  
   13.12 @@ -192,7 +192,7 @@ void dump_pageframe_info(struct domain *
   13.13          list_for_each_entry ( page, &d->page_list, list )
   13.14          {
   13.15              printk("    DomPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
   13.16 -                   _p(page_to_phys(page)), _p(page_to_pfn(page)),
   13.17 +                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
   13.18                     page->count_info, page->u.inuse.type_info);
   13.19          }
   13.20      }
   13.21 @@ -200,7 +200,7 @@ void dump_pageframe_info(struct domain *
   13.22      list_for_each_entry ( page, &d->xenpage_list, list )
   13.23      {
   13.24          printk("    XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
   13.25 -               _p(page_to_phys(page)), _p(page_to_pfn(page)),
   13.26 +               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
   13.27                 page->count_info, page->u.inuse.type_info);
   13.28      }
   13.29  }
   13.30 @@ -400,7 +400,7 @@ int arch_set_info_guest(
   13.31  
   13.32      phys_basetab = c->ctrlreg[3];
   13.33      phys_basetab =
   13.34 -        (__gpfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
   13.35 +        (gmfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
   13.36          (phys_basetab & ~PAGE_MASK);
   13.37  
   13.38      v->arch.guest_table = mk_pagetable(phys_basetab);
   13.39 @@ -410,7 +410,7 @@ int arch_set_info_guest(
   13.40  
   13.41      if ( shadow_mode_refcounts(d) )
   13.42      {
   13.43 -        if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
   13.44 +        if ( !get_page(mfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
   13.45          {
   13.46              destroy_gdt(v);
   13.47              return -EINVAL;
   13.48 @@ -418,7 +418,7 @@ int arch_set_info_guest(
   13.49      }
   13.50      else if ( !(c->flags & VGCF_HVM_GUEST) )
   13.51      {
   13.52 -        if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
   13.53 +        if ( !get_page_and_type(mfn_to_page(phys_basetab>>PAGE_SHIFT), d,
   13.54                                  PGT_base_page_table) )
   13.55          {
   13.56              destroy_gdt(v);
   13.57 @@ -879,7 +879,7 @@ unsigned long __hypercall_create_continu
   13.58  static void relinquish_memory(struct domain *d, struct list_head *list)
   13.59  {
   13.60      struct list_head *ent;
   13.61 -    struct pfn_info  *page;
   13.62 +    struct page_info  *page;
   13.63      unsigned long     x, y;
   13.64  
   13.65      /* Use a recursive lock, as we may enter 'free_domheap_page'. */
   13.66 @@ -888,7 +888,7 @@ static void relinquish_memory(struct dom
   13.67      ent = list->next;
   13.68      while ( ent != list )
   13.69      {
   13.70 -        page = list_entry(ent, struct pfn_info, list);
   13.71 +        page = list_entry(ent, struct page_info, list);
   13.72  
   13.73          /* Grab a reference to the page so it won't disappear from under us. */
   13.74          if ( unlikely(!get_page(page, d)) )
   13.75 @@ -949,8 +949,8 @@ void domain_relinquish_resources(struct 
   13.76          if ( (pfn = pagetable_get_pfn(v->arch.guest_table)) != 0 )
   13.77          {
   13.78              if ( !shadow_mode_refcounts(d) )
   13.79 -                put_page_type(pfn_to_page(pfn));
   13.80 -            put_page(pfn_to_page(pfn));
   13.81 +                put_page_type(mfn_to_page(pfn));
   13.82 +            put_page(mfn_to_page(pfn));
   13.83  
   13.84              v->arch.guest_table = mk_pagetable(0);
   13.85          }
   13.86 @@ -958,8 +958,8 @@ void domain_relinquish_resources(struct 
   13.87          if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
   13.88          {
   13.89              if ( !shadow_mode_refcounts(d) )
   13.90 -                put_page_type(pfn_to_page(pfn));
   13.91 -            put_page(pfn_to_page(pfn));
   13.92 +                put_page_type(mfn_to_page(pfn));
   13.93 +            put_page(mfn_to_page(pfn));
   13.94  
   13.95              v->arch.guest_table_user = mk_pagetable(0);
   13.96          }
    14.1 --- a/xen/arch/x86/domain_build.c	Wed Feb 01 15:01:04 2006 +0000
    14.2 +++ b/xen/arch/x86/domain_build.c	Wed Feb 01 16:28:50 2006 +0100
    14.3 @@ -75,9 +75,9 @@ string_param("dom0_ioports_disable", opt
    14.4  #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
    14.5  #define round_pgdown(_p)  ((_p)&PAGE_MASK)
    14.6  
    14.7 -static struct pfn_info *alloc_chunk(struct domain *d, unsigned long max_pages)
    14.8 +static struct page_info *alloc_chunk(struct domain *d, unsigned long max_pages)
    14.9  {
   14.10 -    struct pfn_info *page;
   14.11 +    struct page_info *page;
   14.12      unsigned int order;
   14.13      /*
   14.14       * Allocate up to 2MB at a time: It prevents allocating very large chunks
   14.15 @@ -143,7 +143,7 @@ int construct_dom0(struct domain *d,
   14.16      unsigned long alloc_spfn;
   14.17      unsigned long alloc_epfn;
   14.18      unsigned long count;
   14.19 -    struct pfn_info *page = NULL;
   14.20 +    struct page_info *page = NULL;
   14.21      start_info_t *si;
   14.22      struct vcpu *v = d->vcpu[0];
   14.23      char *p;
   14.24 @@ -299,12 +299,12 @@ int construct_dom0(struct domain *d,
   14.25      /* Allocate from DMA pool: PAE L3 table must be below 4GB boundary. */
   14.26      if ( (page = alloc_domheap_pages(d, order, ALLOC_DOM_DMA)) == NULL )
   14.27          panic("Not enough RAM for domain 0 allocation.\n");
   14.28 -    alloc_spfn = page_to_pfn(page);
   14.29 +    alloc_spfn = page_to_mfn(page);
   14.30      alloc_epfn = alloc_spfn + d->tot_pages;
   14.31  
   14.32      printk("PHYSICAL MEMORY ARRANGEMENT:\n"
   14.33 -           " Dom0 alloc.:   %"PRIphysaddr"->%"PRIphysaddr,
   14.34 -           pfn_to_phys(alloc_spfn), pfn_to_phys(alloc_epfn));
   14.35 +           " Dom0 alloc.:   %"PRIpaddr"->%"PRIpaddr,
   14.36 +           pfn_to_paddr(alloc_spfn), pfn_to_paddr(alloc_epfn));
   14.37      if ( d->tot_pages < nr_pages )
   14.38          printk(" (%lu pages to be allocated)",
   14.39                 nr_pages - d->tot_pages);
   14.40 @@ -334,7 +334,7 @@ int construct_dom0(struct domain *d,
   14.41      }
   14.42  
   14.43      mpt_alloc = (vpt_start - dsi.v_start) + 
   14.44 -        (unsigned long)pfn_to_phys(alloc_spfn);
   14.45 +        (unsigned long)pfn_to_paddr(alloc_spfn);
   14.46  
   14.47      /*
   14.48       * We're basically forcing default RPLs to 1, so that our "what privilege
   14.49 @@ -400,7 +400,7 @@ int construct_dom0(struct domain *d,
   14.50          *l1tab = l1e_from_pfn(mfn, L1_PROT);
   14.51          l1tab++;
   14.52          
   14.53 -        page = pfn_to_page(mfn);
   14.54 +        page = mfn_to_page(mfn);
   14.55          if ( !get_page_and_type(page, d, PGT_writable_page) )
   14.56              BUG();
   14.57  
   14.58 @@ -413,7 +413,7 @@ int construct_dom0(struct domain *d,
   14.59      l1tab += l1_table_offset(vpt_start);
   14.60      for ( count = 0; count < nr_pt_pages; count++ ) 
   14.61      {
   14.62 -        page = pfn_to_page(l1e_get_pfn(*l1tab));
   14.63 +        page = mfn_to_page(l1e_get_pfn(*l1tab));
   14.64          if ( !opt_dom0_shadow )
   14.65              l1e_remove_flags(*l1tab, _PAGE_RW);
   14.66          else
   14.67 @@ -496,7 +496,7 @@ int construct_dom0(struct domain *d,
   14.68      }
   14.69  
   14.70      /* WARNING: The new domain must have its 'processor' field filled in! */
   14.71 -    phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
   14.72 +    maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
   14.73      l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
   14.74      memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
   14.75      l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
   14.76 @@ -511,21 +511,21 @@ int construct_dom0(struct domain *d,
   14.77      {
   14.78          if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
   14.79          {
   14.80 -            phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
   14.81 +            maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
   14.82              l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
   14.83              clear_page(l1tab);
   14.84              if ( count == 0 )
   14.85                  l1tab += l1_table_offset(dsi.v_start);
   14.86              if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
   14.87              {
   14.88 -                phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
   14.89 +                maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
   14.90                  l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
   14.91                  clear_page(l2tab);
   14.92                  if ( count == 0 )
   14.93                      l2tab += l2_table_offset(dsi.v_start);
   14.94                  if ( !((unsigned long)l3tab & (PAGE_SIZE-1)) )
   14.95                  {
   14.96 -                    phys_to_page(mpt_alloc)->u.inuse.type_info =
   14.97 +                    maddr_to_page(mpt_alloc)->u.inuse.type_info =
   14.98                          PGT_l3_page_table;
   14.99                      l3start = l3tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
  14.100                      clear_page(l3tab);
  14.101 @@ -543,7 +543,7 @@ int construct_dom0(struct domain *d,
  14.102          *l1tab = l1e_from_pfn(mfn, L1_PROT);
  14.103          l1tab++;
  14.104  
  14.105 -        page = pfn_to_page(mfn);
  14.106 +        page = mfn_to_page(mfn);
  14.107          if ( (page->u.inuse.type_info == 0) &&
  14.108               !get_page_and_type(page, d, PGT_writable_page) )
  14.109              BUG();
  14.110 @@ -562,7 +562,7 @@ int construct_dom0(struct domain *d,
  14.111      for ( count = 0; count < nr_pt_pages; count++ ) 
  14.112      {
  14.113          l1e_remove_flags(*l1tab, _PAGE_RW);
  14.114 -        page = pfn_to_page(l1e_get_pfn(*l1tab));
  14.115 +        page = mfn_to_page(l1e_get_pfn(*l1tab));
  14.116  
  14.117          /* Read-only mapping + PGC_allocated + page-table page. */
  14.118          page->count_info         = PGC_allocated | 3;
  14.119 @@ -640,11 +640,11 @@ int construct_dom0(struct domain *d,
  14.120      memset(si, 0, PAGE_SIZE);
  14.121      si->nr_pages = nr_pages;
  14.122  
  14.123 -    si->shared_info = virt_to_phys(d->shared_info);
  14.124 +    si->shared_info = virt_to_maddr(d->shared_info);
  14.125      if ( opt_dom0_translate )
  14.126      {
  14.127          si->shared_info  = max_page << PAGE_SHIFT;
  14.128 -        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, max_page);
  14.129 +        set_pfn_from_mfn(virt_to_maddr(d->shared_info) >> PAGE_SHIFT, max_page);
  14.130      }
  14.131  
  14.132      si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
  14.133 @@ -672,7 +672,7 @@ int construct_dom0(struct domain *d,
  14.134              panic("Not enough RAM for DOM0 reservation.\n");
  14.135          while ( pfn < d->tot_pages )
  14.136          {
  14.137 -            mfn = page_to_pfn(page);
  14.138 +            mfn = page_to_mfn(page);
  14.139  #ifndef NDEBUG
  14.140  #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
  14.141  #endif
    15.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Feb 01 15:01:04 2006 +0000
    15.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Feb 01 16:28:50 2006 +0100
    15.3 @@ -59,18 +59,18 @@ static void hvm_map_io_shared_page(struc
    15.4      unsigned char e820_map_nr;
    15.5      struct e820entry *e820entry;
    15.6      unsigned char *p;
    15.7 -    unsigned long mpfn;
    15.8 +    unsigned long mfn;
    15.9      unsigned long gpfn = 0;
   15.10  
   15.11      local_flush_tlb_pge();
   15.12  
   15.13 -    mpfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
   15.14 -    if (mpfn == INVALID_MFN) {
   15.15 +    mfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
   15.16 +    if (mfn == INVALID_MFN) {
   15.17          printk("Can not find E820 memory map page for HVM domain.\n");
   15.18          domain_crash_synchronous();
   15.19      }
   15.20  
   15.21 -    p = map_domain_page(mpfn);
   15.22 +    p = map_domain_page(mfn);
   15.23      if (p == NULL) {
   15.24          printk("Can not map E820 memory map page for HVM domain.\n");
   15.25          domain_crash_synchronous();
   15.26 @@ -97,13 +97,13 @@ static void hvm_map_io_shared_page(struc
   15.27      unmap_domain_page(p);
   15.28  
   15.29      /* Initialise shared page */
   15.30 -    mpfn = get_mfn_from_pfn(gpfn);
   15.31 -    if (mpfn == INVALID_MFN) {
   15.32 +    mfn = get_mfn_from_pfn(gpfn);
   15.33 +    if (mfn == INVALID_MFN) {
   15.34          printk("Can not find io request shared page for HVM domain.\n");
   15.35          domain_crash_synchronous();
   15.36      }
   15.37  
   15.38 -    p = map_domain_page_global(mpfn);
   15.39 +    p = map_domain_page_global(mfn);
   15.40      if (p == NULL) {
   15.41          printk("Can not map io request shared page for HVM domain.\n");
   15.42          domain_crash_synchronous();
   15.43 @@ -140,16 +140,16 @@ static int validate_hvm_info(struct hvm_
   15.44  static void hvm_get_info(struct domain *d)
   15.45  {
   15.46      unsigned char *p;
   15.47 -    unsigned long mpfn;
   15.48 +    unsigned long mfn;
   15.49      struct hvm_info_table *t;
   15.50  
   15.51 -    mpfn = get_mfn_from_pfn(HVM_INFO_PFN);
   15.52 -    if ( mpfn == INVALID_MFN ) {
   15.53 +    mfn = get_mfn_from_pfn(HVM_INFO_PFN);
   15.54 +    if ( mfn == INVALID_MFN ) {
   15.55          printk("Can not get info page mfn for HVM domain.\n");
   15.56          domain_crash_synchronous();
   15.57      }
   15.58  
   15.59 -    p = map_domain_page(mpfn);
   15.60 +    p = map_domain_page(mfn);
   15.61      if ( p == NULL ) {
   15.62          printk("Can not map info page for HVM domain.\n");
   15.63          domain_crash_synchronous();
    16.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 01 15:01:04 2006 +0000
    16.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 01 16:28:50 2006 +0100
    16.3 @@ -1365,7 +1365,7 @@ static int svm_set_cr0(unsigned long val
    16.4          /* The guest CR3 must be pointing to the guest physical. */
    16.5          if (!VALID_MFN(mfn = 
    16.6                      get_mfn_from_pfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT))
    16.7 -                || !get_page(pfn_to_page(mfn), v->domain))
    16.8 +                || !get_page(mfn_to_page(mfn), v->domain))
    16.9          {
   16.10              printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
   16.11              domain_crash_synchronous(); /* need to take a clean path */
   16.12 @@ -1435,7 +1435,7 @@ static int svm_set_cr0(unsigned long val
   16.13              unsigned long old_base_mfn;
   16.14              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   16.15              if (old_base_mfn)
   16.16 -                put_page(pfn_to_page(old_base_mfn));
   16.17 +                put_page(mfn_to_page(old_base_mfn));
   16.18  	}
   16.19  #endif
   16.20          /* Now arch.guest_table points to machine physical. */
   16.21 @@ -1571,7 +1571,7 @@ static int mov_to_cr(int gpreg, int cr, 
   16.22              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   16.23              if (((value >> PAGE_SHIFT) > v->domain->max_pages) 
   16.24                      || !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT))
   16.25 -                    || !get_page(pfn_to_page(mfn), v->domain))
   16.26 +                    || !get_page(mfn_to_page(mfn), v->domain))
   16.27              {
   16.28                  printk("Invalid CR3 value=%lx\n", value);
   16.29                  domain_crash_synchronous(); /* need to take a clean path */
   16.30 @@ -1581,7 +1581,7 @@ static int mov_to_cr(int gpreg, int cr, 
   16.31              v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   16.32  
   16.33              if (old_base_mfn)
   16.34 -                put_page(pfn_to_page(old_base_mfn));
   16.35 +                put_page(mfn_to_page(old_base_mfn));
   16.36  
   16.37              update_pagetables(v);
   16.38              
    17.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 01 15:01:04 2006 +0000
    17.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 01 16:28:50 2006 +0100
    17.3 @@ -155,8 +155,8 @@ static int construct_vmcb_controls(struc
    17.4      arch_svm->iopm = iopm;
    17.5      arch_svm->msrpm = msrpm;
    17.6  
    17.7 -    vmcb->iopm_base_pa = (u64) virt_to_phys(iopm);
    17.8 -    vmcb->msrpm_base_pa = (u64) virt_to_phys(msrpm);
    17.9 +    vmcb->iopm_base_pa = (u64) virt_to_maddr(iopm);
   17.10 +    vmcb->msrpm_base_pa = (u64) virt_to_maddr(msrpm);
   17.11  
   17.12      return 0;
   17.13  }
   17.14 @@ -361,11 +361,11 @@ int construct_vmcb(struct arch_svm_struc
   17.15          goto err_out;
   17.16      }
   17.17  
   17.18 -    phys_hsa = (u64) virt_to_phys(hsa);
   17.19 +    phys_hsa = (u64) virt_to_maddr(hsa);
   17.20      arch_svm->host_save_area = hsa;
   17.21      arch_svm->host_save_pa   = phys_hsa;
   17.22  
   17.23 -    arch_svm->vmcb_pa  = (u64) virt_to_phys(arch_svm->vmcb);
   17.24 +    arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
   17.25  
   17.26      if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
   17.27      {
    18.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 01 15:01:04 2006 +0000
    18.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 01 16:28:50 2006 +0100
    18.3 @@ -107,8 +107,8 @@ static inline int construct_vmcs_control
    18.4      clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    18.5      memset(io_bitmap_b, 0xff, 0x1000);
    18.6  
    18.7 -    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_phys(io_bitmap_a));
    18.8 -    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_phys(io_bitmap_b));
    18.9 +    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a));
   18.10 +    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b));
   18.11  
   18.12      arch_vmx->io_bitmap_a = io_bitmap_a;
   18.13      arch_vmx->io_bitmap_b = io_bitmap_b;
   18.14 @@ -405,7 +405,7 @@ static int construct_vmcs(struct arch_vm
   18.15          rc = -ENOMEM;
   18.16          goto err_out;
   18.17      }
   18.18 -    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
   18.19 +    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
   18.20  
   18.21      if ((error = __vmpclear(vmcs_phys_ptr))) {
   18.22          printk("construct_vmcs: VMCLEAR failed\n");
   18.23 @@ -474,9 +474,9 @@ int modify_vmcs(struct arch_vmx_struct *
   18.24  {
   18.25      int error;
   18.26      u64 vmcs_phys_ptr, old, old_phys_ptr;
   18.27 -    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
   18.28 +    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
   18.29  
   18.30 -    old_phys_ptr = virt_to_phys(&old);
   18.31 +    old_phys_ptr = virt_to_maddr(&old);
   18.32      __vmptrst(old_phys_ptr);
   18.33      if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
   18.34          printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
   18.35 @@ -512,14 +512,14 @@ void arch_vmx_do_resume(struct vcpu *v)
   18.36  {
   18.37      if ( v->arch.hvm_vmx.launch_cpu == smp_processor_id() )
   18.38      {
   18.39 -        load_vmcs(&v->arch.hvm_vmx, virt_to_phys(v->arch.hvm_vmx.vmcs));
   18.40 +        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
   18.41          vmx_do_resume(v);
   18.42          reset_stack_and_jump(vmx_asm_do_resume);
   18.43      }
   18.44      else
   18.45      {
   18.46 -        __vmpclear(virt_to_phys(v->arch.hvm_vmx.vmcs));
   18.47 -        load_vmcs(&v->arch.hvm_vmx, virt_to_phys(v->arch.hvm_vmx.vmcs));
   18.48 +        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
   18.49 +        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
   18.50          vmx_do_resume(v);
   18.51          vmx_set_host_env(v);
   18.52          v->arch.hvm_vmx.launch_cpu = smp_processor_id();
    19.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 01 15:01:04 2006 +0000
    19.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 01 16:28:50 2006 +0100
    19.3 @@ -495,7 +495,7 @@ int start_vmx(void)
    19.4          return 0;
    19.5      }
    19.6  
    19.7 -    phys_vmcs = (u64) virt_to_phys(vmcs);
    19.8 +    phys_vmcs = (u64) virt_to_maddr(vmcs);
    19.9  
   19.10      if (!(__vmxon(phys_vmcs))) {
   19.11          printk("VMXON is done\n");
   19.12 @@ -987,12 +987,12 @@ vmx_world_restore(struct vcpu *v, struct
   19.13              return 0;
   19.14          }
   19.15          mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
   19.16 -        if(!get_page(pfn_to_page(mfn), v->domain))
   19.17 +        if(!get_page(mfn_to_page(mfn), v->domain))
   19.18                  return 0;
   19.19          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   19.20          v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   19.21          if (old_base_mfn)
   19.22 -             put_page(pfn_to_page(old_base_mfn));
   19.23 +             put_page(mfn_to_page(old_base_mfn));
   19.24          update_pagetables(v);
   19.25          /*
   19.26           * arch.shadow_table should now hold the next CR3 for shadow
   19.27 @@ -1159,7 +1159,7 @@ static int vmx_set_cr0(unsigned long val
   19.28           */
   19.29          if ( !VALID_MFN(mfn = get_mfn_from_pfn(
   19.30              v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
   19.31 -             !get_page(pfn_to_page(mfn), v->domain) )
   19.32 +             !get_page(mfn_to_page(mfn), v->domain) )
   19.33          {
   19.34              printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
   19.35              domain_crash_synchronous(); /* need to take a clean path */
   19.36 @@ -1232,7 +1232,7 @@ static int vmx_set_cr0(unsigned long val
   19.37  
   19.38      if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled)
   19.39          if(v->arch.hvm_vmx.cpu_cr3) {
   19.40 -            put_page(pfn_to_page(get_mfn_from_pfn(
   19.41 +            put_page(mfn_to_page(get_mfn_from_pfn(
   19.42                        v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
   19.43              v->arch.guest_table = mk_pagetable(0);
   19.44          }
   19.45 @@ -1378,7 +1378,7 @@ static int mov_to_cr(int gp, int cr, str
   19.46              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   19.47              if ( ((value >> PAGE_SHIFT) > v->domain->max_pages ) ||
   19.48                   !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
   19.49 -                 !get_page(pfn_to_page(mfn), v->domain) )
   19.50 +                 !get_page(mfn_to_page(mfn), v->domain) )
   19.51              {
   19.52                  printk("Invalid CR3 value=%lx", value);
   19.53                  domain_crash_synchronous(); /* need to take a clean path */
   19.54 @@ -1386,7 +1386,7 @@ static int mov_to_cr(int gp, int cr, str
   19.55              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   19.56              v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   19.57              if (old_base_mfn)
   19.58 -                put_page(pfn_to_page(old_base_mfn));
   19.59 +                put_page(mfn_to_page(old_base_mfn));
   19.60              update_pagetables(v);
   19.61              /*
   19.62               * arch.shadow_table should now hold the next CR3 for shadow
    20.1 --- a/xen/arch/x86/mm.c	Wed Feb 01 15:01:04 2006 +0000
    20.2 +++ b/xen/arch/x86/mm.c	Wed Feb 01 16:28:50 2006 +0100
    20.3 @@ -121,8 +121,8 @@
    20.4   */
    20.5  #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
    20.6  
    20.7 -static void free_l2_table(struct pfn_info *page);
    20.8 -static void free_l1_table(struct pfn_info *page);
    20.9 +static void free_l2_table(struct page_info *page);
   20.10 +static void free_l1_table(struct page_info *page);
   20.11  
   20.12  static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
   20.13                          unsigned long type);
   20.14 @@ -148,27 +148,27 @@ static struct {
   20.15  static struct domain *dom_xen, *dom_io;
   20.16  
   20.17  /* Frame table and its size in pages. */
   20.18 -struct pfn_info *frame_table;
   20.19 +struct page_info *frame_table;
   20.20  unsigned long max_page;
   20.21  unsigned long total_pages;
   20.22  
   20.23  void __init init_frametable(void)
   20.24  {
   20.25 -    unsigned long nr_pages, page_step, i, pfn;
   20.26 -
   20.27 -    frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
   20.28 +    unsigned long nr_pages, page_step, i, mfn;
   20.29 +
   20.30 +    frame_table = (struct page_info *)FRAMETABLE_VIRT_START;
   20.31  
   20.32      nr_pages  = PFN_UP(max_page * sizeof(*frame_table));
   20.33      page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
   20.34  
   20.35      for ( i = 0; i < nr_pages; i += page_step )
   20.36      {
   20.37 -        pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
   20.38 -        if ( pfn == 0 )
   20.39 +        mfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
   20.40 +        if ( mfn == 0 )
   20.41              panic("Not enough memory for frame table\n");
   20.42          map_pages_to_xen(
   20.43              FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
   20.44 -            pfn, page_step, PAGE_HYPERVISOR);
   20.45 +            mfn, page_step, PAGE_HYPERVISOR);
   20.46      }
   20.47  
   20.48      memset(frame_table, 0, nr_pages << PAGE_SHIFT);
   20.49 @@ -179,7 +179,7 @@ void arch_init_memory(void)
   20.50      extern void subarch_init_memory(struct domain *);
   20.51  
   20.52      unsigned long i, pfn, rstart_pfn, rend_pfn;
   20.53 -    struct pfn_info *page;
   20.54 +    struct page_info *page;
   20.55  
   20.56      memset(percpu_info, 0, sizeof(percpu_info));
   20.57  
   20.58 @@ -194,7 +194,7 @@ void arch_init_memory(void)
   20.59  
   20.60      /*
   20.61       * Initialise our DOMID_IO domain.
   20.62 -     * This domain owns I/O pages that are within the range of the pfn_info
   20.63 +     * This domain owns I/O pages that are within the range of the page_info
   20.64       * array. Mappings occur at the priv of the caller.
   20.65       */
   20.66      dom_io = alloc_domain();
   20.67 @@ -204,7 +204,7 @@ void arch_init_memory(void)
   20.68      /* First 1MB of RAM is historically marked as I/O. */
   20.69      for ( i = 0; i < 0x100; i++ )
   20.70      {
   20.71 -        page = pfn_to_page(i);
   20.72 +        page = mfn_to_page(i);
   20.73          page->count_info        = PGC_allocated | 1;
   20.74          page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
   20.75          page_set_owner(page, dom_io);
   20.76 @@ -220,8 +220,8 @@ void arch_init_memory(void)
   20.77          rend_pfn   = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
   20.78          for ( ; pfn < rstart_pfn; pfn++ )
   20.79          {
   20.80 -            BUG_ON(!pfn_valid(pfn));
   20.81 -            page = pfn_to_page(pfn);
   20.82 +            BUG_ON(!mfn_valid(pfn));
   20.83 +            page = mfn_to_page(pfn);
   20.84              page->count_info        = PGC_allocated | 1;
   20.85              page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
   20.86              page_set_owner(page, dom_io);
   20.87 @@ -243,7 +243,7 @@ void invalidate_shadow_ldt(struct vcpu *
   20.88  {
   20.89      int i;
   20.90      unsigned long pfn;
   20.91 -    struct pfn_info *page;
   20.92 +    struct page_info *page;
   20.93      
   20.94      if ( v->arch.shadow_ldt_mapcnt == 0 )
   20.95          return;
   20.96 @@ -255,7 +255,7 @@ void invalidate_shadow_ldt(struct vcpu *
   20.97          pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
   20.98          if ( pfn == 0 ) continue;
   20.99          v->arch.perdomain_ptes[i] = l1e_empty();
  20.100 -        page = pfn_to_page(pfn);
  20.101 +        page = mfn_to_page(pfn);
  20.102          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
  20.103          ASSERT_PAGE_IS_DOMAIN(page, v->domain);
  20.104          put_page_and_type(page);
  20.105 @@ -266,12 +266,12 @@ void invalidate_shadow_ldt(struct vcpu *
  20.106  }
  20.107  
  20.108  
  20.109 -static int alloc_segdesc_page(struct pfn_info *page)
  20.110 +static int alloc_segdesc_page(struct page_info *page)
  20.111  {
  20.112      struct desc_struct *descs;
  20.113      int i;
  20.114  
  20.115 -    descs = map_domain_page(page_to_pfn(page));
  20.116 +    descs = map_domain_page(page_to_mfn(page));
  20.117  
  20.118      for ( i = 0; i < 512; i++ )
  20.119          if ( unlikely(!check_descriptor(&descs[i])) )
  20.120 @@ -291,7 +291,7 @@ int map_ldt_shadow_page(unsigned int off
  20.121  {
  20.122      struct vcpu *v = current;
  20.123      struct domain *d = v->domain;
  20.124 -    unsigned long gpfn, gmfn;
  20.125 +    unsigned long gmfn, mfn;
  20.126      l1_pgentry_t l1e, nl1e;
  20.127      unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
  20.128      int res;
  20.129 @@ -316,25 +316,25 @@ int map_ldt_shadow_page(unsigned int off
  20.130      if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
  20.131          return 0;
  20.132  
  20.133 -    gpfn = l1e_get_pfn(l1e);
  20.134 -    gmfn = __gpfn_to_mfn(d, gpfn);
  20.135 -    if ( unlikely(!VALID_MFN(gmfn)) )
  20.136 +    gmfn = l1e_get_pfn(l1e);
  20.137 +    mfn = gmfn_to_mfn(d, gmfn);
  20.138 +    if ( unlikely(!VALID_MFN(mfn)) )
  20.139          return 0;
  20.140  
  20.141 -    res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
  20.142 +    res = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
  20.143  
  20.144      if ( !res && unlikely(shadow_mode_refcounts(d)) )
  20.145      {
  20.146          shadow_lock(d);
  20.147 -        shadow_remove_all_write_access(d, gpfn, gmfn);
  20.148 -        res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
  20.149 +        shadow_remove_all_write_access(d, gmfn, mfn);
  20.150 +        res = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
  20.151          shadow_unlock(d);
  20.152      }
  20.153  
  20.154      if ( unlikely(!res) )
  20.155          return 0;
  20.156  
  20.157 -    nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
  20.158 +    nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
  20.159  
  20.160      v->arch.perdomain_ptes[off + 16] = nl1e;
  20.161      v->arch.shadow_ldt_mapcnt++;
  20.162 @@ -345,9 +345,9 @@ int map_ldt_shadow_page(unsigned int off
  20.163  
  20.164  static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
  20.165  {
  20.166 -    struct pfn_info *page = pfn_to_page(page_nr);
  20.167 -
  20.168 -    if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
  20.169 +    struct page_info *page = mfn_to_page(page_nr);
  20.170 +
  20.171 +    if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
  20.172      {
  20.173          MEM_LOG("Could not get page ref for pfn %lx", page_nr);
  20.174          return 0;
  20.175 @@ -361,7 +361,7 @@ static int get_page_and_type_from_pagenr
  20.176                                           unsigned long type,
  20.177                                           struct domain *d)
  20.178  {
  20.179 -    struct pfn_info *page = pfn_to_page(page_nr);
  20.180 +    struct page_info *page = mfn_to_page(page_nr);
  20.181  
  20.182      if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
  20.183          return 0;
  20.184 @@ -392,7 +392,7 @@ get_linear_pagetable(
  20.185      root_pgentry_t re, unsigned long re_pfn, struct domain *d)
  20.186  {
  20.187      unsigned long x, y;
  20.188 -    struct pfn_info *page;
  20.189 +    struct page_info *page;
  20.190      unsigned long pfn;
  20.191  
  20.192      ASSERT( !shadow_mode_refcounts(d) );
  20.193 @@ -413,7 +413,7 @@ get_linear_pagetable(
  20.194           * Make sure that the mapped frame is an already-validated L2 table. 
  20.195           * If so, atomically increment the count (checking for overflow).
  20.196           */
  20.197 -        page = pfn_to_page(pfn);
  20.198 +        page = mfn_to_page(pfn);
  20.199          y = page->u.inuse.type_info;
  20.200          do {
  20.201              x = y;
  20.202 @@ -436,7 +436,7 @@ get_page_from_l1e(
  20.203      l1_pgentry_t l1e, struct domain *d)
  20.204  {
  20.205      unsigned long mfn = l1e_get_pfn(l1e);
  20.206 -    struct pfn_info *page = pfn_to_page(mfn);
  20.207 +    struct page_info *page = mfn_to_page(mfn);
  20.208      int okay;
  20.209  
  20.210      if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
  20.211 @@ -448,7 +448,7 @@ get_page_from_l1e(
  20.212          return 0;
  20.213      }
  20.214  
  20.215 -    if ( unlikely(!pfn_valid(mfn)) ||
  20.216 +    if ( unlikely(!mfn_valid(mfn)) ||
  20.217           unlikely(page_get_owner(page) == dom_io) )
  20.218      {
  20.219          /* DOMID_IO reverts to caller for privilege checks. */
  20.220 @@ -462,7 +462,7 @@ get_page_from_l1e(
  20.221          }
  20.222  
  20.223          /* No reference counting for out-of-range I/O pages. */
  20.224 -        if ( !pfn_valid(mfn) )
  20.225 +        if ( !mfn_valid(mfn) )
  20.226              return 1;
  20.227  
  20.228          d = dom_io;
  20.229 @@ -586,11 +586,11 @@ get_page_from_l4e(
  20.230  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
  20.231  {
  20.232      unsigned long    pfn  = l1e_get_pfn(l1e);
  20.233 -    struct pfn_info *page = pfn_to_page(pfn);
  20.234 +    struct page_info *page = mfn_to_page(pfn);
  20.235      struct domain   *e;
  20.236      struct vcpu     *v;
  20.237  
  20.238 -    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
  20.239 +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(pfn) )
  20.240          return;
  20.241  
  20.242      e = page_get_owner(page);
  20.243 @@ -644,7 +644,7 @@ static void put_page_from_l2e(l2_pgentry
  20.244  {
  20.245      if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
  20.246           (l2e_get_pfn(l2e) != pfn) )
  20.247 -        put_page_and_type(pfn_to_page(l2e_get_pfn(l2e)));
  20.248 +        put_page_and_type(mfn_to_page(l2e_get_pfn(l2e)));
  20.249  }
  20.250  
  20.251  
  20.252 @@ -654,7 +654,7 @@ static void put_page_from_l3e(l3_pgentry
  20.253  {
  20.254      if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
  20.255           (l3e_get_pfn(l3e) != pfn) )
  20.256 -        put_page_and_type(pfn_to_page(l3e_get_pfn(l3e)));
  20.257 +        put_page_and_type(mfn_to_page(l3e_get_pfn(l3e)));
  20.258  }
  20.259  
  20.260  #endif
  20.261 @@ -665,16 +665,16 @@ static void put_page_from_l4e(l4_pgentry
  20.262  {
  20.263      if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
  20.264           (l4e_get_pfn(l4e) != pfn) )
  20.265 -        put_page_and_type(pfn_to_page(l4e_get_pfn(l4e)));
  20.266 +        put_page_and_type(mfn_to_page(l4e_get_pfn(l4e)));
  20.267  }
  20.268  
  20.269  #endif
  20.270  
  20.271  
  20.272 -static int alloc_l1_table(struct pfn_info *page)
  20.273 +static int alloc_l1_table(struct page_info *page)
  20.274  {
  20.275      struct domain *d = page_get_owner(page);
  20.276 -    unsigned long  pfn = page_to_pfn(page);
  20.277 +    unsigned long  pfn = page_to_mfn(page);
  20.278      l1_pgentry_t  *pl1e;
  20.279      int            i;
  20.280  
  20.281 @@ -703,7 +703,7 @@ static int alloc_l1_table(struct pfn_inf
  20.282  #ifdef CONFIG_X86_PAE
  20.283  static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
  20.284  {
  20.285 -    struct pfn_info *page;
  20.286 +    struct page_info *page;
  20.287      l2_pgentry_t    *pl2e;
  20.288      l3_pgentry_t     l3e3;
  20.289      int              i;
  20.290 @@ -809,10 +809,10 @@ static inline int l3_backptr(
  20.291      ({ *(bp) = (unsigned long)(l2o) << L2_PAGETABLE_SHIFT; 1; })
  20.292  #endif
  20.293  
  20.294 -static int alloc_l2_table(struct pfn_info *page, unsigned long type)
  20.295 +static int alloc_l2_table(struct page_info *page, unsigned long type)
  20.296  {
  20.297      struct domain *d = page_get_owner(page);
  20.298 -    unsigned long  pfn = page_to_pfn(page);
  20.299 +    unsigned long  pfn = page_to_mfn(page);
  20.300      unsigned long  vaddr;
  20.301      l2_pgentry_t  *pl2e;
  20.302      int            i;
  20.303 @@ -863,10 +863,10 @@ static int alloc_l2_table(struct pfn_inf
  20.304  
  20.305  
  20.306  #if CONFIG_PAGING_LEVELS >= 3
  20.307 -static int alloc_l3_table(struct pfn_info *page, unsigned long type)
  20.308 +static int alloc_l3_table(struct page_info *page, unsigned long type)
  20.309  {
  20.310      struct domain *d = page_get_owner(page);
  20.311 -    unsigned long  pfn = page_to_pfn(page);
  20.312 +    unsigned long  pfn = page_to_mfn(page);
  20.313      unsigned long  vaddr;
  20.314      l3_pgentry_t  *pl3e;
  20.315      int            i;
  20.316 @@ -915,10 +915,10 @@ static int alloc_l3_table(struct pfn_inf
  20.317  #endif
  20.318  
  20.319  #if CONFIG_PAGING_LEVELS >= 4
  20.320 -static int alloc_l4_table(struct pfn_info *page, unsigned long type)
  20.321 +static int alloc_l4_table(struct page_info *page, unsigned long type)
  20.322  {
  20.323      struct domain *d = page_get_owner(page);
  20.324 -    unsigned long  pfn = page_to_pfn(page);
  20.325 +    unsigned long  pfn = page_to_mfn(page);
  20.326      l4_pgentry_t  *pl4e = page_to_virt(page);
  20.327      unsigned long vaddr;
  20.328      int            i;
  20.329 @@ -965,10 +965,10 @@ static int alloc_l4_table(struct pfn_inf
  20.330  #endif
  20.331  
  20.332  
  20.333 -static void free_l1_table(struct pfn_info *page)
  20.334 +static void free_l1_table(struct page_info *page)
  20.335  {
  20.336      struct domain *d = page_get_owner(page);
  20.337 -    unsigned long pfn = page_to_pfn(page);
  20.338 +    unsigned long pfn = page_to_mfn(page);
  20.339      l1_pgentry_t *pl1e;
  20.340      int i;
  20.341  
  20.342 @@ -982,9 +982,9 @@ static void free_l1_table(struct pfn_inf
  20.343  }
  20.344  
  20.345  
  20.346 -static void free_l2_table(struct pfn_info *page)
  20.347 +static void free_l2_table(struct page_info *page)
  20.348  {
  20.349 -    unsigned long pfn = page_to_pfn(page);
  20.350 +    unsigned long pfn = page_to_mfn(page);
  20.351      l2_pgentry_t *pl2e;
  20.352      int i;
  20.353  
  20.354 @@ -1000,9 +1000,9 @@ static void free_l2_table(struct pfn_inf
  20.355  
  20.356  #if CONFIG_PAGING_LEVELS >= 3
  20.357  
  20.358 -static void free_l3_table(struct pfn_info *page)
  20.359 +static void free_l3_table(struct page_info *page)
  20.360  {
  20.361 -    unsigned long pfn = page_to_pfn(page);
  20.362 +    unsigned long pfn = page_to_mfn(page);
  20.363      l3_pgentry_t *pl3e;
  20.364      int           i;
  20.365  
  20.366 @@ -1019,9 +1019,9 @@ static void free_l3_table(struct pfn_inf
  20.367  
  20.368  #if CONFIG_PAGING_LEVELS >= 4
  20.369  
  20.370 -static void free_l4_table(struct pfn_info *page)
  20.371 +static void free_l4_table(struct page_info *page)
  20.372  {
  20.373 -    unsigned long pfn = page_to_pfn(page);
  20.374 +    unsigned long pfn = page_to_mfn(page);
  20.375      l4_pgentry_t *pl4e = page_to_virt(page);
  20.376      int           i;
  20.377  
  20.378 @@ -1288,12 +1288,12 @@ static int mod_l4_entry(l4_pgentry_t *pl
  20.379  
  20.380  #endif
  20.381  
  20.382 -int alloc_page_type(struct pfn_info *page, unsigned long type)
  20.383 +int alloc_page_type(struct page_info *page, unsigned long type)
  20.384  {
  20.385      struct domain *owner = page_get_owner(page);
  20.386  
  20.387      if ( owner != NULL )
  20.388 -        mark_dirty(owner, page_to_pfn(page));
  20.389 +        mark_dirty(owner, page_to_mfn(page));
  20.390  
  20.391      switch ( type & PGT_type_mask )
  20.392      {
  20.393 @@ -1319,10 +1319,10 @@ int alloc_page_type(struct pfn_info *pag
  20.394  }
  20.395  
  20.396  
  20.397 -void free_page_type(struct pfn_info *page, unsigned long type)
  20.398 +void free_page_type(struct page_info *page, unsigned long type)
  20.399  {
  20.400      struct domain *owner = page_get_owner(page);
  20.401 -    unsigned long gpfn;
  20.402 +    unsigned long gmfn;
  20.403  
  20.404      if ( likely(owner != NULL) )
  20.405      {
  20.406 @@ -1337,14 +1337,14 @@ void free_page_type(struct pfn_info *pag
  20.407          {
  20.408              /* Raw page tables are rewritten during save/restore. */
  20.409              if ( !shadow_mode_translate(owner) )
  20.410 -                mark_dirty(owner, page_to_pfn(page));
  20.411 +                mark_dirty(owner, page_to_mfn(page));
  20.412  
  20.413              if ( shadow_mode_refcounts(owner) )
  20.414                  return;
  20.415  
  20.416 -            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
  20.417 -            ASSERT(VALID_M2P(gpfn));
  20.418 -            remove_shadow(owner, gpfn, type & PGT_type_mask);
  20.419 +            gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
  20.420 +            ASSERT(VALID_M2P(gmfn));
  20.421 +            remove_shadow(owner, gmfn, type & PGT_type_mask);
  20.422          }
  20.423      }
  20.424  
  20.425 @@ -1372,13 +1372,13 @@ void free_page_type(struct pfn_info *pag
  20.426  
  20.427      default:
  20.428          printk("%s: type %lx pfn %lx\n",__FUNCTION__,
  20.429 -               type, page_to_pfn(page));
  20.430 +               type, page_to_mfn(page));
  20.431          BUG();
  20.432      }
  20.433  }
  20.434  
  20.435  
  20.436 -void put_page_type(struct pfn_info *page)
  20.437 +void put_page_type(struct page_info *page)
  20.438  {
  20.439      unsigned long nx, x, y = page->u.inuse.type_info;
  20.440  
  20.441 @@ -1433,7 +1433,7 @@ void put_page_type(struct pfn_info *page
  20.442  }
  20.443  
  20.444  
  20.445 -int get_page_type(struct pfn_info *page, unsigned long type)
  20.446 +int get_page_type(struct page_info *page, unsigned long type)
  20.447  {
  20.448      unsigned long nx, x, y = page->u.inuse.type_info;
  20.449  
  20.450 @@ -1443,7 +1443,7 @@ int get_page_type(struct pfn_info *page,
  20.451          nx = x + 1;
  20.452          if ( unlikely((nx & PGT_count_mask) == 0) )
  20.453          {
  20.454 -            MEM_LOG("Type count overflow on pfn %lx", page_to_pfn(page));
  20.455 +            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
  20.456              return 0;
  20.457          }
  20.458          else if ( unlikely((x & PGT_count_mask) == 0) )
  20.459 @@ -1506,8 +1506,8 @@ int get_page_type(struct pfn_info *page,
  20.460                          MEM_LOG("Bad type (saw %" PRtype_info
  20.461                                  " != exp %" PRtype_info ") "
  20.462                                  "for mfn %lx (pfn %lx)",
  20.463 -                                x, type, page_to_pfn(page),
  20.464 -                                get_pfn_from_mfn(page_to_pfn(page)));
  20.465 +                                x, type, page_to_mfn(page),
  20.466 +                                get_pfn_from_mfn(page_to_mfn(page)));
  20.467                      return 0;
  20.468                  }
  20.469                  else if ( (x & PGT_va_mask) == PGT_va_mutable )
  20.470 @@ -1547,7 +1547,7 @@ int get_page_type(struct pfn_info *page,
  20.471          {
  20.472              MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %"
  20.473                      PRtype_info ": caf=%08x taf=%" PRtype_info,
  20.474 -                    page_to_pfn(page), get_pfn_from_mfn(page_to_pfn(page)),
  20.475 +                    page_to_mfn(page), get_pfn_from_mfn(page_to_mfn(page)),
  20.476                      type, page->count_info, page->u.inuse.type_info);
   20.477              /* No one else can get a reference. We hold the only ref. */
  20.478              page->u.inuse.type_info = 0;
  20.479 @@ -1585,9 +1585,9 @@ int new_guest_cr3(unsigned long mfn)
  20.480          write_ptbase(v);
  20.481  
  20.482          if ( shadow_mode_refcounts(d) )
  20.483 -            put_page(pfn_to_page(old_base_mfn));
  20.484 +            put_page(mfn_to_page(old_base_mfn));
  20.485          else
  20.486 -            put_page_and_type(pfn_to_page(old_base_mfn));
  20.487 +            put_page_and_type(mfn_to_page(old_base_mfn));
  20.488  
  20.489          /* CR3 also holds a ref to its shadow... */
  20.490          if ( shadow_mode_enabled(d) )
  20.491 @@ -1596,7 +1596,7 @@ int new_guest_cr3(unsigned long mfn)
  20.492                  put_shadow_ref(v->arch.monitor_shadow_ref);
  20.493              v->arch.monitor_shadow_ref =
  20.494                  pagetable_get_pfn(v->arch.monitor_table);
  20.495 -            ASSERT(!page_get_owner(pfn_to_page(v->arch.monitor_shadow_ref)));
  20.496 +            ASSERT(!page_get_owner(mfn_to_page(v->arch.monitor_shadow_ref)));
  20.497              get_shadow_ref(v->arch.monitor_shadow_ref);
  20.498          }
  20.499      }
  20.500 @@ -1717,7 +1717,7 @@ int do_mmuext_op(
  20.501      struct mmuext_op op;
  20.502      int rc = 0, i = 0, okay, cpu = smp_processor_id();
  20.503      unsigned long mfn, type, done = 0;
  20.504 -    struct pfn_info *page;
  20.505 +    struct page_info *page;
  20.506      struct vcpu *v = current;
  20.507      struct domain *d = v->domain;
  20.508  
  20.509 @@ -1763,7 +1763,7 @@ int do_mmuext_op(
  20.510  
  20.511          okay = 1;
  20.512          mfn  = op.arg1.mfn;
  20.513 -        page = pfn_to_page(mfn);
  20.514 +        page = mfn_to_page(mfn);
  20.515  
  20.516          switch ( op.cmd )
  20.517          {
  20.518 @@ -1827,7 +1827,7 @@ int do_mmuext_op(
  20.519              break;
  20.520  
  20.521          case MMUEXT_NEW_BASEPTR:
  20.522 -            mfn = __gpfn_to_mfn(current->domain, mfn);
  20.523 +            mfn = gmfn_to_mfn(current->domain, mfn);
  20.524              okay = new_guest_cr3(mfn);
  20.525              percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
  20.526              break;
  20.527 @@ -1846,7 +1846,7 @@ int do_mmuext_op(
  20.528                      pagetable_get_pfn(v->arch.guest_table_user);
  20.529                  v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
  20.530                  if ( old_mfn != 0 )
  20.531 -                    put_page_and_type(pfn_to_page(old_mfn));
  20.532 +                    put_page_and_type(mfn_to_page(old_mfn));
  20.533              }
  20.534              break;
  20.535  #endif
  20.536 @@ -1965,8 +1965,8 @@ int do_mmu_update(
  20.537  {
  20.538      struct mmu_update req;
  20.539      void *va;
  20.540 -    unsigned long gpfn, mfn;
  20.541 -    struct pfn_info *page;
  20.542 +    unsigned long gpfn, gmfn, mfn;
  20.543 +    struct page_info *page;
  20.544      int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
  20.545      unsigned int cmd, done = 0;
  20.546      struct vcpu *v = current;
  20.547 @@ -2034,8 +2034,8 @@ int do_mmu_update(
  20.548               */
  20.549          case MMU_NORMAL_PT_UPDATE:
  20.550  
  20.551 -            gpfn = req.ptr >> PAGE_SHIFT;
  20.552 -            mfn = __gpfn_to_mfn(d, gpfn);
  20.553 +            gmfn = req.ptr >> PAGE_SHIFT;
  20.554 +            mfn = gmfn_to_mfn(d, gmfn);
  20.555  
  20.556              if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
  20.557              {
  20.558 @@ -2046,7 +2046,7 @@ int do_mmu_update(
  20.559              va = map_domain_page_with_cache(mfn, &mapcache);
  20.560              va = (void *)((unsigned long)va +
  20.561                            (unsigned long)(req.ptr & ~PAGE_MASK));
  20.562 -            page = pfn_to_page(mfn);
  20.563 +            page = mfn_to_page(mfn);
  20.564  
  20.565              switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
  20.566              {
  20.567 @@ -2130,7 +2130,7 @@ int do_mmu_update(
  20.568                          if ( page_is_page_table(page) &&
  20.569                               !page_out_of_sync(page) )
  20.570                          {
  20.571 -                            shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
  20.572 +                            shadow_mark_mfn_out_of_sync(v, gmfn, mfn);
  20.573                          }
  20.574                      }
  20.575  
  20.576 @@ -2171,7 +2171,7 @@ int do_mmu_update(
  20.577  
  20.578              mark_dirty(FOREIGNDOM, mfn);
  20.579  
  20.580 -            put_page(pfn_to_page(mfn));
  20.581 +            put_page(mfn_to_page(mfn));
  20.582              break;
  20.583  
  20.584          default:
  20.585 @@ -2211,8 +2211,8 @@ static int create_grant_pte_mapping(
  20.586  {
  20.587      int rc = GNTST_okay;
  20.588      void *va;
  20.589 -    unsigned long gpfn, mfn;
  20.590 -    struct pfn_info *page;
  20.591 +    unsigned long gmfn, mfn;
  20.592 +    struct page_info *page;
  20.593      u32 type_info;
  20.594      l1_pgentry_t ol1e;
  20.595      struct domain *d = v->domain;
  20.596 @@ -2220,8 +2220,8 @@ static int create_grant_pte_mapping(
  20.597      ASSERT(spin_is_locked(&d->big_lock));
  20.598      ASSERT(!shadow_mode_refcounts(d));
  20.599  
  20.600 -    gpfn = pte_addr >> PAGE_SHIFT;
  20.601 -    mfn = __gpfn_to_mfn(d, gpfn);
  20.602 +    gmfn = pte_addr >> PAGE_SHIFT;
  20.603 +    mfn = gmfn_to_mfn(d, gmfn);
  20.604  
  20.605      if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
  20.606      {
  20.607 @@ -2231,7 +2231,7 @@ static int create_grant_pte_mapping(
  20.608      
  20.609      va = map_domain_page(mfn);
  20.610      va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
  20.611 -    page = pfn_to_page(mfn);
  20.612 +    page = mfn_to_page(mfn);
  20.613  
  20.614      type_info = page->u.inuse.type_info;
  20.615      if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
  20.616 @@ -2273,15 +2273,15 @@ static int destroy_grant_pte_mapping(
  20.617  {
  20.618      int rc = GNTST_okay;
  20.619      void *va;
  20.620 -    unsigned long gpfn, mfn;
  20.621 -    struct pfn_info *page;
  20.622 +    unsigned long gmfn, mfn;
  20.623 +    struct page_info *page;
  20.624      u32 type_info;
  20.625      l1_pgentry_t ol1e;
  20.626  
  20.627      ASSERT(!shadow_mode_refcounts(d));
  20.628  
  20.629 -    gpfn = addr >> PAGE_SHIFT;
  20.630 -    mfn = __gpfn_to_mfn(d, gpfn);
  20.631 +    gmfn = addr >> PAGE_SHIFT;
  20.632 +    mfn = gmfn_to_mfn(d, gmfn);
  20.633  
  20.634      if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
  20.635      {
  20.636 @@ -2291,7 +2291,7 @@ static int destroy_grant_pte_mapping(
  20.637      
  20.638      va = map_domain_page(mfn);
  20.639      va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
  20.640 -    page = pfn_to_page(mfn);
  20.641 +    page = mfn_to_page(mfn);
  20.642  
  20.643      type_info = page->u.inuse.type_info;
  20.644      if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
  20.645 @@ -2433,7 +2433,7 @@ int destroy_grant_host_mapping(
  20.646  }
  20.647  
  20.648  int steal_page_for_grant_transfer(
  20.649 -    struct domain *d, struct pfn_info *page)
  20.650 +    struct domain *d, struct page_info *page)
  20.651  {
  20.652      u32 _d, _nd, x, y;
  20.653  
  20.654 @@ -2453,7 +2453,7 @@ int steal_page_for_grant_transfer(
  20.655                       (1 | PGC_allocated)) || unlikely(_nd != _d)) { 
  20.656              DPRINTK("gnttab_transfer: Bad page %p: ed=%p(%u), sd=%p,"
  20.657                      " caf=%08x, taf=%" PRtype_info "\n", 
  20.658 -                    (void *) page_to_pfn(page),
  20.659 +                    (void *) page_to_mfn(page),
  20.660                      d, d->domain_id, unpickle_domptr(_nd), x, 
  20.661                      page->u.inuse.type_info);
  20.662              spin_unlock(&d->page_alloc_lock);
  20.663 @@ -2612,7 +2612,7 @@ void destroy_gdt(struct vcpu *v)
  20.664      for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
  20.665      {
  20.666          if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
  20.667 -            put_page_and_type(pfn_to_page(pfn));
  20.668 +            put_page_and_type(mfn_to_page(pfn));
  20.669          v->arch.perdomain_ptes[i] = l1e_empty();
  20.670          v->arch.guest_context.gdt_frames[i] = 0;
  20.671      }
  20.672 @@ -2635,9 +2635,9 @@ long set_gdt(struct vcpu *v,
  20.673  
  20.674      /* Check the pages in the new GDT. */
  20.675      for ( i = 0; i < nr_pages; i++ ) {
  20.676 -        pfn = frames[i] = __gpfn_to_mfn(d, frames[i]);
  20.677 +        pfn = frames[i] = gmfn_to_mfn(d, frames[i]);
  20.678          if ((pfn >= max_page) ||
  20.679 -            !get_page_and_type(pfn_to_page(pfn), d, PGT_gdt_page) )
  20.680 +            !get_page_and_type(mfn_to_page(pfn), d, PGT_gdt_page) )
  20.681              goto fail;
  20.682      }
  20.683  
  20.684 @@ -2657,7 +2657,7 @@ long set_gdt(struct vcpu *v,
  20.685  
  20.686   fail:
  20.687      while ( i-- > 0 )
  20.688 -        put_page_and_type(pfn_to_page(frames[i]));
  20.689 +        put_page_and_type(mfn_to_page(frames[i]));
  20.690      return -EINVAL;
  20.691  }
  20.692  
  20.693 @@ -2689,11 +2689,11 @@ long do_set_gdt(unsigned long *frame_lis
  20.694  long do_update_descriptor(u64 pa, u64 desc)
  20.695  {
  20.696      struct domain *dom = current->domain;
  20.697 -    unsigned long gpfn = pa >> PAGE_SHIFT;
  20.698 +    unsigned long gmfn = pa >> PAGE_SHIFT;
  20.699      unsigned long mfn;
  20.700      unsigned int  offset;
  20.701      struct desc_struct *gdt_pent, d;
  20.702 -    struct pfn_info *page;
  20.703 +    struct page_info *page;
  20.704      long ret = -EINVAL;
  20.705  
  20.706      offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(struct desc_struct);
  20.707 @@ -2702,7 +2702,7 @@ long do_update_descriptor(u64 pa, u64 de
  20.708  
  20.709      LOCK_BIGLOCK(dom);
  20.710  
  20.711 -    if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ||
  20.712 +    if ( !VALID_MFN(mfn = gmfn_to_mfn(dom, gmfn)) ||
  20.713           (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
  20.714           (mfn >= max_page) ||
  20.715           !check_descriptor(&d) )
  20.716 @@ -2711,7 +2711,7 @@ long do_update_descriptor(u64 pa, u64 de
  20.717          return -EINVAL;
  20.718      }
  20.719  
  20.720 -    page = pfn_to_page(mfn);
  20.721 +    page = mfn_to_page(mfn);
  20.722      if ( unlikely(!get_page(page, dom)) )
  20.723      {
  20.724          UNLOCK_BIGLOCK(dom);
  20.725 @@ -2742,7 +2742,7 @@ long do_update_descriptor(u64 pa, u64 de
  20.726          __mark_dirty(dom, mfn);
  20.727  
  20.728          if ( page_is_page_table(page) && !page_out_of_sync(page) )
  20.729 -            shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
  20.730 +            shadow_mark_mfn_out_of_sync(current, gmfn, mfn);
  20.731      }
  20.732  
  20.733      /* All is good so make the update. */
  20.734 @@ -2798,7 +2798,7 @@ long arch_memory_op(int op, void *arg)
  20.735          {
  20.736              d->arch.first_reserved_pfn = pfn = d->max_pages;
  20.737              guest_physmap_add_page(
  20.738 -                d, pfn + 0, virt_to_phys(d->shared_info) >> PAGE_SHIFT);
  20.739 +                d, pfn + 0, virt_to_maddr(d->shared_info) >> PAGE_SHIFT);
  20.740              for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  20.741                  guest_physmap_add_page(
  20.742                      d, pfn + 1 + i, gnttab_shared_mfn(d, d->grant_table, i));
  20.743 @@ -2977,7 +2977,7 @@ int revalidate_l1(
  20.744          if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
  20.745          {
  20.746              if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
  20.747 -                put_page_type(pfn_to_page(l1e_get_pfn(nl1e)));
  20.748 +                put_page_type(mfn_to_page(l1e_get_pfn(nl1e)));
  20.749              continue;
  20.750          }
  20.751  
  20.752 @@ -3110,13 +3110,13 @@ void ptwr_flush(struct domain *d, const 
  20.753  
  20.754  static int ptwr_emulated_update(
  20.755      unsigned long addr,
  20.756 -    physaddr_t old,
  20.757 -    physaddr_t val,
  20.758 +    paddr_t old,
  20.759 +    paddr_t val,
  20.760      unsigned int bytes,
  20.761      unsigned int do_cmpxchg)
  20.762  {
  20.763      unsigned long pfn, l1va;
  20.764 -    struct pfn_info *page;
  20.765 +    struct page_info *page;
  20.766      l1_pgentry_t pte, ol1e, nl1e, *pl1e;
  20.767      struct domain *d = current->domain;
  20.768  
  20.769 @@ -3129,25 +3129,25 @@ static int ptwr_emulated_update(
  20.770      }
  20.771  
  20.772      /* Turn a sub-word access into a full-word access. */
  20.773 -    if ( bytes != sizeof(physaddr_t) )
  20.774 +    if ( bytes != sizeof(paddr_t) )
  20.775      {
  20.776          int           rc;
  20.777 -        physaddr_t    full;
  20.778 -        unsigned int  offset = addr & (sizeof(physaddr_t)-1);
  20.779 +        paddr_t    full;
  20.780 +        unsigned int  offset = addr & (sizeof(paddr_t)-1);
  20.781  
  20.782          /* Align address; read full word. */
  20.783 -        addr &= ~(sizeof(physaddr_t)-1);
  20.784 +        addr &= ~(sizeof(paddr_t)-1);
  20.785          if ( (rc = x86_emulate_read_std(addr, (unsigned long *)&full,
  20.786 -                                        sizeof(physaddr_t))) )
  20.787 +                                        sizeof(paddr_t))) )
  20.788              return rc; 
  20.789          /* Mask out bits provided by caller. */
  20.790 -        full &= ~((((physaddr_t)1 << (bytes*8)) - 1) << (offset*8));
  20.791 +        full &= ~((((paddr_t)1 << (bytes*8)) - 1) << (offset*8));
  20.792          /* Shift the caller value and OR in the missing bits. */
  20.793 -        val  &= (((physaddr_t)1 << (bytes*8)) - 1);
  20.794 +        val  &= (((paddr_t)1 << (bytes*8)) - 1);
  20.795          val <<= (offset)*8;
  20.796          val  |= full;
  20.797          /* Also fill in missing parts of the cmpxchg old value. */
  20.798 -        old  &= (((physaddr_t)1 << (bytes*8)) - 1);
  20.799 +        old  &= (((paddr_t)1 << (bytes*8)) - 1);
  20.800          old <<= (offset)*8;
  20.801          old  |= full;
  20.802      }
  20.803 @@ -3172,7 +3172,7 @@ static int ptwr_emulated_update(
  20.804      }
  20.805  
  20.806      pfn  = l1e_get_pfn(pte);
  20.807 -    page = pfn_to_page(pfn);
  20.808 +    page = mfn_to_page(pfn);
  20.809  
  20.810      /* We are looking only for read-only mappings of p.t. pages. */
  20.811      if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
  20.812 @@ -3194,7 +3194,7 @@ static int ptwr_emulated_update(
  20.813      }
  20.814  
  20.815      /* Checked successfully: do the update (write or cmpxchg). */
  20.816 -    pl1e = map_domain_page(page_to_pfn(page));
  20.817 +    pl1e = map_domain_page(page_to_mfn(page));
  20.818      pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
  20.819      if ( do_cmpxchg )
  20.820      {
  20.821 @@ -3261,7 +3261,7 @@ int ptwr_do_page_fault(struct domain *d,
  20.822                         struct cpu_user_regs *regs)
  20.823  {
  20.824      unsigned long    pfn;
  20.825 -    struct pfn_info *page;
  20.826 +    struct page_info *page;
  20.827      l1_pgentry_t    *pl1e, pte;
  20.828      l2_pgentry_t    *pl2e, l2e;
  20.829      int              which, flags;
  20.830 @@ -3283,7 +3283,7 @@ int ptwr_do_page_fault(struct domain *d,
  20.831      }
  20.832  
  20.833      pfn  = l1e_get_pfn(pte);
  20.834 -    page = pfn_to_page(pfn);
  20.835 +    page = mfn_to_page(pfn);
  20.836  
  20.837  #ifdef CONFIG_X86_64
  20.838  #define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
  20.839 @@ -3473,8 +3473,8 @@ void cleanup_writable_pagetable(struct d
  20.840  
  20.841  int map_pages_to_xen(
  20.842      unsigned long virt,
  20.843 -    unsigned long pfn,
  20.844 -    unsigned long nr_pfns,
  20.845 +    unsigned long mfn,
  20.846 +    unsigned long nr_mfns,
  20.847      unsigned long flags)
  20.848  {
  20.849      l2_pgentry_t *pl2e, ol2e;
  20.850 @@ -3484,17 +3484,17 @@ int map_pages_to_xen(
  20.851      unsigned int  map_small_pages = !!(flags & MAP_SMALL_PAGES);
  20.852      flags &= ~MAP_SMALL_PAGES;
  20.853  
  20.854 -    while ( nr_pfns != 0 )
  20.855 +    while ( nr_mfns != 0 )
  20.856      {
  20.857          pl2e = virt_to_xen_l2e(virt);
  20.858  
  20.859 -        if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
  20.860 -             (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
  20.861 +        if ( ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
  20.862 +             (nr_mfns >= (1<<PAGETABLE_ORDER)) &&
  20.863               !map_small_pages )
  20.864          {
  20.865              /* Super-page mapping. */
  20.866              ol2e  = *pl2e;
  20.867 -            *pl2e = l2e_from_pfn(pfn, flags|_PAGE_PSE);
  20.868 +            *pl2e = l2e_from_pfn(mfn, flags|_PAGE_PSE);
  20.869  
  20.870              if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
  20.871              {
  20.872 @@ -3504,8 +3504,8 @@ int map_pages_to_xen(
  20.873              }
  20.874  
  20.875              virt    += 1UL << L2_PAGETABLE_SHIFT;
  20.876 -            pfn     += 1UL << PAGETABLE_ORDER;
  20.877 -            nr_pfns -= 1UL << PAGETABLE_ORDER;
  20.878 +            mfn     += 1UL << PAGETABLE_ORDER;
  20.879 +            nr_mfns -= 1UL << PAGETABLE_ORDER;
  20.880          }
  20.881          else
  20.882          {
  20.883 @@ -3529,13 +3529,13 @@ int map_pages_to_xen(
  20.884  
  20.885              pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
  20.886              ol1e  = *pl1e;
  20.887 -            *pl1e = l1e_from_pfn(pfn, flags);
  20.888 +            *pl1e = l1e_from_pfn(mfn, flags);
  20.889              if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
  20.890                  local_flush_tlb_one(virt);
  20.891  
  20.892              virt    += 1UL << L1_PAGETABLE_SHIFT;
  20.893 -            pfn     += 1UL;
  20.894 -            nr_pfns -= 1UL;
  20.895 +            mfn     += 1UL;
  20.896 +            nr_mfns -= 1UL;
  20.897          }
  20.898      }
  20.899  
  20.900 @@ -3575,7 +3575,7 @@ static void __memguard_change_range(void
  20.901          flags &= ~_PAGE_PRESENT;
  20.902  
  20.903      map_pages_to_xen(
  20.904 -        _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
  20.905 +        _p, virt_to_maddr(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
  20.906  }
  20.907  
  20.908  void memguard_guard_range(void *p, unsigned long l)
    21.1 --- a/xen/arch/x86/mpparse.c	Wed Feb 01 15:01:04 2006 +0000
    21.2 +++ b/xen/arch/x86/mpparse.c	Wed Feb 01 16:28:50 2006 +0100
    21.3 @@ -737,7 +737,7 @@ int __init mp_get_num_processors(void)
    21.4  
    21.5  static int __init smp_scan_config (unsigned long base, unsigned long length)
    21.6  {
    21.7 -	unsigned int *bp = phys_to_virt(base);
    21.8 +	unsigned int *bp = maddr_to_virt(base);
    21.9  	struct intel_mp_floating *mpf;
   21.10  
   21.11  	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
   21.12 @@ -754,9 +754,9 @@ static int __init smp_scan_config (unsig
   21.13  
   21.14  			smp_found_config = 1;
   21.15  			printk(KERN_INFO "found SMP MP-table at %08lx\n",
   21.16 -						virt_to_phys(mpf));
   21.17 +						virt_to_maddr(mpf));
   21.18  #if 0
   21.19 -			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
   21.20 +			reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE);
   21.21  			if (mpf->mpf_physptr) {
   21.22  				/*
   21.23  				 * We cannot access to MPC table to compute
    22.1 --- a/xen/arch/x86/setup.c	Wed Feb 01 15:01:04 2006 +0000
    22.2 +++ b/xen/arch/x86/setup.c	Wed Feb 01 16:28:50 2006 +0100
    22.3 @@ -29,7 +29,7 @@ extern void generic_apic_probe(void);
    22.4  
    22.5  /*
    22.6   * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
    22.7 - * pfn_info table and allocation bitmap.
    22.8 + * page_info table and allocation bitmap.
    22.9   */
   22.10  static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
   22.11  #if defined(CONFIG_X86_64)
   22.12 @@ -153,7 +153,7 @@ void __init __start_xen(multiboot_info_t
   22.13      module_t *mod = (module_t *)__va(mbi->mods_addr);
   22.14      unsigned long nr_pages, modules_length;
   22.15      unsigned long initial_images_start, initial_images_end;
   22.16 -    physaddr_t s, e;
   22.17 +    paddr_t s, e;
   22.18      int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
   22.19      struct ns16550_defaults ns16550 = {
   22.20          .data_bits = 8,
    23.1 --- a/xen/arch/x86/shadow.c	Wed Feb 01 15:01:04 2006 +0000
    23.2 +++ b/xen/arch/x86/shadow.c	Wed Feb 01 16:28:50 2006 +0100
    23.3 @@ -77,7 +77,7 @@ static inline int
    23.4  shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
    23.5                 unsigned long new_type)
    23.6  {
    23.7 -    struct pfn_info *page = pfn_to_page(gmfn);
    23.8 +    struct page_info *page = mfn_to_page(gmfn);
    23.9      int pinned = 0, okay = 1;
   23.10  
   23.11      if ( page_out_of_sync(page) )
   23.12 @@ -177,7 +177,7 @@ shadow_promote(struct domain *d, unsigne
   23.13   *   general ref to the page.
   23.14   */
   23.15  /*
   23.16 - * pfn_info fields for pages allocated as shadow pages:
   23.17 + * page_info fields for pages allocated as shadow pages:
   23.18   *
   23.19   * All 32 bits of count_info are a simple count of refs to this shadow
   23.20   * from a) other shadow pages, b) current CR3's (aka ed->arch.shadow_table),
   23.21 @@ -204,7 +204,7 @@ alloc_shadow_page(struct domain *d,
   23.22                    unsigned long gpfn, unsigned long gmfn,
   23.23                    u32 psh_type)
   23.24  {
   23.25 -    struct pfn_info *page;
   23.26 +    struct page_info *page;
   23.27      unsigned long smfn;
   23.28      int pin = 0;
   23.29      void *l1, *lp;
   23.30 @@ -217,7 +217,7 @@ alloc_shadow_page(struct domain *d,
   23.31          if ( !list_empty(&d->arch.free_shadow_frames) )
   23.32          {
   23.33              struct list_head *entry = d->arch.free_shadow_frames.next;
   23.34 -            page = list_entry(entry, struct pfn_info, list);
   23.35 +            page = list_entry(entry, struct page_info, list);
   23.36              list_del(entry);
   23.37              perfc_decr(free_l1_pages);
   23.38          }
   23.39 @@ -233,11 +233,11 @@ alloc_shadow_page(struct domain *d,
   23.40                  if (!page)
   23.41                      goto no_shadow_page;
   23.42  
   23.43 -                l1 = map_domain_page(page_to_pfn(page));
   23.44 +                l1 = map_domain_page(page_to_mfn(page));
   23.45                  memset(l1, 0, PAGE_SIZE);
   23.46                  unmap_domain_page(l1);
   23.47  
   23.48 -                l1 = map_domain_page(page_to_pfn(page+1));
   23.49 +                l1 = map_domain_page(page_to_mfn(page+1));
   23.50                  memset(l1, 0, PAGE_SIZE);
   23.51                  unmap_domain_page(l1);
   23.52  #else
   23.53 @@ -245,7 +245,7 @@ alloc_shadow_page(struct domain *d,
   23.54                  if (!page)
   23.55                      goto no_shadow_page;
   23.56  
   23.57 -                l1 = map_domain_page(page_to_pfn(page));
   23.58 +                l1 = map_domain_page(page_to_mfn(page));
   23.59                  memset(l1, 0, PAGE_SIZE);
   23.60                  unmap_domain_page(l1);
   23.61  #endif
   23.62 @@ -256,7 +256,7 @@ alloc_shadow_page(struct domain *d,
   23.63                  if (!page)
   23.64                      goto no_shadow_page;
   23.65  
   23.66 -                l1 = map_domain_page(page_to_pfn(page));
   23.67 +                l1 = map_domain_page(page_to_mfn(page));
   23.68                  memset(l1, 0, PAGE_SIZE);
   23.69                  unmap_domain_page(l1);
   23.70              }
   23.71 @@ -280,12 +280,12 @@ alloc_shadow_page(struct domain *d,
   23.72          if (!page)
   23.73              goto no_shadow_page;
   23.74  
   23.75 -        lp = map_domain_page(page_to_pfn(page));
   23.76 +        lp = map_domain_page(page_to_mfn(page));
   23.77          memset(lp, 0, PAGE_SIZE);
   23.78          unmap_domain_page(lp);
   23.79      }
   23.80  
   23.81 -    smfn = page_to_pfn(page);
   23.82 +    smfn = page_to_mfn(page);
   23.83  
   23.84      ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
   23.85      page->u.inuse.type_info = psh_type | gmfn;
   23.86 @@ -506,7 +506,7 @@ static unsigned long shadow_l2_table(
   23.87  
   23.88          for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   23.89              spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   23.90 -                l2e_from_page(virt_to_page(page_get_owner(pfn_to_page(gmfn))->
   23.91 +                l2e_from_page(virt_to_page(page_get_owner(mfn_to_page(gmfn))->
   23.92                                             arch.mm_perdomain_pt) + i,
   23.93                                __PAGE_HYPERVISOR);
   23.94  
   23.95 @@ -566,7 +566,7 @@ static void shadow_map_l1_into_current_l
   23.96          /* This L1 is NOT already shadowed so we need to shadow it. */
   23.97          SH_VVLOG("4a: l1 not shadowed");
   23.98  
   23.99 -        gl1mfn = __gpfn_to_mfn(d, gl1pfn);
  23.100 +        gl1mfn = gmfn_to_mfn(d, gl1pfn);
  23.101          if ( unlikely(!VALID_MFN(gl1mfn)) )
  23.102          {
  23.103              // Attempt to use an invalid pfn as an L1 page.
  23.104 @@ -636,7 +636,7 @@ static void shadow_map_l1_into_current_l
  23.105          guest_l2_pgentry_t tmp_gl2e = {0};
  23.106  
  23.107          __guest_get_l2e(v, va, &tmp_gl2e);
  23.108 -        tmp_gmfn = __gpfn_to_mfn(d, l2e_get_pfn(tmp_gl2e));
  23.109 +        tmp_gmfn = gmfn_to_mfn(d, l2e_get_pfn(tmp_gl2e));
  23.110          gpl1e = (guest_l1_pgentry_t *) map_domain_page(tmp_gmfn);
  23.111  
   23.112          /* If the PGT_l1_shadow has two contiguous pages */
  23.113 @@ -673,7 +673,7 @@ static void shadow_map_l1_into_current_l
  23.114              set_guest_back_ptr(d, sl1e, sl1mfn, i);
  23.115          }
  23.116  
  23.117 -        pfn_to_page(sl1mfn)->tlbflush_timestamp =
  23.118 +        mfn_to_page(sl1mfn)->tlbflush_timestamp =
  23.119              SHADOW_ENCODE_MIN_MAX(min, max);
  23.120  
  23.121          unmap_domain_page(gpl1e);
  23.122 @@ -910,7 +910,7 @@ shadow_make_snapshot(
  23.123      u32 min_max = 0;
  23.124      int min, max, length;
  23.125  
  23.126 -    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
  23.127 +    if ( test_and_set_bit(_PGC_out_of_sync, &mfn_to_page(gmfn)->count_info) )
  23.128      {
  23.129          ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
  23.130          return SHADOW_SNAPSHOT_ELSEWHERE;
  23.131 @@ -931,8 +931,8 @@ shadow_make_snapshot(
  23.132  
  23.133      if ( shadow_mode_refcounts(d) &&
  23.134           (shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow) )
  23.135 -        min_max = pfn_to_page(sl1mfn)->tlbflush_timestamp;
  23.136 -    pfn_to_page(smfn)->tlbflush_timestamp = min_max;
  23.137 +        min_max = mfn_to_page(sl1mfn)->tlbflush_timestamp;
  23.138 +    mfn_to_page(smfn)->tlbflush_timestamp = min_max;
  23.139  
  23.140      min = SHADOW_MIN(min_max);
  23.141      max = SHADOW_MAX(min_max);
  23.142 @@ -956,11 +956,11 @@ static struct out_of_sync_entry *
  23.143                               unsigned long mfn)
  23.144  {
  23.145      struct domain *d = v->domain;
  23.146 -    struct pfn_info *page = pfn_to_page(mfn);
  23.147 +    struct page_info *page = mfn_to_page(mfn);
  23.148      struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
  23.149  
  23.150      ASSERT(shadow_lock_is_acquired(d));
  23.151 -    ASSERT(pfn_valid(mfn));
  23.152 +    ASSERT(mfn_valid(mfn));
  23.153  
  23.154  #ifndef NDEBUG
  23.155      {
  23.156 @@ -1143,7 +1143,7 @@ static int is_out_of_sync(struct vcpu *v
  23.157  #else
  23.158      unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
  23.159  #endif
  23.160 -    unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
  23.161 +    unsigned long l2pfn = mfn_to_gmfn(d, l2mfn);
  23.162      guest_l2_pgentry_t l2e;
  23.163      unsigned long l1pfn, l1mfn;
  23.164      guest_l1_pgentry_t *guest_pt;
  23.165 @@ -1177,7 +1177,7 @@ static int is_out_of_sync(struct vcpu *v
  23.166                  && i == PAGING_L4)
  23.167                  continue;       /* skip the top-level for 3-level */
  23.168  
  23.169 -            if ( page_out_of_sync(pfn_to_page(gmfn)) &&
  23.170 +            if ( page_out_of_sync(mfn_to_page(gmfn)) &&
  23.171                   !snapshot_entry_matches(
  23.172                       d, guest_pt, gpfn, table_offset_64(va, i)) )
  23.173              {
  23.174 @@ -1192,7 +1192,7 @@ static int is_out_of_sync(struct vcpu *v
  23.175                  unmap_and_return (0);
  23.176              }
  23.177              gpfn = entry_get_pfn(le);
  23.178 -            gmfn = __gpfn_to_mfn(d, gpfn);
  23.179 +            gmfn = gmfn_to_mfn(d, gpfn);
  23.180              if ( !VALID_MFN(gmfn) )
  23.181              {
  23.182                  unmap_and_return (0);
  23.183 @@ -1203,7 +1203,7 @@ static int is_out_of_sync(struct vcpu *v
  23.184          }
  23.185  
  23.186          /* L2 */
  23.187 -        if ( page_out_of_sync(pfn_to_page(gmfn)) &&
  23.188 +        if ( page_out_of_sync(mfn_to_page(gmfn)) &&
  23.189               !snapshot_entry_matches(d, guest_pt, gpfn, l2_table_offset(va)) )
  23.190          {
  23.191              unmap_and_return (1);
  23.192 @@ -1217,7 +1217,7 @@ static int is_out_of_sync(struct vcpu *v
  23.193  #undef unmap_and_return
  23.194  #endif /* CONFIG_PAGING_LEVELS >= 3 */
  23.195      {
  23.196 -        if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
  23.197 +        if ( page_out_of_sync(mfn_to_page(l2mfn)) &&
  23.198               !snapshot_entry_matches(d, (guest_l1_pgentry_t *)v->arch.guest_vtable,
  23.199                                       l2pfn, guest_l2_table_offset(va)) )
  23.200              return 1;
  23.201 @@ -1229,7 +1229,7 @@ static int is_out_of_sync(struct vcpu *v
  23.202          return 0;
  23.203  
  23.204      l1pfn = l2e_get_pfn(l2e);
  23.205 -    l1mfn = __gpfn_to_mfn(d, l1pfn);
  23.206 +    l1mfn = gmfn_to_mfn(d, l1pfn);
  23.207  
  23.208      // If the l1 pfn is invalid, it can't be out of sync...
  23.209      if ( !VALID_MFN(l1mfn) )
  23.210 @@ -1237,7 +1237,7 @@ static int is_out_of_sync(struct vcpu *v
  23.211  
  23.212      guest_pt = (guest_l1_pgentry_t *) map_domain_page(l1mfn);
  23.213  
  23.214 -    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
  23.215 +    if ( page_out_of_sync(mfn_to_page(l1mfn)) &&
  23.216           !snapshot_entry_matches(
  23.217               d, guest_pt, l1pfn, guest_l1_table_offset(va)) ) 
  23.218      {
  23.219 @@ -1327,18 +1327,18 @@ static u32 remove_all_write_access_in_pt
  23.220      int i;
  23.221      u32 found = 0;
  23.222      int is_l1_shadow =
  23.223 -        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  23.224 +        ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  23.225           PGT_l1_shadow);
  23.226  #if CONFIG_PAGING_LEVELS == 4
  23.227      is_l1_shadow |=
  23.228 -      ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  23.229 +      ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  23.230                  PGT_fl1_shadow);
  23.231  #endif
  23.232  
  23.233      match = l1e_from_pfn(readonly_gmfn, flags);
  23.234  
  23.235      if ( shadow_mode_external(d) ) {
  23.236 -        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
  23.237 +        i = (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
  23.238              >> PGT_va_shift;
  23.239  
  23.240          if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
  23.241 @@ -1376,7 +1376,7 @@ static int remove_all_write_access(
  23.242  
  23.243      // If it's not a writable page, then no writable refs can be outstanding.
  23.244      //
  23.245 -    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
  23.246 +    if ( (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
  23.247           PGT_writable_page )
  23.248      {
  23.249          perfc_incrc(remove_write_not_writable);
  23.250 @@ -1386,7 +1386,7 @@ static int remove_all_write_access(
  23.251      // How many outstanding writable PTEs for this page are there?
  23.252      //
  23.253      write_refs =
  23.254 -        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
  23.255 +        (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
  23.256      if ( write_refs && MFN_PINNED(readonly_gmfn) )
  23.257      {
  23.258          write_refs--;
  23.259 @@ -1404,7 +1404,7 @@ static int remove_all_write_access(
  23.260  
  23.261           // Use the back pointer to locate the shadow page that can contain
  23.262           // the PTE of interest
  23.263 -         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
  23.264 +         if ( (predicted_smfn = mfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
  23.265               found += remove_all_write_access_in_ptpage(
  23.266                   d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
  23.267               if ( found == write_refs )
  23.268 @@ -1478,7 +1478,7 @@ static int resync_all(struct domain *d, 
   23.269              // the new contents of the guest page iff it has the right
  23.270              // page type.
  23.271              //
  23.272 -            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
  23.273 +            if ( stype != ( mfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
  23.274                  continue;
  23.275          }
  23.276  
  23.277 @@ -1498,11 +1498,11 @@ static int resync_all(struct domain *d, 
  23.278  
  23.279          unshadow = 0;
  23.280  
  23.281 -        min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
  23.282 +        min_max_shadow = mfn_to_page(smfn)->tlbflush_timestamp;
  23.283          min_shadow     = SHADOW_MIN(min_max_shadow);
  23.284          max_shadow     = SHADOW_MAX(min_max_shadow);
  23.285  
  23.286 -        min_max_snapshot= pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
  23.287 +        min_max_snapshot= mfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
  23.288          min_snapshot    = SHADOW_MIN(min_max_snapshot);
  23.289          max_snapshot    = SHADOW_MAX(min_max_snapshot);
  23.290  
  23.291 @@ -1673,7 +1673,7 @@ static int resync_all(struct domain *d, 
  23.292                      if ( !(entry_get_flags(guest_pt[i]) & _PAGE_PRESENT) &&
  23.293                           unlikely(entry_get_value(guest_pt[i]) != 0) &&
  23.294                           !unshadow &&
  23.295 -                         (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
  23.296 +                         (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
  23.297                          unshadow = 1;
  23.298                  }
  23.299  #endif
  23.300 @@ -1721,7 +1721,7 @@ static int resync_all(struct domain *d, 
  23.301                  if ( !(guest_root_get_flags(new_root_e) & _PAGE_PRESENT) &&
  23.302                       unlikely(guest_root_get_intpte(new_root_e) != 0) &&
  23.303                       !unshadow &&
  23.304 -                     (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
  23.305 +                     (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
  23.306                      unshadow = 1;
  23.307              }
  23.308              if ( max == -1 )
  23.309 @@ -1848,7 +1848,7 @@ static inline int l1pte_write_fault(
  23.310      guest_l1_pgentry_t gpte = *gpte_p;
  23.311      l1_pgentry_t spte;
  23.312      unsigned long gpfn = l1e_get_pfn(gpte);
  23.313 -    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
  23.314 +    unsigned long gmfn = gmfn_to_mfn(d, gpfn);
  23.315  
  23.316      //printk("l1pte_write_fault gmfn=%lx\n", gmfn);
  23.317  
  23.318 @@ -1883,7 +1883,7 @@ static inline int l1pte_read_fault(
  23.319      guest_l1_pgentry_t gpte = *gpte_p;
  23.320      l1_pgentry_t spte = *spte_p;
  23.321      unsigned long pfn = l1e_get_pfn(gpte);
  23.322 -    unsigned long mfn = __gpfn_to_mfn(d, pfn);
  23.323 +    unsigned long mfn = gmfn_to_mfn(d, pfn);
  23.324  
  23.325      if ( unlikely(!VALID_MFN(mfn)) )
  23.326      {
  23.327 @@ -2028,7 +2028,7 @@ static int shadow_fault_32(unsigned long
  23.328              domain_crash_synchronous();
  23.329          }
  23.330  
  23.331 -        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
  23.332 +        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gpde)));
  23.333      }
  23.334  
  23.335      shadow_set_l1e(va, spte, 1);
  23.336 @@ -2057,7 +2057,7 @@ static inline unsigned long va_to_l1mfn(
  23.337      if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PRESENT)) )
  23.338          return INVALID_MFN;
  23.339  
  23.340 -    return __gpfn_to_mfn(d, l2e_get_pfn(gl2e));
  23.341 +    return gmfn_to_mfn(d, l2e_get_pfn(gl2e));
  23.342  }
  23.343  
  23.344  static int do_update_va_mapping(unsigned long va,
  23.345 @@ -2132,7 +2132,7 @@ static void shadow_update_pagetables(str
  23.346      unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
  23.347  #endif
  23.348  
  23.349 -    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  23.350 +    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
  23.351      unsigned long smfn, old_smfn;
  23.352  
  23.353  #if CONFIG_PAGING_LEVELS == 2
  23.354 @@ -2400,7 +2400,7 @@ static int check_pte(
  23.355          FAIL("global bit set in shadow");
  23.356  
  23.357      eff_guest_pfn = l1e_get_pfn(eff_guest_pte);
  23.358 -    eff_guest_mfn = __gpfn_to_mfn(d, eff_guest_pfn);
  23.359 +    eff_guest_mfn = gmfn_to_mfn(d, eff_guest_pfn);
  23.360      shadow_mfn = l1e_get_pfn(shadow_pte);
  23.361  
  23.362      if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
  23.363 @@ -2417,7 +2417,7 @@ static int check_pte(
  23.364      {
  23.365          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
  23.366                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  23.367 -               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  23.368 +               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  23.369                 page_table_page);
  23.370          FAIL("RW coherence");
  23.371      }
  23.372 @@ -2428,7 +2428,7 @@ static int check_pte(
  23.373      {
  23.374          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
  23.375                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  23.376 -               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  23.377 +               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  23.378                 page_table_page);
  23.379          FAIL("RW2 coherence");
  23.380      }
  23.381 @@ -2468,7 +2468,7 @@ static int check_l1_table(
  23.382      l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
  23.383      int errors = 0;
  23.384  
  23.385 -    if ( page_out_of_sync(pfn_to_page(gmfn)) )
  23.386 +    if ( page_out_of_sync(mfn_to_page(gmfn)) )
  23.387      {
  23.388          snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
  23.389          ASSERT(snapshot_mfn);
  23.390 @@ -2508,13 +2508,13 @@ static int check_l2_table(
  23.391      int errors = 0;
  23.392      int limit;
  23.393  
  23.394 -    if ( !oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != d) )
  23.395 +    if ( !oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != d) )
  23.396          FAILPT("domain doesn't own page");
  23.397 -    if ( oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != NULL) )
  23.398 +    if ( oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != NULL) )
  23.399          FAILPT("bogus owner for snapshot page");
  23.400 -    if ( page_get_owner(pfn_to_page(smfn)) != NULL )
  23.401 +    if ( page_get_owner(mfn_to_page(smfn)) != NULL )
  23.402          FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
  23.403 -               smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
  23.404 +               smfn, page_get_owner(mfn_to_page(smfn))->domain_id);
  23.405  
  23.406  #if 0
  23.407      if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
  23.408 @@ -2611,14 +2611,14 @@ int _check_pagetable(struct vcpu *v, cha
  23.409      perfc_incrc(check_pagetable);
  23.410  
  23.411      ptbase_mfn = gptbase >> PAGE_SHIFT;
  23.412 -    ptbase_pfn = __mfn_to_gpfn(d, ptbase_mfn);
  23.413 +    ptbase_pfn = mfn_to_gmfn(d, ptbase_mfn);
  23.414  
  23.415      if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
  23.416      {
  23.417          printk("%s-PT %lx not shadowed\n", s, gptbase);
  23.418          goto out;
  23.419      }
  23.420 -    if ( page_out_of_sync(pfn_to_page(ptbase_mfn)) )
  23.421 +    if ( page_out_of_sync(mfn_to_page(ptbase_mfn)) )
  23.422      {
  23.423          ptbase_mfn = __shadow_status(d, ptbase_pfn, PGT_snapshot);
  23.424          oos_pdes = 1;
  23.425 @@ -2643,7 +2643,7 @@ int _check_pagetable(struct vcpu *v, cha
  23.426      for ( i = 0; i < limit; i++ )
  23.427      {
  23.428          unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
  23.429 -        unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
  23.430 +        unsigned long gl1mfn = gmfn_to_mfn(d, gl1pfn);
  23.431          unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
  23.432  
  23.433          if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
  23.434 @@ -2689,7 +2689,7 @@ int _check_all_pagetables(struct vcpu *v
  23.435          a = &d->arch.shadow_ht[i];
  23.436          while ( a && a->gpfn_and_flags )
  23.437          {
  23.438 -            gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  23.439 +            gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  23.440  
  23.441              switch ( a->gpfn_and_flags & PGT_type_mask )
  23.442              {
  23.443 @@ -2699,7 +2699,7 @@ int _check_all_pagetables(struct vcpu *v
  23.444                  break;
  23.445              case PGT_l2_shadow:
  23.446                  errors += check_l2_table(v, gmfn, a->smfn,
  23.447 -                                         page_out_of_sync(pfn_to_page(gmfn)));
  23.448 +                                         page_out_of_sync(mfn_to_page(gmfn)));
  23.449                  break;
  23.450              case PGT_l3_shadow:
  23.451              case PGT_l4_shadow:
  23.452 @@ -2797,7 +2797,7 @@ static unsigned long shadow_l3_table(
  23.453           * When we free L2 pages, we need to tell if the page contains
  23.454           * Xen private mappings. Use the va_mask part.
  23.455           */
  23.456 -        pfn_to_page(s2mfn)->u.inuse.type_info |= 
  23.457 +        mfn_to_page(s2mfn)->u.inuse.type_info |= 
  23.458              (unsigned long) 3 << PGT_score_shift; 
  23.459  
  23.460          memset(spl2e, 0, 
  23.461 @@ -2810,7 +2810,7 @@ static unsigned long shadow_l3_table(
  23.462          for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
  23.463              spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
  23.464                  l2e_from_page(
  23.465 -                    virt_to_page(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt) + i, 
  23.466 +                    virt_to_page(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_pt) + i, 
  23.467                      __PAGE_HYPERVISOR);
  23.468          for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
  23.469              spl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
  23.470 @@ -2845,7 +2845,7 @@ static inline unsigned long init_bl2(l4_
  23.471  {
  23.472      unsigned int count;
  23.473      unsigned long sl2mfn;
  23.474 -    struct pfn_info *page;
  23.475 +    struct page_info *page;
  23.476      void *l2;
  23.477  
  23.478      memset(spl4e, 0, PAGE_SIZE);
  23.479 @@ -2860,7 +2860,7 @@ static inline unsigned long init_bl2(l4_
  23.480  
  23.481      for (count = 0; count < PDP_ENTRIES; count++)
  23.482      {
  23.483 -        sl2mfn = page_to_pfn(page+count);
  23.484 +        sl2mfn = page_to_mfn(page+count);
  23.485          l2 = map_domain_page(sl2mfn);
  23.486          memset(l2, 0, PAGE_SIZE);
  23.487          unmap_domain_page(l2);
  23.488 @@ -2912,7 +2912,7 @@ static unsigned long shadow_l4_table(
  23.489             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
  23.490  
  23.491          spl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
  23.492 -            l4e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_l3),
  23.493 +            l4e_from_paddr(__pa(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_l3),
  23.494                              __PAGE_HYPERVISOR);
  23.495  
  23.496          if ( shadow_mode_translate(d) ) // NB: not external
  23.497 @@ -3000,7 +3000,7 @@ static int get_shadow_mfn(struct domain 
  23.498          /* This is NOT already shadowed so we need to shadow it. */
  23.499          SH_VVLOG("<get_shadow_mfn>: not shadowed");
  23.500  
  23.501 -        gmfn = __gpfn_to_mfn(d, gpfn);
  23.502 +        gmfn = gmfn_to_mfn(d, gpfn);
  23.503          if ( unlikely(!VALID_MFN(gmfn)) )
  23.504          {
   23.505              // Attempt to use an invalid pfn as a shadow page.
  23.506 @@ -3168,7 +3168,7 @@ static inline int l2e_rw_fault(
  23.507          sl2e = l2e_from_pfn(l1_mfn, l2e_get_flags(tmp_l2e));
  23.508      } else {
   23.509          /* Allocate a new page as a shadow page table if needed */
  23.510 -        gmfn = __gpfn_to_mfn(d, start_gpfn);
  23.511 +        gmfn = gmfn_to_mfn(d, start_gpfn);
  23.512          l1_mfn = alloc_shadow_page(d, start_gpfn | nx, gmfn, PGT_fl1_shadow);
  23.513          if (unlikely(!l1_mfn)) {
  23.514              BUG();
  23.515 @@ -3193,7 +3193,7 @@ static inline int l2e_rw_fault(
  23.516      for (gpfn = start_gpfn;
  23.517        gpfn < (start_gpfn + L1_PAGETABLE_ENTRIES); gpfn++) {
  23.518  
  23.519 -        mfn = __gpfn_to_mfn(d, gpfn);
  23.520 +        mfn = gmfn_to_mfn(d, gpfn);
  23.521  
  23.522          if ( unlikely(!VALID_MFN(mfn)) )
  23.523          {
  23.524 @@ -3343,7 +3343,7 @@ static inline int guest_page_fault(
  23.525          /*
  23.526           * If it's not external mode, then mfn should be machine physical.
  23.527           */
  23.528 -        mfn = __gpfn_to_mfn(d, gpfn);
  23.529 +        mfn = gmfn_to_mfn(d, gpfn);
  23.530  
  23.531          lva = (pgentry_64_t *) map_domain_page(mfn);
  23.532          gle = lva[table_offset_64(va, i)];
  23.533 @@ -3492,7 +3492,7 @@ check_writeable:
  23.534          if (unlikely(!__guest_set_l1e(v, va, &gl1e))) 
  23.535              domain_crash_synchronous();
  23.536  
  23.537 -        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
  23.538 +        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gl2e)));
  23.539      }
  23.540  
  23.541      shadow_set_l1e_64(va, (pgentry_64_t *)&sl1e, 1);
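
The shadow.c hunks above are purely mechanical substitutions: pfn_to_page()/page_to_pfn() become mfn_to_page()/page_to_mfn(), __gpfn_to_mfn()/__mfn_to_gpfn() become gmfn_to_mfn()/mfn_to_gmfn(), and struct pfn_info becomes struct page_info, with no functional change intended. A minimal sketch of the renamed frame-table accessors, using an assumed struct layout and table size rather than the real Xen definitions:

    /* Sketch only: simplified stand-ins for the renamed Xen helpers. */
    struct page_info {                          /* formerly struct pfn_info */
        unsigned long count_info;
        union { struct { unsigned long type_info; } inuse; } u;
    };

    static struct page_info frame_table_sketch[1024];

    /* The frame table is indexed by machine frame number, so the accessor
     * names now say "mfn" rather than the ambiguous "pfn". */
    static inline struct page_info *mfn_to_page(unsigned long mfn)
    {
        return &frame_table_sketch[mfn];
    }

    static inline unsigned long page_to_mfn(struct page_info *pg)
    {
        return (unsigned long)(pg - frame_table_sketch);
    }

Call sites keep their shape: mfn_to_page(smfn)->u.inuse.type_info reads exactly as the old pfn_to_page() expression did.
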
    24.1 --- a/xen/arch/x86/shadow32.c	Wed Feb 01 15:01:04 2006 +0000
    24.2 +++ b/xen/arch/x86/shadow32.c	Wed Feb 01 16:28:50 2006 +0100
    24.3 @@ -30,7 +30,7 @@
    24.4  #include <xen/sched.h>
    24.5  #include <xen/trace.h>
    24.6  
    24.7 -#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
    24.8 +#define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
    24.9  #define va_to_l1mfn(_ed, _va) \
   24.10      (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
   24.11  
   24.12 @@ -59,7 +59,7 @@ static inline int
   24.13  shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
   24.14                 unsigned long new_type)
   24.15  {
   24.16 -    struct pfn_info *page = pfn_to_page(gmfn);
   24.17 +    struct page_info *page = mfn_to_page(gmfn);
   24.18      int pinned = 0, okay = 1;
   24.19  
   24.20      if ( page_out_of_sync(page) )
   24.21 @@ -144,13 +144,13 @@ shadow_demote(struct domain *d, unsigned
   24.22      if ( !shadow_mode_refcounts(d) )
   24.23          return;
   24.24  
   24.25 -    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
   24.26 +    ASSERT(mfn_to_page(gmfn)->count_info & PGC_page_table);
   24.27  
   24.28      if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
   24.29      {
   24.30 -        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
   24.31 -
   24.32 -        if ( page_out_of_sync(pfn_to_page(gmfn)) )
   24.33 +        clear_bit(_PGC_page_table, &mfn_to_page(gmfn)->count_info);
   24.34 +
   24.35 +        if ( page_out_of_sync(mfn_to_page(gmfn)) )
   24.36          {
   24.37              remove_out_of_sync_entries(d, gmfn);
   24.38          }
   24.39 @@ -178,7 +178,7 @@ shadow_demote(struct domain *d, unsigned
   24.40   *   general ref to the page.
   24.41   */
   24.42  /*
   24.43 - * pfn_info fields for pages allocated as shadow pages:
   24.44 + * page_info fields for pages allocated as shadow pages:
   24.45   *
   24.46   * All 32 bits of count_info are a simple count of refs to this shadow
   24.47   * from a) other shadow pages, b) current CR3's (aka ed->arch.shadow_table),
   24.48 @@ -205,7 +205,7 @@ alloc_shadow_page(struct domain *d,
   24.49                    unsigned long gpfn, unsigned long gmfn,
   24.50                    u32 psh_type)
   24.51  {
   24.52 -    struct pfn_info *page;
   24.53 +    struct page_info *page;
   24.54      unsigned long smfn;
   24.55      int pin = 0;
   24.56      void *l1;
   24.57 @@ -218,14 +218,14 @@ alloc_shadow_page(struct domain *d,
   24.58          if ( !list_empty(&d->arch.free_shadow_frames) )
   24.59          {
   24.60              struct list_head *entry = d->arch.free_shadow_frames.next;
   24.61 -            page = list_entry(entry, struct pfn_info, list);
   24.62 +            page = list_entry(entry, struct page_info, list);
   24.63              list_del(entry);
   24.64              perfc_decr(free_l1_pages);
   24.65          }
   24.66          else
   24.67          {
   24.68              page = alloc_domheap_page(NULL);
   24.69 -            l1 = map_domain_page(page_to_pfn(page));
   24.70 +            l1 = map_domain_page(page_to_mfn(page));
   24.71              memset(l1, 0, PAGE_SIZE);
   24.72              unmap_domain_page(l1);
   24.73          }
   24.74 @@ -245,7 +245,7 @@ alloc_shadow_page(struct domain *d,
   24.75          BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
   24.76      }
   24.77  
   24.78 -    smfn = page_to_pfn(page);
   24.79 +    smfn = page_to_mfn(page);
   24.80  
   24.81      ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
   24.82      page->u.inuse.type_info = psh_type | gmfn;
   24.83 @@ -320,7 +320,7 @@ free_shadow_l1_table(struct domain *d, u
   24.84  {
   24.85      l1_pgentry_t *pl1e = map_domain_page(smfn);
   24.86      int i;
   24.87 -    struct pfn_info *spage = pfn_to_page(smfn);
   24.88 +    struct page_info *spage = mfn_to_page(smfn);
   24.89      u32 min_max = spage->tlbflush_timestamp;
   24.90      int min = SHADOW_MIN(min_max);
   24.91      int max = SHADOW_MAX(min_max);
   24.92 @@ -350,7 +350,7 @@ free_shadow_hl2_table(struct domain *d, 
   24.93      for ( i = 0; i < limit; i++ )
   24.94      {
   24.95          if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
   24.96 -            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
   24.97 +            put_page(mfn_to_page(l1e_get_pfn(hl2[i])));
   24.98      }
   24.99  
  24.100      unmap_domain_page(hl2);
  24.101 @@ -380,10 +380,10 @@ free_shadow_l2_table(struct domain *d, u
  24.102  
  24.103  void free_shadow_page(unsigned long smfn)
  24.104  {
  24.105 -    struct pfn_info *page = pfn_to_page(smfn);
  24.106 +    struct page_info *page = mfn_to_page(smfn);
  24.107      unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
  24.108 -    struct domain *d = page_get_owner(pfn_to_page(gmfn));
  24.109 -    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  24.110 +    struct domain *d = page_get_owner(mfn_to_page(gmfn));
  24.111 +    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
  24.112      unsigned long type = page->u.inuse.type_info & PGT_type_mask;
  24.113  
  24.114      SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
  24.115 @@ -422,7 +422,7 @@ void free_shadow_page(unsigned long smfn
  24.116  
  24.117      default:
  24.118          printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
  24.119 -               page_to_pfn(page), page->u.inuse.type_info);
  24.120 +               page_to_mfn(page), page->u.inuse.type_info);
  24.121          break;
  24.122      }
  24.123  
  24.124 @@ -463,9 +463,9 @@ remove_shadow(struct domain *d, unsigned
  24.125  static void inline
  24.126  release_out_of_sync_entry(struct domain *d, struct out_of_sync_entry *entry)
  24.127  {
  24.128 -    struct pfn_info *page;
  24.129 -
  24.130 -    page = pfn_to_page(entry->gmfn);
  24.131 +    struct page_info *page;
  24.132 +
  24.133 +    page = mfn_to_page(entry->gmfn);
  24.134  
  24.135      // Decrement ref count of guest & shadow pages
  24.136      //
  24.137 @@ -698,7 +698,7 @@ static void free_shadow_pages(struct dom
  24.138      /* Now free the pre-zero'ed pages from the domain */
  24.139      list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
  24.140      {
  24.141 -        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
  24.142 +        struct page_info *page = list_entry(list_ent, struct page_info, list);
  24.143  
  24.144          list_del(list_ent);
  24.145          perfc_decr(free_l1_pages);
  24.146 @@ -724,7 +724,7 @@ static void alloc_monitor_pagetable(stru
  24.147  {
  24.148      unsigned long mmfn;
  24.149      l2_pgentry_t *mpl2e;
  24.150 -    struct pfn_info *mmfn_info;
  24.151 +    struct page_info *mmfn_info;
  24.152      struct domain *d = v->domain;
  24.153      int i;
  24.154  
  24.155 @@ -733,7 +733,7 @@ static void alloc_monitor_pagetable(stru
  24.156      mmfn_info = alloc_domheap_page(NULL);
  24.157      ASSERT(mmfn_info != NULL);
  24.158  
  24.159 -    mmfn = page_to_pfn(mmfn_info);
  24.160 +    mmfn = page_to_mfn(mmfn_info);
  24.161      mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
  24.162      memset(mpl2e, 0, PAGE_SIZE);
  24.163  
  24.164 @@ -797,7 +797,7 @@ void free_monitor_pagetable(struct vcpu 
  24.165       */
  24.166      mfn = pagetable_get_pfn(v->arch.monitor_table);
  24.167      unmap_domain_page_global(v->arch.monitor_vtable);
  24.168 -    free_domheap_page(pfn_to_page(mfn));
  24.169 +    free_domheap_page(mfn_to_page(mfn));
  24.170  
  24.171      v->arch.monitor_table = mk_pagetable(0);
  24.172      v->arch.monitor_vtable = 0;
  24.173 @@ -811,7 +811,7 @@ set_p2m_entry(struct domain *d, unsigned
  24.174      unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
  24.175      l2_pgentry_t *l2, l2e;
  24.176      l1_pgentry_t *l1;
  24.177 -    struct pfn_info *l1page;
  24.178 +    struct page_info *l1page;
  24.179      unsigned long va = pfn << PAGE_SHIFT;
  24.180  
  24.181      ASSERT(tabpfn != 0);
  24.182 @@ -828,7 +828,7 @@ set_p2m_entry(struct domain *d, unsigned
  24.183              return 0;
  24.184          }
  24.185  
  24.186 -        l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache);
  24.187 +        l1 = map_domain_page_with_cache(page_to_mfn(l1page), l1cache);
  24.188          memset(l1, 0, PAGE_SIZE);
  24.189          unmap_domain_page_with_cache(l1, l1cache);
  24.190  
  24.191 @@ -848,7 +848,7 @@ static int
  24.192  alloc_p2m_table(struct domain *d)
  24.193  {
  24.194      struct list_head *list_ent;
  24.195 -    struct pfn_info *page, *l2page;
  24.196 +    struct page_info *page, *l2page;
  24.197      l2_pgentry_t *l2;
  24.198      unsigned long mfn, pfn;
  24.199      struct domain_mmap_cache l1cache, l2cache;
  24.200 @@ -860,16 +860,16 @@ alloc_p2m_table(struct domain *d)
  24.201      domain_mmap_cache_init(&l1cache);
  24.202      domain_mmap_cache_init(&l2cache);
  24.203  
  24.204 -    d->arch.phys_table = mk_pagetable(page_to_phys(l2page));
  24.205 -    l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache);
  24.206 +    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
  24.207 +    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
  24.208      memset(l2, 0, PAGE_SIZE);
  24.209      unmap_domain_page_with_cache(l2, &l2cache);
  24.210  
  24.211      list_ent = d->page_list.next;
  24.212      while ( list_ent != &d->page_list )
  24.213      {
  24.214 -        page = list_entry(list_ent, struct pfn_info, list);
  24.215 -        mfn = page_to_pfn(page);
  24.216 +        page = list_entry(list_ent, struct page_info, list);
  24.217 +        mfn = page_to_mfn(page);
  24.218          pfn = get_pfn_from_mfn(mfn);
  24.219          ASSERT(pfn != INVALID_M2P_ENTRY);
  24.220          ASSERT(pfn < (1u<<20));
  24.221 @@ -882,8 +882,8 @@ alloc_p2m_table(struct domain *d)
  24.222      list_ent = d->xenpage_list.next;
  24.223      while ( list_ent != &d->xenpage_list )
  24.224      {
  24.225 -        page = list_entry(list_ent, struct pfn_info, list);
  24.226 -        mfn = page_to_pfn(page);
  24.227 +        page = list_entry(list_ent, struct page_info, list);
  24.228 +        mfn = page_to_mfn(page);
  24.229          pfn = get_pfn_from_mfn(mfn);
  24.230          if ( (pfn != INVALID_M2P_ENTRY) &&
  24.231               (pfn < (1u<<20)) )
  24.232 @@ -1020,7 +1020,7 @@ int __shadow_mode_enable(struct domain *
  24.233          {
  24.234              // external guests provide their own memory for their P2M maps.
  24.235              //
  24.236 -            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
  24.237 +            ASSERT(d == page_get_owner(mfn_to_page(pagetable_get_pfn(
  24.238                  d->arch.phys_table))));
  24.239          }
  24.240      }
  24.241 @@ -1034,7 +1034,7 @@ int __shadow_mode_enable(struct domain *
  24.242      if ( shadow_mode_refcounts(d) )
  24.243      {
  24.244          struct list_head *list_ent; 
  24.245 -        struct pfn_info *page;
  24.246 +        struct page_info *page;
  24.247  
  24.248          /*
  24.249           * Tear down its counts by disassembling its page-table-based refcounts
  24.250 @@ -1062,7 +1062,7 @@ int __shadow_mode_enable(struct domain *
  24.251          for (list_ent = d->page_list.next; list_ent != &d->page_list; 
  24.252               list_ent = page->list.next) {
  24.253              
  24.254 -            page = list_entry(list_ent, struct pfn_info, list);
  24.255 +            page = list_entry(list_ent, struct page_info, list);
  24.256  
  24.257              if ( !get_page_type(page, PGT_writable_page) )
  24.258                  BUG();
  24.259 @@ -1122,7 +1122,7 @@ translate_l1pgtable(struct domain *d, l1
  24.260               (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
  24.261          {
  24.262              unsigned long mfn = l1e_get_pfn(l1[i]);
  24.263 -            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
  24.264 +            unsigned long gpfn = mfn_to_gmfn(d, mfn);
  24.265              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
  24.266              l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
  24.267          }
  24.268 @@ -1150,7 +1150,7 @@ translate_l2pgtable(struct domain *d, l1
  24.269               (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
  24.270          {
  24.271              unsigned long mfn = l2e_get_pfn(l2[i]);
  24.272 -            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
  24.273 +            unsigned long gpfn = mfn_to_gmfn(d, mfn);
  24.274              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
  24.275              l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
  24.276              translate_l1pgtable(d, p2m, mfn);
  24.277 @@ -1554,7 +1554,7 @@ static unsigned long shadow_l2_table(
  24.278  
  24.279          for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
  24.280              spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
  24.281 -            l2e_from_page(virt_to_page(page_get_owner(pfn_to_page(gmfn))->
  24.282 +            l2e_from_page(virt_to_page(page_get_owner(mfn_to_page(gmfn))->
  24.283                                         arch.mm_perdomain_pt) + i,
  24.284                            __PAGE_HYPERVISOR);
  24.285  
  24.286 @@ -1611,7 +1611,7 @@ void shadow_map_l1_into_current_l2(unsig
  24.287          /* This L1 is NOT already shadowed so we need to shadow it. */
  24.288          SH_VVLOG("4a: l1 not shadowed");
  24.289  
  24.290 -        gl1mfn = __gpfn_to_mfn(d, gl1pfn);
  24.291 +        gl1mfn = gmfn_to_mfn(d, gl1pfn);
  24.292          if ( unlikely(!VALID_MFN(gl1mfn)) )
  24.293          {
  24.294              // Attempt to use an invalid pfn as an L1 page.
  24.295 @@ -1687,7 +1687,7 @@ void shadow_map_l1_into_current_l2(unsig
  24.296              set_guest_back_ptr(d, sl1e, sl1mfn, i);
  24.297          }
  24.298  
  24.299 -        pfn_to_page(sl1mfn)->tlbflush_timestamp =
  24.300 +        mfn_to_page(sl1mfn)->tlbflush_timestamp =
  24.301              SHADOW_ENCODE_MIN_MAX(min, max);
  24.302      }
  24.303  }
  24.304 @@ -1770,7 +1770,7 @@ shadow_make_snapshot(
  24.305      u32 min_max = 0;
  24.306      int min, max, length;
  24.307  
  24.308 -    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
  24.309 +    if ( test_and_set_bit(_PGC_out_of_sync, &mfn_to_page(gmfn)->count_info) )
  24.310      {
  24.311          ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
  24.312          return SHADOW_SNAPSHOT_ELSEWHERE;
  24.313 @@ -1791,8 +1791,8 @@ shadow_make_snapshot(
  24.314  
  24.315      if ( shadow_mode_refcounts(d) &&
  24.316           (shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow) )
  24.317 -        min_max = pfn_to_page(sl1mfn)->tlbflush_timestamp;
  24.318 -    pfn_to_page(smfn)->tlbflush_timestamp = min_max;
  24.319 +        min_max = mfn_to_page(sl1mfn)->tlbflush_timestamp;
  24.320 +    mfn_to_page(smfn)->tlbflush_timestamp = min_max;
  24.321  
  24.322      min = SHADOW_MIN(min_max);
  24.323      max = SHADOW_MAX(min_max);
  24.324 @@ -1821,7 +1821,7 @@ shadow_free_snapshot(struct domain *d, s
  24.325  
  24.326      // Clear the out_of_sync bit.
  24.327      //
  24.328 -    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
  24.329 +    clear_bit(_PGC_out_of_sync, &mfn_to_page(entry->gmfn)->count_info);
  24.330  
  24.331      // XXX Need to think about how to protect the domain's
  24.332      // information less expensively.
  24.333 @@ -1838,11 +1838,11 @@ struct out_of_sync_entry *
  24.334                               unsigned long mfn)
  24.335  {
  24.336      struct domain *d = v->domain;
  24.337 -    struct pfn_info *page = pfn_to_page(mfn);
  24.338 +    struct page_info *page = mfn_to_page(mfn);
  24.339      struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
  24.340  
  24.341      ASSERT(shadow_lock_is_acquired(d));
  24.342 -    ASSERT(pfn_valid(mfn));
  24.343 +    ASSERT(mfn_valid(mfn));
  24.344  
  24.345  #ifndef NDEBUG
  24.346      {
  24.347 @@ -1995,7 +1995,7 @@ int __shadow_out_of_sync(struct vcpu *v,
  24.348  {
  24.349      struct domain *d = v->domain;
  24.350      unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
  24.351 -    unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
  24.352 +    unsigned long l2pfn = mfn_to_gmfn(d, l2mfn);
  24.353      l2_pgentry_t l2e;
  24.354      unsigned long l1pfn, l1mfn;
  24.355  
  24.356 @@ -2004,7 +2004,7 @@ int __shadow_out_of_sync(struct vcpu *v,
  24.357  
  24.358      perfc_incrc(shadow_out_of_sync_calls);
  24.359  
  24.360 -    if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
  24.361 +    if ( page_out_of_sync(mfn_to_page(l2mfn)) &&
  24.362           !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
  24.363                                   l2pfn, l2_table_offset(va)) )
  24.364          return 1;
  24.365 @@ -2014,13 +2014,13 @@ int __shadow_out_of_sync(struct vcpu *v,
  24.366          return 0;
  24.367  
  24.368      l1pfn = l2e_get_pfn(l2e);
  24.369 -    l1mfn = __gpfn_to_mfn(d, l1pfn);
  24.370 +    l1mfn = gmfn_to_mfn(d, l1pfn);
  24.371  
  24.372      // If the l1 pfn is invalid, it can't be out of sync...
  24.373      if ( !VALID_MFN(l1mfn) )
  24.374          return 0;
  24.375  
  24.376 -    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
  24.377 +    if ( page_out_of_sync(mfn_to_page(l1mfn)) &&
  24.378           !snapshot_entry_matches(
  24.379               d, &linear_pg_table[l1_linear_offset(va) & ~(L1_PAGETABLE_ENTRIES-1)],
  24.380               l1pfn, l1_table_offset(va)) )
  24.381 @@ -2148,13 +2148,13 @@ static u32 remove_all_write_access_in_pt
  24.382      int i;
  24.383      u32 found = 0;
  24.384      int is_l1_shadow =
  24.385 -        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  24.386 +        ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
  24.387           PGT_l1_shadow);
  24.388  
  24.389      match = l1e_from_pfn(readonly_gmfn, flags);
  24.390  
  24.391      if ( shadow_mode_external(d) ) {
  24.392 -        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
  24.393 +        i = (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
  24.394              >> PGT_va_shift;
  24.395  
  24.396          if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
  24.397 @@ -2192,7 +2192,7 @@ int shadow_remove_all_write_access(
  24.398  
  24.399      // If it's not a writable page, then no writable refs can be outstanding.
  24.400      //
  24.401 -    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
  24.402 +    if ( (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
  24.403           PGT_writable_page )
  24.404      {
  24.405          perfc_incrc(remove_write_not_writable);
  24.406 @@ -2202,7 +2202,7 @@ int shadow_remove_all_write_access(
  24.407      // How many outstanding writable PTEs for this page are there?
  24.408      //
  24.409      write_refs =
  24.410 -        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
  24.411 +        (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
  24.412      if ( write_refs && MFN_PINNED(readonly_gmfn) )
  24.413      {
  24.414          write_refs--;
  24.415 @@ -2220,7 +2220,7 @@ int shadow_remove_all_write_access(
  24.416  
  24.417           // Use the back pointer to locate the shadow page that can contain
  24.418           // the PTE of interest
  24.419 -         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
  24.420 +         if ( (predicted_smfn = mfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
  24.421               found += remove_all_write_access_in_ptpage(
  24.422                   d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
  24.423               if ( found == write_refs )
  24.424 @@ -2261,7 +2261,7 @@ static u32 remove_all_access_in_page(
  24.425      int i;
  24.426      u32 count = 0;
  24.427      int is_l1_shadow =
  24.428 -        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
  24.429 +        ((mfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
  24.430           PGT_l1_shadow);
  24.431  
  24.432      match = l1e_from_pfn(forbidden_gmfn, flags);
  24.433 @@ -2278,7 +2278,7 @@ static u32 remove_all_access_in_page(
  24.434          if ( is_l1_shadow )
  24.435              shadow_put_page_from_l1e(ol2e, d);
  24.436          else /* must be an hl2 page */
  24.437 -            put_page(pfn_to_page(forbidden_gmfn));
  24.438 +            put_page(mfn_to_page(forbidden_gmfn));
  24.439      }
  24.440  
  24.441      unmap_domain_page(pl1e);
  24.442 @@ -2361,7 +2361,7 @@ static int resync_all(struct domain *d, 
  24.443              // the new contents of the guest page iff this it has the right
  24.444              // page type.
  24.445              //
  24.446 -            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
  24.447 +            if ( stype != ( mfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
  24.448                  continue;
  24.449          }
  24.450  
  24.451 @@ -2398,12 +2398,12 @@ static int resync_all(struct domain *d, 
  24.452              if ( !smfn )
  24.453                  break;
  24.454  
  24.455 -            min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
  24.456 +            min_max_shadow = mfn_to_page(smfn)->tlbflush_timestamp;
  24.457              min_shadow     = SHADOW_MIN(min_max_shadow);
  24.458              max_shadow     = SHADOW_MAX(min_max_shadow);
  24.459  
  24.460              min_max_snapshot =
  24.461 -                pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
  24.462 +                mfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
  24.463              min_snapshot     = SHADOW_MIN(min_max_snapshot);
  24.464              max_snapshot     = SHADOW_MAX(min_max_snapshot);
  24.465  
  24.466 @@ -2754,7 +2754,7 @@ int shadow_fault(unsigned long va, struc
  24.467              domain_crash_synchronous();
  24.468          }
  24.469  
  24.470 -        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
  24.471 +        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gpde)));
  24.472      }
  24.473  
  24.474      shadow_set_l1e(va, spte, 1);
  24.475 @@ -2913,7 +2913,7 @@ void __update_pagetables(struct vcpu *v)
  24.476  {
  24.477      struct domain *d = v->domain;
  24.478      unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
  24.479 -    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  24.480 +    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
  24.481      unsigned long smfn, hl2mfn, old_smfn;
  24.482      int need_sync = 0;
  24.483  
  24.484 @@ -3173,7 +3173,7 @@ static int check_pte(
  24.485          FAIL("global bit set in shadow");
  24.486  
  24.487      eff_guest_pfn = l1e_get_pfn(eff_guest_pte);
  24.488 -    eff_guest_mfn = __gpfn_to_mfn(d, eff_guest_pfn);
  24.489 +    eff_guest_mfn = gmfn_to_mfn(d, eff_guest_pfn);
  24.490      shadow_mfn = l1e_get_pfn(shadow_pte);
  24.491  
  24.492      if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
  24.493 @@ -3190,7 +3190,7 @@ static int check_pte(
  24.494      {
  24.495          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
  24.496                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  24.497 -               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  24.498 +               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  24.499                 page_table_page);
  24.500          FAIL("RW coherence");
  24.501      }
  24.502 @@ -3201,7 +3201,7 @@ static int check_pte(
  24.503      {
  24.504          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
  24.505                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  24.506 -               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  24.507 +               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
  24.508                 page_table_page);
  24.509          FAIL("RW2 coherence");
  24.510      }
  24.511 @@ -3241,7 +3241,7 @@ static int check_l1_table(
  24.512      l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
  24.513      int errors = 0;
  24.514  
  24.515 -    if ( page_out_of_sync(pfn_to_page(gmfn)) )
  24.516 +    if ( page_out_of_sync(mfn_to_page(gmfn)) )
  24.517      {
  24.518          snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
  24.519          ASSERT(snapshot_mfn);
  24.520 @@ -3281,13 +3281,13 @@ int check_l2_table(
  24.521      int errors = 0;
  24.522      int limit;
  24.523  
  24.524 -    if ( !oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != d) )
  24.525 +    if ( !oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != d) )
  24.526          FAILPT("domain doesn't own page");
  24.527 -    if ( oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != NULL) )
  24.528 +    if ( oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != NULL) )
  24.529          FAILPT("bogus owner for snapshot page");
  24.530 -    if ( page_get_owner(pfn_to_page(smfn)) != NULL )
  24.531 +    if ( page_get_owner(mfn_to_page(smfn)) != NULL )
  24.532          FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
  24.533 -               smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
  24.534 +               smfn, page_get_owner(mfn_to_page(smfn))->domain_id);
  24.535  
  24.536  #if 0
  24.537      if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
  24.538 @@ -3375,14 +3375,14 @@ int _check_pagetable(struct vcpu *v, cha
  24.539      perfc_incrc(check_pagetable);
  24.540  
  24.541      ptbase_mfn = gptbase >> PAGE_SHIFT;
  24.542 -    ptbase_pfn = __mfn_to_gpfn(d, ptbase_mfn);
  24.543 +    ptbase_pfn = mfn_to_gmfn(d, ptbase_mfn);
  24.544  
  24.545      if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
  24.546      {
  24.547          printk("%s-PT %lx not shadowed\n", s, gptbase);
  24.548          goto out;
  24.549      }
  24.550 -    if ( page_out_of_sync(pfn_to_page(ptbase_mfn)) )
  24.551 +    if ( page_out_of_sync(mfn_to_page(ptbase_mfn)) )
  24.552      {
  24.553          ptbase_mfn = __shadow_status(d, ptbase_pfn, PGT_snapshot);
  24.554          oos_pdes = 1;
  24.555 @@ -3403,7 +3403,7 @@ int _check_pagetable(struct vcpu *v, cha
  24.556      for ( i = 0; i < limit; i++ )
  24.557      {
  24.558          unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
  24.559 -        unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
  24.560 +        unsigned long gl1mfn = gmfn_to_mfn(d, gl1pfn);
  24.561          unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
  24.562  
  24.563          if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
  24.564 @@ -3444,7 +3444,7 @@ int _check_all_pagetables(struct vcpu *v
  24.565          a = &d->arch.shadow_ht[i];
  24.566          while ( a && a->gpfn_and_flags )
  24.567          {
  24.568 -            gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  24.569 +            gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
  24.570  
  24.571              switch ( a->gpfn_and_flags & PGT_type_mask )
  24.572              {
  24.573 @@ -3454,7 +3454,7 @@ int _check_all_pagetables(struct vcpu *v
  24.574                  break;
  24.575              case PGT_l2_shadow:
  24.576                  errors += check_l2_table(v, gmfn, a->smfn,
  24.577 -                                         page_out_of_sync(pfn_to_page(gmfn)));
  24.578 +                                         page_out_of_sync(mfn_to_page(gmfn)));
  24.579                  break;
  24.580              case PGT_l3_shadow:
  24.581              case PGT_l4_shadow:
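
The shadow32.c hunks above additionally rename the machine-address helper page_to_phys() to page_to_maddr(), and the shadow_public.c hunks below rename the physaddr_t type to paddr_t. A minimal sketch of how the renamed address helpers relate, again with an assumed PAGE_SHIFT and frame-table layout rather than the real Xen definitions:

    /* Sketch only: assumed constants, not the real Xen definitions. */
    #include <stdint.h>

    #define PAGE_SHIFT 12

    typedef uint64_t paddr_t;                   /* formerly physaddr_t */

    struct page_info { unsigned long count_info, type_info; };
    static struct page_info frame_table_sketch[1024];

    static inline unsigned long page_to_mfn(struct page_info *pg)
    {
        return (unsigned long)(pg - frame_table_sketch);
    }

    /* A machine address is the machine frame number shifted to byte
     * granularity, hence page_to_phys() -> page_to_maddr(). */
    static inline paddr_t page_to_maddr(struct page_info *pg)
    {
        return (paddr_t)page_to_mfn(pg) << PAGE_SHIFT;
    }

This is why alloc_p2m_table() now pairs mk_pagetable(page_to_maddr(l2page)) with map_domain_page_with_cache(page_to_mfn(l2page), ...): the former wants a byte-granular machine address, the latter a frame number.
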
    25.1 --- a/xen/arch/x86/shadow_public.c	Wed Feb 01 15:01:04 2006 +0000
    25.2 +++ b/xen/arch/x86/shadow_public.c	Wed Feb 01 16:28:50 2006 +0100
    25.3 @@ -170,14 +170,14 @@ free_shadow_tables(struct domain *d, uns
    25.4  #if CONFIG_PAGING_LEVELS >=3
    25.5      if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
    25.6      {
    25.7 -        struct pfn_info *page = pfn_to_page(smfn);
    25.8 +        struct page_info *page = mfn_to_page(smfn);
    25.9          for ( i = 0; i < PDP_ENTRIES; i++ )
   25.10          {
   25.11              if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
   25.12                  free_fake_shadow_l2(d,entry_get_pfn(ple[i]));
   25.13          }
   25.14  
   25.15 -        page = pfn_to_page(entry_get_pfn(ple[0]));
   25.16 +        page = mfn_to_page(entry_get_pfn(ple[0]));
   25.17          free_domheap_pages(page, SL2_ORDER);
   25.18          unmap_domain_page(ple);
   25.19      }
   25.20 @@ -210,7 +210,7 @@ free_shadow_tables(struct domain *d, uns
   25.21                      break;
   25.22                  if ( level == PAGING_L2 )
   25.23                  {
   25.24 -                    struct pfn_info *page = pfn_to_page(smfn);
   25.25 +                    struct page_info *page = mfn_to_page(smfn);
   25.26                      if ( is_xen_l2_slot(page->u.inuse.type_info, i) )
   25.27                          continue;
   25.28                  }
   25.29 @@ -234,7 +234,7 @@ free_shadow_tables(struct domain *d, uns
   25.30   */
   25.31  static pagetable_t page_table_convert(struct domain *d)
   25.32  {
   25.33 -    struct pfn_info *l4page, *l3page;
   25.34 +    struct page_info *l4page, *l3page;
   25.35      l4_pgentry_t *l4;
   25.36      l3_pgentry_t *l3, *pae_l3;
   25.37      int i;
   25.38 @@ -242,13 +242,13 @@ static pagetable_t page_table_convert(st
   25.39      l4page = alloc_domheap_page(NULL);
   25.40      if (l4page == NULL)
   25.41          domain_crash_synchronous();
   25.42 -    l4 = map_domain_page(page_to_pfn(l4page));
   25.43 +    l4 = map_domain_page(page_to_mfn(l4page));
   25.44      memset(l4, 0, PAGE_SIZE);
   25.45  
   25.46      l3page = alloc_domheap_page(NULL);
   25.47      if (l3page == NULL)
   25.48          domain_crash_synchronous();
   25.49 -    l3 = map_domain_page(page_to_pfn(l3page));
   25.50 +    l3 = map_domain_page(page_to_mfn(l3page));
   25.51      memset(l3, 0, PAGE_SIZE);
   25.52  
   25.53      l4[0] = l4e_from_page(l3page, __PAGE_HYPERVISOR);
   25.54 @@ -261,14 +261,14 @@ static pagetable_t page_table_convert(st
   25.55      unmap_domain_page(l4);
   25.56      unmap_domain_page(l3);
   25.57  
   25.58 -    return mk_pagetable(page_to_phys(l4page));
   25.59 +    return mk_pagetable(page_to_maddr(l4page));
   25.60  }
   25.61  
   25.62  static void alloc_monitor_pagetable(struct vcpu *v)
   25.63  {
   25.64      unsigned long mmfn;
   25.65      l4_pgentry_t *mpl4e;
   25.66 -    struct pfn_info *mmfn_info;
   25.67 +    struct page_info *mmfn_info;
   25.68      struct domain *d = v->domain;
   25.69      pagetable_t phys_table;
   25.70  
   25.71 @@ -277,7 +277,7 @@ static void alloc_monitor_pagetable(stru
   25.72      mmfn_info = alloc_domheap_page(NULL);
   25.73      ASSERT( mmfn_info );
   25.74  
   25.75 -    mmfn = page_to_pfn(mmfn_info);
   25.76 +    mmfn = page_to_mfn(mmfn_info);
   25.77      mpl4e = (l4_pgentry_t *) map_domain_page_global(mmfn);
   25.78      memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
   25.79      mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   25.80 @@ -302,7 +302,7 @@ void free_monitor_pagetable(struct vcpu 
   25.81       */
   25.82      mfn = pagetable_get_pfn(v->arch.monitor_table);
   25.83      unmap_domain_page_global(v->arch.monitor_vtable);
   25.84 -    free_domheap_page(pfn_to_page(mfn));
   25.85 +    free_domheap_page(mfn_to_page(mfn));
   25.86  
   25.87      v->arch.monitor_table = mk_pagetable(0);
   25.88      v->arch.monitor_vtable = 0;
   25.89 @@ -326,7 +326,7 @@ static void alloc_monitor_pagetable(stru
   25.90  {
   25.91      unsigned long mmfn;
   25.92      l2_pgentry_t *mpl2e;
   25.93 -    struct pfn_info *mmfn_info;
   25.94 +    struct page_info *mmfn_info;
   25.95      struct domain *d = v->domain;
   25.96      int i;
   25.97  
   25.98 @@ -335,7 +335,7 @@ static void alloc_monitor_pagetable(stru
   25.99      mmfn_info = alloc_domheap_page(NULL);
  25.100      ASSERT(mmfn_info != NULL);
  25.101  
  25.102 -    mmfn = page_to_pfn(mmfn_info);
  25.103 +    mmfn = page_to_mfn(mmfn_info);
  25.104      mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
  25.105      memset(mpl2e, 0, PAGE_SIZE);
  25.106  
  25.107 @@ -399,7 +399,7 @@ void free_monitor_pagetable(struct vcpu 
  25.108       */
  25.109      mfn = pagetable_get_pfn(v->arch.monitor_table);
  25.110      unmap_domain_page_global(v->arch.monitor_vtable);
  25.111 -    free_domheap_page(pfn_to_page(mfn));
  25.112 +    free_domheap_page(mfn_to_page(mfn));
  25.113  
  25.114      v->arch.monitor_table = mk_pagetable(0);
  25.115      v->arch.monitor_vtable = 0;
  25.116 @@ -416,7 +416,7 @@ shadow_free_snapshot(struct domain *d, s
  25.117  
  25.118      // Clear the out_of_sync bit.
  25.119      //
  25.120 -    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
  25.121 +    clear_bit(_PGC_out_of_sync, &mfn_to_page(entry->gmfn)->count_info);
  25.122  
  25.123      // XXX Need to think about how to protect the domain's
  25.124      // information less expensively.
  25.125 @@ -431,9 +431,9 @@ shadow_free_snapshot(struct domain *d, s
  25.126  void
  25.127  release_out_of_sync_entry(struct domain *d, struct out_of_sync_entry *entry)
  25.128  {
  25.129 -    struct pfn_info *page;
  25.130 +    struct page_info *page;
  25.131  
  25.132 -    page = pfn_to_page(entry->gmfn);
  25.133 +    page = mfn_to_page(entry->gmfn);
  25.134          
  25.135      // Decrement ref count of guest & shadow pages
  25.136      //
  25.137 @@ -506,13 +506,13 @@ shadow_demote(struct domain *d, unsigned
  25.138      if ( !shadow_mode_refcounts(d) )
  25.139          return;
  25.140  
  25.141 -    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
  25.142 +    ASSERT(mfn_to_page(gmfn)->count_info & PGC_page_table);
  25.143  
  25.144      if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
  25.145      {
  25.146 -        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
  25.147 +        clear_bit(_PGC_page_table, &mfn_to_page(gmfn)->count_info);
  25.148  
  25.149 -        if ( page_out_of_sync(pfn_to_page(gmfn)) )
  25.150 +        if ( page_out_of_sync(mfn_to_page(gmfn)) )
  25.151          {
  25.152              remove_out_of_sync_entries(d, gmfn);
  25.153          }
  25.154 @@ -524,7 +524,7 @@ free_shadow_l1_table(struct domain *d, u
  25.155  {
  25.156      l1_pgentry_t *pl1e = map_domain_page(smfn);
  25.157      int i;
  25.158 -    struct pfn_info *spage = pfn_to_page(smfn);
  25.159 +    struct page_info *spage = mfn_to_page(smfn);
  25.160      u32 min_max = spage->tlbflush_timestamp;
  25.161      int min = SHADOW_MIN(min_max);
  25.162      int max;
  25.163 @@ -561,7 +561,7 @@ free_shadow_hl2_table(struct domain *d, 
  25.164      for ( i = 0; i < limit; i++ )
  25.165      {
  25.166          if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
  25.167 -            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
  25.168 +            put_page(mfn_to_page(l1e_get_pfn(hl2[i])));
  25.169      }
  25.170  
  25.171      unmap_domain_page(hl2);
  25.172 @@ -605,11 +605,11 @@ void free_fake_shadow_l2(struct domain *
  25.173  
  25.174  void free_shadow_page(unsigned long smfn)
  25.175  {
  25.176 -    struct pfn_info *page = pfn_to_page(smfn);
  25.177 +    struct page_info *page = mfn_to_page(smfn);
  25.178  
  25.179      unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
  25.180 -    struct domain *d = page_get_owner(pfn_to_page(gmfn));
  25.181 -    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  25.182 +    struct domain *d = page_get_owner(mfn_to_page(gmfn));
  25.183 +    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
  25.184      unsigned long type = page->u.inuse.type_info & PGT_type_mask;
  25.185  
  25.186      SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
  25.187 @@ -670,7 +670,7 @@ void free_shadow_page(unsigned long smfn
  25.188  
  25.189      default:
  25.190          printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
  25.191 -               page_to_pfn(page), page->u.inuse.type_info);
  25.192 +               page_to_mfn(page), page->u.inuse.type_info);
  25.193          break;
  25.194      }
  25.195  
  25.196 @@ -885,7 +885,7 @@ void free_shadow_pages(struct domain *d)
  25.197      /* Now free the pre-zero'ed pages from the domain. */
  25.198      list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
  25.199      {
  25.200 -        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
  25.201 +        struct page_info *page = list_entry(list_ent, struct page_info, list);
  25.202  
  25.203          list_del(list_ent);
  25.204          perfc_decr(free_l1_pages);
  25.205 @@ -1072,7 +1072,7 @@ int __shadow_mode_enable(struct domain *
  25.206          {
  25.207              // external guests provide their own memory for their P2M maps.
  25.208              //
  25.209 -            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
  25.210 +            ASSERT(d == page_get_owner(mfn_to_page(pagetable_get_pfn(
  25.211                  d->arch.phys_table))));
  25.212          }
  25.213      }
  25.214 @@ -1086,7 +1086,7 @@ int __shadow_mode_enable(struct domain *
  25.215      if ( shadow_mode_refcounts(d) )
  25.216      {
  25.217          struct list_head *list_ent; 
  25.218 -        struct pfn_info *page;
  25.219 +        struct page_info *page;
  25.220  
  25.221          /*
  25.222           * Tear down its counts by disassembling its page-table-based refcounts
  25.223 @@ -1114,7 +1114,7 @@ int __shadow_mode_enable(struct domain *
  25.224          for (list_ent = d->page_list.next; list_ent != &d->page_list; 
  25.225               list_ent = page->list.next) {
  25.226              
  25.227 -            page = list_entry(list_ent, struct pfn_info, list);
  25.228 +            page = list_entry(list_ent, struct page_info, list);
  25.229              if ( !get_page_type(page, PGT_writable_page) )
  25.230                  BUG();
  25.231              put_page_type(page);
  25.232 @@ -1339,7 +1339,7 @@ set_p2m_entry(struct domain *d, unsigned
  25.233      unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
  25.234      l2_pgentry_t *l2, l2e;
  25.235      l1_pgentry_t *l1;
  25.236 -    struct pfn_info *l1page;
  25.237 +    struct page_info *l1page;
  25.238      unsigned long va = pfn << PAGE_SHIFT;
  25.239  
  25.240      ASSERT(tabpfn != 0);
  25.241 @@ -1355,7 +1355,7 @@ set_p2m_entry(struct domain *d, unsigned
  25.242              return 0;
  25.243          }
  25.244  
  25.245 -        l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache);
  25.246 +        l1 = map_domain_page_with_cache(page_to_mfn(l1page), l1cache);
  25.247          memset(l1, 0, PAGE_SIZE);
  25.248          unmap_domain_page_with_cache(l1, l1cache);
  25.249  
  25.250 @@ -1375,7 +1375,7 @@ int
  25.251  alloc_p2m_table(struct domain *d)
  25.252  {
  25.253      struct list_head *list_ent;
  25.254 -    struct pfn_info *page, *l2page;
  25.255 +    struct page_info *page, *l2page;
  25.256      l2_pgentry_t *l2;
  25.257      unsigned long mfn, pfn;
  25.258      struct domain_mmap_cache l1cache, l2cache;
  25.259 @@ -1387,16 +1387,16 @@ alloc_p2m_table(struct domain *d)
  25.260      domain_mmap_cache_init(&l1cache);
  25.261      domain_mmap_cache_init(&l2cache);
  25.262  
  25.263 -    d->arch.phys_table = mk_pagetable(page_to_phys(l2page));
  25.264 -    l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache);
  25.265 +    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
  25.266 +    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
  25.267      memset(l2, 0, PAGE_SIZE);
  25.268      unmap_domain_page_with_cache(l2, &l2cache);
  25.269  
  25.270      list_ent = d->page_list.next;
  25.271      while ( list_ent != &d->page_list )
  25.272      {
  25.273 -        page = list_entry(list_ent, struct pfn_info, list);
  25.274 -        mfn = page_to_pfn(page);
  25.275 +        page = list_entry(list_ent, struct page_info, list);
  25.276 +        mfn = page_to_mfn(page);
  25.277          pfn = get_pfn_from_mfn(mfn);
  25.278          ASSERT(pfn != INVALID_M2P_ENTRY);
  25.279          ASSERT(pfn < (1u<<20));
  25.280 @@ -1409,8 +1409,8 @@ alloc_p2m_table(struct domain *d)
  25.281      list_ent = d->xenpage_list.next;
  25.282      while ( list_ent != &d->xenpage_list )
  25.283      {
  25.284 -        page = list_entry(list_ent, struct pfn_info, list);
  25.285 -        mfn = page_to_pfn(page);
  25.286 +        page = list_entry(list_ent, struct page_info, list);
  25.287 +        mfn = page_to_mfn(page);
  25.288          pfn = get_pfn_from_mfn(mfn);
  25.289          if ( (pfn != INVALID_M2P_ENTRY) &&
  25.290               (pfn < (1u<<20)) )
  25.291 @@ -1429,7 +1429,7 @@ alloc_p2m_table(struct domain *d)
  25.292  
  25.293  void shadow_l1_normal_pt_update(
  25.294      struct domain *d,
  25.295 -    physaddr_t pa, l1_pgentry_t gpte,
  25.296 +    paddr_t pa, l1_pgentry_t gpte,
  25.297      struct domain_mmap_cache *cache)
  25.298  {
  25.299      unsigned long sl1mfn;    
  25.300 @@ -1454,7 +1454,7 @@ void shadow_l1_normal_pt_update(
  25.301  
  25.302  void shadow_l2_normal_pt_update(
  25.303      struct domain *d,
  25.304 -    physaddr_t pa, l2_pgentry_t gpde,
  25.305 +    paddr_t pa, l2_pgentry_t gpde,
  25.306      struct domain_mmap_cache *cache)
  25.307  {
  25.308      unsigned long sl2mfn;
  25.309 @@ -1479,7 +1479,7 @@ void shadow_l2_normal_pt_update(
  25.310  #if CONFIG_PAGING_LEVELS >= 3
  25.311  void shadow_l3_normal_pt_update(
  25.312      struct domain *d,
  25.313 -    physaddr_t pa, l3_pgentry_t l3e,
  25.314 +    paddr_t pa, l3_pgentry_t l3e,
  25.315      struct domain_mmap_cache *cache)
  25.316  {
  25.317      unsigned long sl3mfn;
  25.318 @@ -1506,7 +1506,7 @@ void shadow_l3_normal_pt_update(
  25.319  #if CONFIG_PAGING_LEVELS >= 4
  25.320  void shadow_l4_normal_pt_update(
  25.321      struct domain *d,
  25.322 -    physaddr_t pa, l4_pgentry_t l4e,
  25.323 +    paddr_t pa, l4_pgentry_t l4e,
  25.324      struct domain_mmap_cache *cache)
  25.325  {
  25.326      unsigned long sl4mfn;
  25.327 @@ -1543,7 +1543,7 @@ translate_l1pgtable(struct domain *d, l1
  25.328               (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
  25.329          {
  25.330              unsigned long mfn = l1e_get_pfn(l1[i]);
  25.331 -            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
  25.332 +            unsigned long gpfn = mfn_to_gmfn(d, mfn);
  25.333              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
  25.334              l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
  25.335          }
  25.336 @@ -1571,7 +1571,7 @@ translate_l2pgtable(struct domain *d, l1
  25.337               (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
  25.338          {
  25.339              unsigned long mfn = l2e_get_pfn(l2[i]);
  25.340 -            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
  25.341 +            unsigned long gpfn = mfn_to_gmfn(d, mfn);
  25.342              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
  25.343              l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
  25.344              translate_l1pgtable(d, p2m, mfn);
  25.345 @@ -1648,7 +1648,7 @@ static u32 remove_all_access_in_page(
  25.346      int i;
  25.347      u32 count = 0;
  25.348      int is_l1_shadow =
  25.349 -        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
  25.350 +        ((mfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
  25.351           PGT_l1_shadow);
  25.352  
  25.353      match = l1e_from_pfn(forbidden_gmfn, flags);
  25.354 @@ -1665,7 +1665,7 @@ static u32 remove_all_access_in_page(
  25.355          if ( is_l1_shadow )
  25.356              shadow_put_page_from_l1e(ol2e, d);
  25.357          else /* must be an hl2 page */
  25.358 -            put_page(pfn_to_page(forbidden_gmfn));
  25.359 +            put_page(mfn_to_page(forbidden_gmfn));
  25.360      }
  25.361  
  25.362      unmap_domain_page(pl1e);
  25.363 @@ -1715,7 +1715,7 @@ static u32 __shadow_remove_all_access(st
  25.364  }
  25.365  
  25.366  void shadow_drop_references(
  25.367 -    struct domain *d, struct pfn_info *page)
  25.368 +    struct domain *d, struct page_info *page)
  25.369  {
  25.370      if ( likely(!shadow_mode_refcounts(d)) ||
  25.371           ((page->u.inuse.type_info & PGT_count_mask) == 0) )
  25.372 @@ -1723,21 +1723,21 @@ void shadow_drop_references(
  25.373  
  25.374      /* XXX This needs more thought... */
  25.375      printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n",
  25.376 -           __func__, page_to_pfn(page));
  25.377 -    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
  25.378 +           __func__, page_to_mfn(page));
  25.379 +    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
  25.380             page->count_info, page->u.inuse.type_info);
  25.381  
  25.382      shadow_lock(d);
  25.383 -    __shadow_remove_all_access(d, page_to_pfn(page));
  25.384 +    __shadow_remove_all_access(d, page_to_mfn(page));
  25.385      shadow_unlock(d);
  25.386  
  25.387 -    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
  25.388 +    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
  25.389             page->count_info, page->u.inuse.type_info);
  25.390  }
  25.391  
  25.392  /* XXX Needs more thought. Neither pretty nor fast: a place holder. */
  25.393  void shadow_sync_and_drop_references(
  25.394 -    struct domain *d, struct pfn_info *page)
  25.395 +    struct domain *d, struct page_info *page)
  25.396  {
  25.397      if ( likely(!shadow_mode_refcounts(d)) )
  25.398          return;
  25.399 @@ -1745,9 +1745,9 @@ void shadow_sync_and_drop_references(
  25.400      shadow_lock(d);
  25.401  
  25.402      if ( page_out_of_sync(page) )
  25.403 -        __shadow_sync_mfn(d, page_to_pfn(page));
  25.404 +        __shadow_sync_mfn(d, page_to_mfn(page));
  25.405  
  25.406 -    __shadow_remove_all_access(d, page_to_pfn(page));
  25.407 +    __shadow_remove_all_access(d, page_to_mfn(page));
  25.408  
  25.409      shadow_unlock(d);
  25.410  }
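A minimal sketch (not part of the changeset) showing the intent of the renamed page helpers used in the hunks above; check_page_identity() is a hypothetical name used only for illustration, and the frame_table-based definitions it relies on appear later in this patch.

    /* Sketch only: page_to_mfn() and mfn_to_page() are inverses over the
     * frame table, so a struct page_info can be recovered from its machine
     * frame number. */
    static inline int check_page_identity(struct page_info *page)
    {
        unsigned long mfn = page_to_mfn(page);  /* page_info -> machine frame number */
        return mfn_to_page(mfn) == page;        /* machine frame number -> page_info */
    }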
    26.1 --- a/xen/arch/x86/smpboot.c	Wed Feb 01 15:01:04 2006 +0000
    26.2 +++ b/xen/arch/x86/smpboot.c	Wed Feb 01 16:28:50 2006 +0100
    26.3 @@ -98,7 +98,7 @@ static int trampoline_exec;
    26.4  static unsigned long __init setup_trampoline(void)
    26.5  {
    26.6  	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
    26.7 -	return virt_to_phys(trampoline_base);
    26.8 +	return virt_to_maddr(trampoline_base);
    26.9  }
   26.10  
   26.11  /*
   26.12 @@ -1038,7 +1038,7 @@ static void __init smp_boot_cpus(unsigne
   26.13  	 */
   26.14  	CMOS_WRITE(0, 0xf);
   26.15  
   26.16 -	*((volatile long *) phys_to_virt(0x467)) = 0;
   26.17 +	*((volatile long *) maddr_to_virt(0x467)) = 0;
   26.18  
   26.19  #ifdef BOGOMIPS
   26.20  	/*
    27.1 --- a/xen/arch/x86/x86_32/mm.c	Wed Feb 01 15:01:04 2006 +0000
    27.2 +++ b/xen/arch/x86/x86_32/mm.c	Wed Feb 01 16:28:50 2006 +0100
    27.3 @@ -34,11 +34,11 @@ unsigned int PAGE_HYPERVISOR_NOCACHE = _
    27.4  
    27.5  static unsigned long mpt_size;
    27.6  
    27.7 -struct pfn_info *alloc_xen_pagetable(void)
    27.8 +struct page_info *alloc_xen_pagetable(void)
    27.9  {
   27.10      extern int early_boot;
   27.11      extern unsigned long xenheap_phys_start;
   27.12 -    struct pfn_info *pg;
   27.13 +    struct page_info *pg;
   27.14  
   27.15      if ( !early_boot )
   27.16      {
   27.17 @@ -46,12 +46,12 @@ struct pfn_info *alloc_xen_pagetable(voi
   27.18          return ((v == NULL) ? NULL : virt_to_page(v));
   27.19      }
   27.20  
   27.21 -    pg = phys_to_page(xenheap_phys_start);
   27.22 +    pg = maddr_to_page(xenheap_phys_start);
   27.23      xenheap_phys_start += PAGE_SIZE;
   27.24      return pg;
   27.25  }
   27.26  
   27.27 -void free_xen_pagetable(struct pfn_info *pg)
   27.28 +void free_xen_pagetable(struct page_info *pg)
   27.29  {
   27.30      free_xenheap_page(page_to_virt(pg));
   27.31  }
   27.32 @@ -65,7 +65,7 @@ void __init paging_init(void)
   27.33  {
   27.34      void *ioremap_pt;
   27.35      unsigned long v;
   27.36 -    struct pfn_info *pg;
   27.37 +    struct page_info *pg;
   27.38      int i;
   27.39  
   27.40  #ifdef CONFIG_X86_PAE
   27.41 @@ -149,20 +149,20 @@ void subarch_init_memory(struct domain *
   27.42      unsigned int i, j;
   27.43  
   27.44      /*
   27.45 -     * We are rather picky about the layout of 'struct pfn_info'. The
   27.46 +     * We are rather picky about the layout of 'struct page_info'. The
   27.47       * count_info and domain fields must be adjacent, as we perform atomic
   27.48       * 64-bit operations on them. Also, just for sanity, we assert the size
   27.49       * of the structure here.
   27.50       */
   27.51 -    if ( (offsetof(struct pfn_info, u.inuse._domain) != 
   27.52 -          (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
   27.53 -         ((offsetof(struct pfn_info, count_info) & 7) != 0) ||
   27.54 -         (sizeof(struct pfn_info) != 24) )
   27.55 +    if ( (offsetof(struct page_info, u.inuse._domain) != 
   27.56 +          (offsetof(struct page_info, count_info) + sizeof(u32))) ||
   27.57 +         ((offsetof(struct page_info, count_info) & 7) != 0) ||
   27.58 +         (sizeof(struct page_info) != 24) )
   27.59      {
   27.60 -        printk("Weird pfn_info layout (%ld,%ld,%d)\n",
   27.61 -               offsetof(struct pfn_info, count_info),
   27.62 -               offsetof(struct pfn_info, u.inuse._domain),
   27.63 -               sizeof(struct pfn_info));
   27.64 +        printk("Weird page_info layout (%ld,%ld,%d)\n",
   27.65 +               offsetof(struct page_info, count_info),
   27.66 +               offsetof(struct page_info, u.inuse._domain),
   27.67 +               sizeof(struct page_info));
   27.68          BUG();
   27.69      }
   27.70  
   27.71 @@ -173,7 +173,7 @@ void subarch_init_memory(struct domain *
   27.72              idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i]);
   27.73          for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
   27.74          {
   27.75 -            struct pfn_info *page = pfn_to_page(m2p_start_mfn + j);
   27.76 +            struct page_info *page = mfn_to_page(m2p_start_mfn + j);
   27.77              page->count_info = PGC_allocated | 1;
   27.78              /* Ensure it's only mapped read-only by domains. */
   27.79              page->u.inuse.type_info = PGT_gdt_page | 1;
    28.1 --- a/xen/arch/x86/x86_64/mm.c	Wed Feb 01 15:01:04 2006 +0000
    28.2 +++ b/xen/arch/x86/x86_64/mm.c	Wed Feb 01 16:28:50 2006 +0100
    28.3 @@ -30,7 +30,7 @@
    28.4  #include <asm/msr.h>
    28.5  #include <public/memory.h>
    28.6  
    28.7 -struct pfn_info *alloc_xen_pagetable(void)
    28.8 +struct page_info *alloc_xen_pagetable(void)
    28.9  {
   28.10      extern int early_boot;
   28.11      unsigned long pfn;
   28.12 @@ -39,10 +39,10 @@ struct pfn_info *alloc_xen_pagetable(voi
   28.13          return alloc_domheap_page(NULL);
   28.14  
   28.15      pfn = alloc_boot_pages(1, 1);
   28.16 -    return ((pfn == 0) ? NULL : pfn_to_page(pfn));
   28.17 +    return ((pfn == 0) ? NULL : mfn_to_page(pfn));
   28.18  }
   28.19  
   28.20 -void free_xen_pagetable(struct pfn_info *pg)
   28.21 +void free_xen_pagetable(struct page_info *pg)
   28.22  {
   28.23      free_domheap_page(pg);
   28.24  }
   28.25 @@ -78,7 +78,7 @@ void __init paging_init(void)
   28.26      unsigned long i, mpt_size;
   28.27      l3_pgentry_t *l3_ro_mpt;
   28.28      l2_pgentry_t *l2_ro_mpt;
   28.29 -    struct pfn_info *pg;
   28.30 +    struct page_info *pg;
   28.31  
   28.32      idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   28.33  
   28.34 @@ -106,7 +106,7 @@ void __init paging_init(void)
   28.35          if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
   28.36              panic("Not enough memory for m2p table\n");
   28.37          map_pages_to_xen(
   28.38 -            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_pfn(pg), 
   28.39 +            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg), 
   28.40              1UL << PAGETABLE_ORDER,
   28.41              PAGE_HYPERVISOR);
   28.42          memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
   28.43 @@ -140,19 +140,19 @@ void subarch_init_memory(struct domain *
   28.44      l2_pgentry_t l2e;
   28.45  
   28.46      /*
   28.47 -     * We are rather picky about the layout of 'struct pfn_info'. The
   28.48 +     * We are rather picky about the layout of 'struct page_info'. The
   28.49       * count_info and domain fields must be adjacent, as we perform atomic
   28.50       * 64-bit operations on them.
   28.51       */
   28.52 -    if ( ((offsetof(struct pfn_info, u.inuse._domain) != 
   28.53 -           (offsetof(struct pfn_info, count_info) + sizeof(u32)))) ||
   28.54 -         ((offsetof(struct pfn_info, count_info) & 7) != 0) ||
   28.55 -         (sizeof(struct pfn_info) != 40) )
   28.56 +    if ( ((offsetof(struct page_info, u.inuse._domain) != 
   28.57 +           (offsetof(struct page_info, count_info) + sizeof(u32)))) ||
   28.58 +         ((offsetof(struct page_info, count_info) & 7) != 0) ||
   28.59 +         (sizeof(struct page_info) != 40) )
   28.60      {
   28.61 -        printk("Weird pfn_info layout (%ld,%ld,%ld)\n",
   28.62 -               offsetof(struct pfn_info, count_info),
   28.63 -               offsetof(struct pfn_info, u.inuse._domain),
   28.64 -               sizeof(struct pfn_info));
   28.65 +        printk("Weird page_info layout (%ld,%ld,%ld)\n",
   28.66 +               offsetof(struct page_info, count_info),
   28.67 +               offsetof(struct page_info, u.inuse._domain),
   28.68 +               sizeof(struct page_info));
   28.69          for ( ; ; ) ;
   28.70      }
   28.71  
   28.72 @@ -172,7 +172,7 @@ void subarch_init_memory(struct domain *
   28.73  
   28.74          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   28.75          {
   28.76 -            struct pfn_info *page = pfn_to_page(m2p_start_mfn + i);
   28.77 +            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
   28.78              page->count_info = PGC_allocated | 1;
   28.79              /* gdt to make sure it's only mapped read-only by non-privileged
   28.80                 domains. */
    29.1 --- a/xen/common/grant_table.c	Wed Feb 01 15:01:04 2006 +0000
    29.2 +++ b/xen/common/grant_table.c	Wed Feb 01 16:28:50 2006 +0100
    29.3 @@ -237,7 +237,7 @@ static int
    29.4          if ( !act->pin )
    29.5          {
    29.6              act->domid = sdom;
    29.7 -            act->frame = __gpfn_to_mfn(rd, sha->frame);
    29.8 +            act->frame = gmfn_to_mfn(rd, sha->frame);
    29.9          }
   29.10      }
   29.11      else if ( (act->pin & 0x80808080U) != 0 )
   29.12 @@ -254,10 +254,10 @@ static int
   29.13      spin_unlock(&rd->grant_table->lock);
   29.14  
   29.15      frame = act->frame;
   29.16 -    if ( unlikely(!pfn_valid(frame)) ||
   29.17 +    if ( unlikely(!mfn_valid(frame)) ||
   29.18           unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
   29.19 -                    get_page(pfn_to_page(frame), rd) :
   29.20 -                    get_page_and_type(pfn_to_page(frame), rd,
   29.21 +                    get_page(mfn_to_page(frame), rd) :
   29.22 +                    get_page_and_type(mfn_to_page(frame), rd,
   29.23                                        PGT_writable_page))) )
   29.24          PIN_FAIL(undo_out, GNTST_general_error,
   29.25                   "Could not pin the granted frame (%lx)!\n", frame);
   29.26 @@ -268,16 +268,16 @@ static int
   29.27          if ( rc != GNTST_okay )
   29.28          {
   29.29              if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
   29.30 -                put_page_type(pfn_to_page(frame));
   29.31 -            put_page(pfn_to_page(frame));
   29.32 +                put_page_type(mfn_to_page(frame));
   29.33 +            put_page(mfn_to_page(frame));
   29.34              goto undo_out;
   29.35          }
   29.36  
   29.37          if ( dev_hst_ro_flags & GNTMAP_device_map )
   29.38          {
   29.39 -            (void)get_page(pfn_to_page(frame), rd);
   29.40 +            (void)get_page(mfn_to_page(frame), rd);
   29.41              if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
   29.42 -                get_page_type(pfn_to_page(frame), PGT_writable_page);
   29.43 +                get_page_type(mfn_to_page(frame), PGT_writable_page);
   29.44          }
   29.45      }
   29.46  
   29.47 @@ -407,12 +407,12 @@ static int
   29.48              if ( flags & GNTMAP_readonly )
   29.49              {
   29.50                  act->pin -= GNTPIN_devr_inc;
   29.51 -                put_page(pfn_to_page(frame));
   29.52 +                put_page(mfn_to_page(frame));
   29.53              }
   29.54              else
   29.55              {
   29.56                  act->pin -= GNTPIN_devw_inc;
   29.57 -                put_page_and_type(pfn_to_page(frame));
   29.58 +                put_page_and_type(mfn_to_page(frame));
   29.59              }
   29.60          }
   29.61      }
   29.62 @@ -427,12 +427,12 @@ static int
   29.63          if ( flags & GNTMAP_readonly )
   29.64          {
   29.65              act->pin -= GNTPIN_hstr_inc;
   29.66 -            put_page(pfn_to_page(frame));
   29.67 +            put_page(mfn_to_page(frame));
   29.68          }
   29.69          else
   29.70          {
   29.71              act->pin -= GNTPIN_hstw_inc;
   29.72 -            put_page_and_type(pfn_to_page(frame));
   29.73 +            put_page_and_type(mfn_to_page(frame));
   29.74          }
   29.75      }
   29.76  
   29.77 @@ -481,7 +481,7 @@ gnttab_setup_table(
   29.78      gnttab_setup_table_t  op;
   29.79      struct domain        *d;
   29.80      int                   i;
   29.81 -    unsigned long         gpfn;
   29.82 +    unsigned long         gmfn;
   29.83  
   29.84      if ( count != 1 )
   29.85          return -EINVAL;
   29.86 @@ -523,8 +523,8 @@ gnttab_setup_table(
   29.87          (void)put_user(GNTST_okay, &uop->status);
   29.88          for ( i = 0; i < op.nr_frames; i++ )
   29.89          {
   29.90 -            gpfn = gnttab_shared_gpfn(d, d->grant_table, i);
   29.91 -            (void)put_user(gpfn, &op.frame_list[i]);
   29.92 +            gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
   29.93 +            (void)put_user(gmfn, &op.frame_list[i]);
   29.94          }
   29.95      }
   29.96  
   29.97 @@ -568,7 +568,7 @@ gnttab_dump_table(
   29.98      gt = d->grant_table;
   29.99      (void)put_user(GNTST_okay, &uop->status);
  29.100  
  29.101 -    shared_mfn = virt_to_phys(d->grant_table->shared);
  29.102 +    shared_mfn = virt_to_maddr(d->grant_table->shared);
  29.103  
  29.104      DPRINTK("Grant table for dom (%hu) MFN (%x)\n",
  29.105              op.dom, shared_mfn);
  29.106 @@ -706,7 +706,7 @@ gnttab_transfer(
  29.107  {
  29.108      struct domain *d = current->domain;
  29.109      struct domain *e;
  29.110 -    struct pfn_info *page;
  29.111 +    struct page_info *page;
  29.112      int i;
  29.113      grant_entry_t *sha;
  29.114      gnttab_transfer_t gop;
  29.115 @@ -723,7 +723,7 @@ gnttab_transfer(
  29.116          }
  29.117  
  29.118          /* Check the passed page frame for basic validity. */
  29.119 -        if ( unlikely(!pfn_valid(gop.mfn)) )
  29.120 +        if ( unlikely(!mfn_valid(gop.mfn)) )
  29.121          { 
  29.122              DPRINTK("gnttab_transfer: out-of-range %lx\n",
  29.123                      (unsigned long)gop.mfn);
  29.124 @@ -731,8 +731,8 @@ gnttab_transfer(
  29.125              continue;
  29.126          }
  29.127  
  29.128 -        mfn = __gpfn_to_mfn(d, gop.mfn);
  29.129 -        page = pfn_to_page(mfn);
  29.130 +        mfn = gmfn_to_mfn(d, gop.mfn);
  29.131 +        page = mfn_to_page(mfn);
  29.132          if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
  29.133          { 
  29.134              DPRINTK("gnttab_transfer: xen frame %lx\n",
  29.135 @@ -895,7 +895,7 @@ grant_table_create(
  29.136      memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
  29.137  
  29.138      for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  29.139 -        gnttab_create_shared_mfn(d, t, i);
  29.140 +        gnttab_create_shared_page(d, t, i);
  29.141  
  29.142      /* Okay, install the structure. */
  29.143      wmb(); /* avoid races with lock-free access to d->grant_table */
  29.144 @@ -952,7 +952,7 @@ gnttab_release_mappings(
  29.145              {
  29.146                  BUG_ON(!(act->pin & GNTPIN_devr_mask));
  29.147                  act->pin -= GNTPIN_devr_inc;
  29.148 -                put_page(pfn_to_page(act->frame));
  29.149 +                put_page(mfn_to_page(act->frame));
  29.150              }
  29.151  
  29.152              if ( map->ref_and_flags & GNTMAP_host_map )
  29.153 @@ -960,7 +960,7 @@ gnttab_release_mappings(
  29.154                  BUG_ON(!(act->pin & GNTPIN_hstr_mask));
  29.155                  act->pin -= GNTPIN_hstr_inc;
  29.156                  /* Done implicitly when page tables are destroyed. */
  29.157 -                /* put_page(pfn_to_page(act->frame)); */
  29.158 +                /* put_page(mfn_to_page(act->frame)); */
  29.159              }
  29.160          }
  29.161          else
  29.162 @@ -969,7 +969,7 @@ gnttab_release_mappings(
  29.163              {
  29.164                  BUG_ON(!(act->pin & GNTPIN_devw_mask));
  29.165                  act->pin -= GNTPIN_devw_inc;
  29.166 -                put_page_and_type(pfn_to_page(act->frame));
  29.167 +                put_page_and_type(mfn_to_page(act->frame));
  29.168              }
  29.169  
  29.170              if ( map->ref_and_flags & GNTMAP_host_map )
  29.171 @@ -977,7 +977,7 @@ gnttab_release_mappings(
  29.172                  BUG_ON(!(act->pin & GNTPIN_hstw_mask));
  29.173                  act->pin -= GNTPIN_hstw_inc;
  29.174                  /* Done implicitly when page tables are destroyed. */
  29.175 -                /* put_page_and_type(pfn_to_page(act->frame)); */
  29.176 +                /* put_page_and_type(mfn_to_page(act->frame)); */
  29.177              }
  29.178  
  29.179              if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
    30.1 --- a/xen/common/memory.c	Wed Feb 01 15:01:04 2006 +0000
    30.2 +++ b/xen/common/memory.c	Wed Feb 01 16:28:50 2006 +0100
    30.3 @@ -29,7 +29,7 @@ increase_reservation(
    30.4      unsigned int   flags,
    30.5      int           *preempted)
    30.6  {
    30.7 -    struct pfn_info *page;
    30.8 +    struct page_info *page;
    30.9      unsigned long    i;
   30.10  
   30.11      if ( (extent_list != NULL) &&
   30.12 @@ -59,7 +59,7 @@ increase_reservation(
   30.13  
   30.14          /* Inform the domain of the new page's machine address. */ 
   30.15          if ( (extent_list != NULL) &&
   30.16 -             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
   30.17 +             (__put_user(page_to_mfn(page), &extent_list[i]) != 0) )
   30.18              return i;
   30.19      }
   30.20  
   30.21 @@ -75,7 +75,7 @@ populate_physmap(
   30.22      unsigned int   flags,
   30.23      int           *preempted)
   30.24  {
   30.25 -    struct pfn_info *page;
   30.26 +    struct page_info *page;
   30.27      unsigned long    i, j, pfn, mfn;
   30.28  
   30.29      if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
   30.30 @@ -102,7 +102,7 @@ populate_physmap(
   30.31              goto out;
   30.32          }
   30.33  
   30.34 -        mfn = page_to_pfn(page);
   30.35 +        mfn = page_to_mfn(page);
   30.36  
   30.37          if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
   30.38              goto out;
   30.39 @@ -136,8 +136,8 @@ decrease_reservation(
   30.40      unsigned int   flags,
   30.41      int           *preempted)
   30.42  {
   30.43 -    struct pfn_info *page;
   30.44 -    unsigned long    i, j, gpfn, mfn;
   30.45 +    struct page_info *page;
   30.46 +    unsigned long    i, j, gmfn, mfn;
   30.47  
   30.48      if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
   30.49          return 0;
   30.50 @@ -150,12 +150,12 @@ decrease_reservation(
   30.51              return i;
   30.52          }
   30.53  
   30.54 -        if ( unlikely(__get_user(gpfn, &extent_list[i]) != 0) )
   30.55 +        if ( unlikely(__get_user(gmfn, &extent_list[i]) != 0) )
   30.56              return i;
   30.57  
   30.58          for ( j = 0; j < (1 << extent_order); j++ )
   30.59          {
   30.60 -            mfn = __gpfn_to_mfn(d, gpfn + j);
   30.61 +            mfn = gmfn_to_mfn(d, gmfn + j);
   30.62              if ( unlikely(mfn >= max_page) )
   30.63              {
   30.64                  DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
   30.65 @@ -163,7 +163,7 @@ decrease_reservation(
   30.66                  return i;
   30.67              }
   30.68              
   30.69 -            page = pfn_to_page(mfn);
   30.70 +            page = mfn_to_page(mfn);
   30.71              if ( unlikely(!get_page(page, d)) )
   30.72              {
   30.73                  DPRINTK("Bad page free for domain %u\n", d->domain_id);
   30.74 @@ -176,7 +176,7 @@ decrease_reservation(
   30.75              if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
   30.76                  put_page(page);
   30.77  
   30.78 -            guest_physmap_remove_page(d, gpfn + j, mfn);
   30.79 +            guest_physmap_remove_page(d, gmfn + j, mfn);
   30.80  
   30.81              put_page(page);
   30.82          }
    31.1 --- a/xen/common/page_alloc.c	Wed Feb 01 15:01:04 2006 +0000
    31.2 +++ b/xen/common/page_alloc.c	Wed Feb 01 16:28:50 2006 +0100
    31.3 @@ -132,7 +132,7 @@ static void map_free(unsigned long first
    31.4   */
    31.5  
    31.6  /* Initialise allocator to handle up to @max_page pages. */
    31.7 -physaddr_t init_boot_allocator(physaddr_t bitmap_start)
    31.8 +paddr_t init_boot_allocator(paddr_t bitmap_start)
    31.9  {
   31.10      unsigned long bitmap_size;
   31.11  
   31.12 @@ -145,7 +145,7 @@ physaddr_t init_boot_allocator(physaddr_
   31.13      bitmap_size  = max_page / 8;
   31.14      bitmap_size += sizeof(unsigned long);
   31.15      bitmap_size  = round_pgup(bitmap_size);
   31.16 -    alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
   31.17 +    alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);
   31.18  
   31.19      /* All allocated by default. */
   31.20      memset(alloc_bitmap, ~0, bitmap_size);
   31.21 @@ -153,7 +153,7 @@ physaddr_t init_boot_allocator(physaddr_
   31.22      return bitmap_start + bitmap_size;
   31.23  }
   31.24  
   31.25 -void init_boot_pages(physaddr_t ps, physaddr_t pe)
   31.26 +void init_boot_pages(paddr_t ps, paddr_t pe)
   31.27  {
   31.28      unsigned long bad_pfn;
   31.29      char *p;
   31.30 @@ -245,13 +245,13 @@ void end_boot_allocator(void)
   31.31          if ( next_free )
   31.32              map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
   31.33          if ( curr_free )
   31.34 -            free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
   31.35 +            free_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 0);
   31.36      }
   31.37  }
   31.38  
   31.39  /* Hand the specified arbitrary page range to the specified heap zone. */
   31.40  void init_heap_pages(
   31.41 -    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages)
   31.42 +    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
   31.43  {
   31.44      unsigned long i;
   31.45  
   31.46 @@ -263,10 +263,10 @@ void init_heap_pages(
   31.47  
   31.48  
   31.49  /* Allocate 2^@order contiguous pages. */
   31.50 -struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
   31.51 +struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order)
   31.52  {
   31.53      int i;
   31.54 -    struct pfn_info *pg;
   31.55 +    struct page_info *pg;
   31.56  
   31.57      ASSERT(zone < NR_ZONES);
   31.58  
   31.59 @@ -285,7 +285,7 @@ struct pfn_info *alloc_heap_pages(unsign
   31.60      return NULL;
   31.61  
   31.62   found: 
   31.63 -    pg = list_entry(heap[zone][i].next, struct pfn_info, list);
   31.64 +    pg = list_entry(heap[zone][i].next, struct page_info, list);
   31.65      list_del(&pg->list);
   31.66  
   31.67      /* We may have to halve the chunk a number of times. */
   31.68 @@ -296,7 +296,7 @@ struct pfn_info *alloc_heap_pages(unsign
   31.69          pg += 1 << i;
   31.70      }
   31.71      
   31.72 -    map_alloc(page_to_pfn(pg), 1 << order);
   31.73 +    map_alloc(page_to_mfn(pg), 1 << order);
   31.74      avail[zone] -= 1 << order;
   31.75  
   31.76      spin_unlock(&heap_lock);
   31.77 @@ -307,7 +307,7 @@ struct pfn_info *alloc_heap_pages(unsign
   31.78  
   31.79  /* Free 2^@order set of pages. */
   31.80  void free_heap_pages(
   31.81 -    unsigned int zone, struct pfn_info *pg, unsigned int order)
   31.82 +    unsigned int zone, struct page_info *pg, unsigned int order)
   31.83  {
   31.84      unsigned long mask;
   31.85  
   31.86 @@ -316,7 +316,7 @@ void free_heap_pages(
   31.87  
   31.88      spin_lock(&heap_lock);
   31.89  
   31.90 -    map_free(page_to_pfn(pg), 1 << order);
   31.91 +    map_free(page_to_mfn(pg), 1 << order);
   31.92      avail[zone] += 1 << order;
   31.93      
   31.94      /* Merge chunks as far as possible. */
   31.95 @@ -324,10 +324,10 @@ void free_heap_pages(
   31.96      {
   31.97          mask = 1 << order;
   31.98  
   31.99 -        if ( (page_to_pfn(pg) & mask) )
  31.100 +        if ( (page_to_mfn(pg) & mask) )
  31.101          {
  31.102              /* Merge with predecessor block? */
  31.103 -            if ( allocated_in_map(page_to_pfn(pg)-mask) ||
  31.104 +            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
  31.105                   (PFN_ORDER(pg-mask) != order) )
  31.106                  break;
  31.107              list_del(&(pg-mask)->list);
  31.108 @@ -336,7 +336,7 @@ void free_heap_pages(
  31.109          else
  31.110          {
  31.111              /* Merge with successor block? */
  31.112 -            if ( allocated_in_map(page_to_pfn(pg)+mask) ||
  31.113 +            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
  31.114                   (PFN_ORDER(pg+mask) != order) )
  31.115                  break;
  31.116              list_del(&(pg+mask)->list);
  31.117 @@ -383,9 +383,9 @@ void scrub_heap_pages(void)
  31.118          /* Re-check page status with lock held. */
  31.119          if ( !allocated_in_map(pfn) )
  31.120          {
  31.121 -            if ( IS_XEN_HEAP_FRAME(pfn_to_page(pfn)) )
  31.122 +            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
  31.123              {
  31.124 -                p = page_to_virt(pfn_to_page(pfn));
  31.125 +                p = page_to_virt(mfn_to_page(pfn));
  31.126                  memguard_unguard_range(p, PAGE_SIZE);
  31.127                  clear_page(p);
  31.128                  memguard_guard_range(p, PAGE_SIZE);
  31.129 @@ -410,7 +410,7 @@ void scrub_heap_pages(void)
  31.130   * XEN-HEAP SUB-ALLOCATOR
  31.131   */
  31.132  
  31.133 -void init_xenheap_pages(physaddr_t ps, physaddr_t pe)
  31.134 +void init_xenheap_pages(paddr_t ps, paddr_t pe)
  31.135  {
  31.136      unsigned long flags;
  31.137  
  31.138 @@ -419,17 +419,17 @@ void init_xenheap_pages(physaddr_t ps, p
  31.139      if ( pe <= ps )
  31.140          return;
  31.141  
  31.142 -    memguard_guard_range(phys_to_virt(ps), pe - ps);
  31.143 +    memguard_guard_range(maddr_to_virt(ps), pe - ps);
  31.144  
  31.145      /*
  31.146       * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
  31.147       * prevent merging of power-of-two blocks across the zone boundary.
  31.148       */
  31.149 -    if ( !IS_XEN_HEAP_FRAME(phys_to_page(pe)) )
  31.150 +    if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
  31.151          pe -= PAGE_SIZE;
  31.152  
  31.153      local_irq_save(flags);
  31.154 -    init_heap_pages(MEMZONE_XEN, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
  31.155 +    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
  31.156      local_irq_restore(flags);
  31.157  }
  31.158  
  31.159 @@ -437,7 +437,7 @@ void init_xenheap_pages(physaddr_t ps, p
  31.160  void *alloc_xenheap_pages(unsigned int order)
  31.161  {
  31.162      unsigned long flags;
  31.163 -    struct pfn_info *pg;
  31.164 +    struct page_info *pg;
  31.165      int i;
  31.166  
  31.167      local_irq_save(flags);
  31.168 @@ -484,7 +484,7 @@ void free_xenheap_pages(void *v, unsigne
  31.169   * DOMAIN-HEAP SUB-ALLOCATOR
  31.170   */
  31.171  
  31.172 -void init_domheap_pages(physaddr_t ps, physaddr_t pe)
  31.173 +void init_domheap_pages(paddr_t ps, paddr_t pe)
  31.174  {
  31.175      unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
  31.176  
  31.177 @@ -496,19 +496,19 @@ void init_domheap_pages(physaddr_t ps, p
  31.178      s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
  31.179      e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
  31.180      if ( s_dma < e_dma )
  31.181 -        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
  31.182 +        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);
  31.183  
  31.184      s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
  31.185      e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
  31.186      if ( s_nrm < e_nrm )
  31.187 -        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
  31.188 +        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
  31.189  }
  31.190  
  31.191  
  31.192 -struct pfn_info *alloc_domheap_pages(
  31.193 +struct page_info *alloc_domheap_pages(
  31.194      struct domain *d, unsigned int order, unsigned int flags)
  31.195  {
  31.196 -    struct pfn_info *pg = NULL;
  31.197 +    struct page_info *pg = NULL;
  31.198      cpumask_t mask;
  31.199      int i;
  31.200  
  31.201 @@ -560,7 +560,7 @@ struct pfn_info *alloc_domheap_pages(
  31.202          DPRINTK("...or the domain is dying (%d)\n", 
  31.203                  !!test_bit(_DOMF_dying, &d->domain_flags));
  31.204          spin_unlock(&d->page_alloc_lock);
  31.205 -        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
  31.206 +        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
  31.207          return NULL;
  31.208      }
  31.209  
  31.210 @@ -583,7 +583,7 @@ struct pfn_info *alloc_domheap_pages(
  31.211  }
  31.212  
  31.213  
  31.214 -void free_domheap_pages(struct pfn_info *pg, unsigned int order)
  31.215 +void free_domheap_pages(struct page_info *pg, unsigned int order)
  31.216  {
  31.217      int            i, drop_dom_ref;
  31.218      struct domain *d = page_get_owner(pg);
  31.219 @@ -624,7 +624,7 @@ void free_domheap_pages(struct pfn_info 
  31.220  
  31.221          if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
  31.222          {
  31.223 -            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
  31.224 +            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
  31.225          }
  31.226          else
  31.227          {
  31.228 @@ -646,7 +646,7 @@ void free_domheap_pages(struct pfn_info 
  31.229          /* Freeing anonymous domain-heap pages. */
  31.230          for ( i = 0; i < (1 << order); i++ )
  31.231              pg[i].u.free.cpumask = CPU_MASK_NONE;
  31.232 -        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
  31.233 +        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
  31.234          drop_dom_ref = 0;
  31.235      }
  31.236  
  31.237 @@ -669,7 +669,7 @@ unsigned long avail_domheap_pages(void)
  31.238  static void page_scrub_softirq(void)
  31.239  {
  31.240      struct list_head *ent;
  31.241 -    struct pfn_info  *pg;
  31.242 +    struct page_info  *pg;
  31.243      void             *p;
  31.244      int               i;
  31.245      s_time_t          start = NOW();
  31.246 @@ -701,12 +701,12 @@ static void page_scrub_softirq(void)
  31.247          /* Working backwards, scrub each page in turn. */
  31.248          while ( ent != &page_scrub_list )
  31.249          {
  31.250 -            pg = list_entry(ent, struct pfn_info, list);
  31.251 +            pg = list_entry(ent, struct page_info, list);
  31.252              ent = ent->prev;
  31.253 -            p = map_domain_page(page_to_pfn(pg));
  31.254 +            p = map_domain_page(page_to_mfn(pg));
  31.255              clear_page(p);
  31.256              unmap_domain_page(p);
  31.257 -            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
  31.258 +            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
  31.259          }
  31.260      } while ( (NOW() - start) < MILLISECS(1) );
  31.261  }
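A minimal sketch (not part of the changeset) of the buddy arithmetic that the merge loop in free_heap_pages() above performs via page_to_mfn(); buddy_of() is a hypothetical helper name introduced only for this example.

    /* Sketch only: the buddy of a free 2^order block is obtained by flipping
     * the order bit of its machine frame number, which is why the loop above
     * tests (page_to_mfn(pg) & (1 << order)) to choose between merging with
     * the predecessor (pg - mask) or the successor (pg + mask) block. */
    static inline unsigned long buddy_of(unsigned long mfn, unsigned int order)
    {
        return mfn ^ (1UL << order);
    }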
    32.1 --- a/xen/common/xmalloc.c	Wed Feb 01 15:01:04 2006 +0000
    32.2 +++ b/xen/common/xmalloc.c	Wed Feb 01 16:28:50 2006 +0100
    32.3 @@ -21,8 +21,8 @@
    32.4  
    32.5  /*
    32.6   * TODO (Keir, 17/2/05):
    32.7 - *  1. Use space in pfn_info to avoid xmalloc_hdr in allocated blocks.
    32.8 - *  2. pfn_info points into free list to make xfree() O(1) complexity.
    32.9 + *  1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
   32.10 + *  2. page_info points into free list to make xfree() O(1) complexity.
   32.11   *  3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
   32.12   *     (Disadvantage is potentially greater internal fragmentation).
   32.13   */
    33.1 --- a/xen/include/asm-ia64/config.h	Wed Feb 01 15:01:04 2006 +0000
    33.2 +++ b/xen/include/asm-ia64/config.h	Wed Feb 01 16:28:50 2006 +0100
    33.3 @@ -49,7 +49,7 @@
    33.4  typedef int pid_t;
    33.5  
    33.6  // now needed for xen/include/mm.h
    33.7 -typedef unsigned long physaddr_t;
    33.8 +typedef unsigned long paddr_t;
    33.9  // from include/linux/kernel.h
   33.10  #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
   33.11  
   33.12 @@ -212,8 +212,8 @@ void sort_main_extable(void);
   33.13  #define _atomic_read(v) ((v).counter)
   33.14  #define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
   33.15  
   33.16 -// see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
   33.17 -#define pfn_info page
   33.18 +// see include/asm-ia64/mm.h, handle remaining page_info uses until gone
   33.19 +#define page_info page
   33.20  
   33.21  // see common/memory.c
   33.22  #define set_pfn_from_mfn(x,y)	do { } while (0)
    34.1 --- a/xen/include/asm-ia64/grant_table.h	Wed Feb 01 15:01:04 2006 +0000
    34.2 +++ b/xen/include/asm-ia64/grant_table.h	Wed Feb 01 16:28:50 2006 +0100
    34.3 @@ -12,12 +12,12 @@
    34.4  
    34.5  #define steal_page_for_grant_transfer(d, p)  0
    34.6  
    34.7 -#define gnttab_create_shared_mfn(d, t, i) ((void)0)
    34.8 +#define gnttab_create_shared_page(d, t, i) ((void)0)
    34.9  
   34.10 -#define gnttab_shared_gpfn(d, t, i)                                     \
   34.11 +#define gnttab_shared_gmfn(d, t, i)                                     \
   34.12      ( ((d) == dom0) ?                                                   \
   34.13 -      ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i)) :               \
   34.14 -      (map_domain_page((d), 1UL<<40, virt_to_phys((t)->shared)),        \
   34.15 +      ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
   34.16 +      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
   34.17         1UL << (40 - PAGE_SHIFT))                                        \
   34.18      )
   34.19  
    35.1 --- a/xen/include/asm-ia64/linux-xen/asm/io.h	Wed Feb 01 15:01:04 2006 +0000
    35.2 +++ b/xen/include/asm-ia64/linux-xen/asm/io.h	Wed Feb 01 16:28:50 2006 +0100
    35.3 @@ -80,13 +80,13 @@ extern unsigned int num_io_spaces;
    35.4   * Change virtual addresses to physical addresses and vv.
    35.5   */
    35.6  static inline unsigned long
    35.7 -virt_to_phys (volatile void *address)
    35.8 +virt_to_maddr (volatile void *address)
    35.9  {
   35.10  	return (unsigned long) address - PAGE_OFFSET;
   35.11  }
   35.12  
   35.13  static inline void*
   35.14 -phys_to_virt (unsigned long address)
   35.15 +maddr_to_virt (unsigned long address)
   35.16  {
   35.17  	return (void *) (address + PAGE_OFFSET);
   35.18  }
   35.19 @@ -98,9 +98,9 @@ extern int valid_phys_addr_range (unsign
   35.20   * The following two macros are deprecated and scheduled for removal.
   35.21   * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
   35.22   */
   35.23 -#define bus_to_virt	phys_to_virt
   35.24 -#define virt_to_bus	virt_to_phys
   35.25 -#define page_to_bus	page_to_phys
   35.26 +#define bus_to_virt	maddr_to_virt
   35.27 +#define virt_to_bus	virt_to_maddr
   35.28 +#define page_to_bus	page_to_maddr
   35.29  
   35.30  # endif /* KERNEL */
   35.31  
    36.1 --- a/xen/include/asm-ia64/linux-xen/asm/page.h	Wed Feb 01 15:01:04 2006 +0000
    36.2 +++ b/xen/include/asm-ia64/linux-xen/asm/page.h	Wed Feb 01 16:28:50 2006 +0100
    36.3 @@ -86,28 +86,28 @@ do {						\
    36.4  
    36.5  #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
    36.6  
    36.7 -#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
    36.8 +#define virt_addr_valid(kaddr)	mfn_valid(__pa(kaddr) >> PAGE_SHIFT)
    36.9  
   36.10  #ifdef CONFIG_VIRTUAL_MEM_MAP
   36.11 -extern int ia64_pfn_valid (unsigned long pfn);
   36.12 +extern int ia64_mfn_valid (unsigned long pfn);
   36.13  #else
   36.14 -# define ia64_pfn_valid(pfn) 1
   36.15 +# define ia64_mfn_valid(pfn) 1
   36.16  #endif
   36.17  
   36.18  #ifndef CONFIG_DISCONTIGMEM
   36.19 -# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
   36.20 -# define page_to_pfn(page)	((unsigned long) (page - mem_map))
   36.21 -# define pfn_to_page(pfn)	(mem_map + (pfn))
   36.22 +# define mfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_mfn_valid(pfn))
   36.23 +# define page_to_mfn(page)	((unsigned long) (page - mem_map))
   36.24 +# define mfn_to_page(pfn)	(mem_map + (pfn))
   36.25  #else
   36.26  extern struct page *vmem_map;
   36.27  extern unsigned long max_low_pfn;
   36.28 -# define pfn_valid(pfn)		(((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
   36.29 -# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
   36.30 -# define pfn_to_page(pfn)	(vmem_map + (pfn))
   36.31 +# define mfn_valid(pfn)		(((pfn) < max_low_pfn) && ia64_mfn_valid(pfn))
   36.32 +# define page_to_mfn(page)	((unsigned long) (page - vmem_map))
   36.33 +# define mfn_to_page(pfn)	(vmem_map + (pfn))
   36.34  #endif
   36.35  
   36.36 -#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   36.37 -#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   36.38 +#define page_to_maddr(page)	(page_to_mfn(page) << PAGE_SHIFT)
   36.39 +#define virt_to_page(kaddr)	mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   36.40  
   36.41  typedef union ia64_va {
   36.42  	struct {
    37.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Wed Feb 01 15:01:04 2006 +0000
    37.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Wed Feb 01 16:28:50 2006 +0100
    37.3 @@ -109,7 +109,7 @@ static inline void pmd_free(pmd_t * pmd)
    37.4  static inline void
    37.5  pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
    37.6  {
    37.7 -	pmd_val(*pmd_entry) = page_to_phys(pte);
    37.8 +	pmd_val(*pmd_entry) = page_to_maddr(pte);
    37.9  }
   37.10  
   37.11  static inline void
    38.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Wed Feb 01 15:01:04 2006 +0000
    38.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h	Wed Feb 01 16:28:50 2006 +0100
    38.3 @@ -235,7 +235,7 @@ ia64_phys_addr_valid (unsigned long addr
    38.4  /* Extract pfn from pte.  */
    38.5  #define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
    38.6  
    38.7 -#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
    38.8 +#define mk_pte(page, pgprot)	pfn_pte(page_to_mfn(page), (pgprot))
    38.9  
   38.10  /* This takes a physical page address that is used by the remapping functions */
   38.11  #define mk_pte_phys(physpage, pgprot) \
    39.1 --- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h	Wed Feb 01 15:01:04 2006 +0000
    39.2 +++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h	Wed Feb 01 16:28:50 2006 +0100
    39.3 @@ -384,7 +384,7 @@ xlate_dev_mem_ptr (unsigned long p)
    39.4  	struct page *page;
    39.5  	char * ptr;
    39.6  
    39.7 -	page = pfn_to_page(p >> PAGE_SHIFT);
    39.8 +	page = mfn_to_page(p >> PAGE_SHIFT);
    39.9  	if (PageUncached(page))
   39.10  		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
   39.11  	else
    40.1 --- a/xen/include/asm-ia64/linux/mmzone.h	Wed Feb 01 15:01:04 2006 +0000
    40.2 +++ b/xen/include/asm-ia64/linux/mmzone.h	Wed Feb 01 16:28:50 2006 +0100
    40.3 @@ -289,7 +289,7 @@ typedef struct pglist_data {
    40.4  #ifdef CONFIG_FLAT_NODE_MEM_MAP
    40.5  #define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
    40.6  #else
    40.7 -#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
    40.8 +#define pgdat_page_nr(pgdat, pagenr)	mfn_to_page((pgdat)->node_start_pfn + (pagenr))
    40.9  #endif
   40.10  #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
   40.11  
   40.12 @@ -536,18 +536,18 @@ static inline struct mem_section *__pfn_
   40.13  	return __nr_to_section(pfn_to_section_nr(pfn));
   40.14  }
   40.15  
   40.16 -#define pfn_to_page(pfn) 						\
   40.17 +#define mfn_to_page(pfn) 						\
   40.18  ({ 									\
   40.19  	unsigned long __pfn = (pfn);					\
   40.20  	__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;	\
   40.21  })
   40.22 -#define page_to_pfn(page)						\
   40.23 +#define page_to_mfn(page)						\
   40.24  ({									\
   40.25  	page - __section_mem_map_addr(__nr_to_section(			\
   40.26  		page_to_section(page)));				\
   40.27  })
   40.28  
   40.29 -static inline int pfn_valid(unsigned long pfn)
   40.30 +static inline int mfn_valid(unsigned long pfn)
   40.31  {
   40.32  	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
   40.33  		return 0;
   40.34 @@ -568,7 +568,7 @@ static inline int pfn_valid(unsigned lon
   40.35  	NODE_DATA(pfn_to_nid(pfn));					\
   40.36  })
   40.37  
   40.38 -#define early_pfn_valid(pfn)	pfn_valid(pfn)
   40.39 +#define early_mfn_valid(pfn)	mfn_valid(pfn)
   40.40  void sparse_init(void);
   40.41  #else
   40.42  #define sparse_init()	do {} while (0)
   40.43 @@ -580,8 +580,8 @@ void sparse_init(void);
   40.44  #define early_pfn_in_nid(pfn, nid)	(1)
   40.45  #endif
   40.46  
   40.47 -#ifndef early_pfn_valid
   40.48 -#define early_pfn_valid(pfn)	(1)
   40.49 +#ifndef early_mfn_valid
   40.50 +#define early_mfn_valid(pfn)	(1)
   40.51  #endif
   40.52  
   40.53  void memory_present(int nid, unsigned long start, unsigned long end);
    41.1 --- a/xen/include/asm-ia64/mm.h	Wed Feb 01 15:01:04 2006 +0000
    41.2 +++ b/xen/include/asm-ia64/mm.h	Wed Feb 01 16:28:50 2006 +0100
    41.3 @@ -29,7 +29,7 @@ typedef unsigned long page_flags_t;
    41.4   * Per-page-frame information.
    41.5   * 
    41.6   * Every architecture must ensure the following:
    41.7 - *  1. 'struct pfn_info' contains a 'struct list_head list'.
    41.8 + *  1. 'struct page_info' contains a 'struct list_head list'.
    41.9   *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
   41.10   */
   41.11  #define PFN_ORDER(_pfn)	((_pfn)->u.free.order)
   41.12 @@ -106,8 +106,8 @@ struct page
   41.13  /* 30-bit count of references to this frame. */
   41.14  #define PGC_count_mask      ((1U<<30)-1)
   41.15  
   41.16 -#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
   41.17 -				 && (page_to_phys(_pfn) >= xen_pstart))
   41.18 +#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
   41.19 +				 && (page_to_maddr(_pfn) >= xen_pstart))
   41.20  
   41.21  static inline struct domain *unpickle_domptr(u32 _d)
   41.22  { return (_d == 0) ? NULL : __va(_d); }
   41.23 @@ -120,7 +120,7 @@ static inline u32 pickle_domptr(struct d
   41.24  /* Dummy now */
   41.25  #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
   41.26  
   41.27 -extern struct pfn_info *frame_table;
   41.28 +extern struct page_info *frame_table;
   41.29  extern unsigned long frame_table_size;
   41.30  extern struct list_head free_list;
   41.31  extern spinlock_t free_list_lock;
   41.32 @@ -134,7 +134,7 @@ extern void __init init_frametable(void)
   41.33  #endif
   41.34  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
   41.35  
   41.36 -static inline void put_page(struct pfn_info *page)
   41.37 +static inline void put_page(struct page_info *page)
   41.38  {
   41.39  #ifdef VALIDATE_VT	// doesn't work with non-VTI in grant tables yet
   41.40      u32 nx, x, y = page->count_info;
   41.41 @@ -151,7 +151,7 @@ static inline void put_page(struct pfn_i
   41.42  }
   41.43  
   41.44  /* count_info and ownership are checked atomically. */
   41.45 -static inline int get_page(struct pfn_info *page,
   41.46 +static inline int get_page(struct page_info *page,
   41.47                             struct domain *domain)
   41.48  {
   41.49  #ifdef VALIDATE_VT
   41.50 @@ -165,7 +165,7 @@ static inline int get_page(struct pfn_in
   41.51  	    unlikely((nx & PGC_count_mask) == 0) ||	/* Count overflow? */
   41.52  	    unlikely((x >> 32) != _domain)) {		/* Wrong owner? */
   41.53  	    DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
   41.54 -		page_to_pfn(page), domain, unpickle_domptr(domain),
   41.55 +		page_to_mfn(page), domain, unpickle_domptr(domain),
   41.56  		x, page->u.inuse.type_info);
   41.57  	    return 0;
   41.58  	}
   41.59 @@ -178,14 +178,14 @@ static inline int get_page(struct pfn_in
   41.60  /* No type info now */
   41.61  #define put_page_type(page)
   41.62  #define get_page_type(page, type) 1
   41.63 -static inline void put_page_and_type(struct pfn_info *page)
   41.64 +static inline void put_page_and_type(struct page_info *page)
   41.65  {
   41.66      put_page_type(page);
   41.67      put_page(page);
   41.68  }
   41.69  
   41.70  
   41.71 -static inline int get_page_and_type(struct pfn_info *page,
   41.72 +static inline int get_page_and_type(struct page_info *page,
   41.73                                      struct domain *domain,
   41.74                                      u32 type)
   41.75  {
   41.76 @@ -366,7 +366,7 @@ extern unsigned long max_mapnr;
   41.77  
   41.78  static inline void *lowmem_page_address(struct page *page)
   41.79  {
   41.80 -	return __va(page_to_pfn(page) << PAGE_SHIFT);
   41.81 +	return __va(page_to_mfn(page) << PAGE_SHIFT);
   41.82  }
   41.83  
   41.84  #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
   41.85 @@ -422,29 +422,29 @@ extern unsigned long lookup_domain_mpa(s
   41.86  * here. However if it's allocated by HV, we should access it directly
   41.87  */
   41.88  
   41.89 -#define __mfn_to_gpfn(_d, mfn)			\
   41.90 +#define mfn_to_gmfn(_d, mfn)			\
   41.91      machine_to_phys_mapping[(mfn)]
   41.92  
   41.93 -#define __gpfn_to_mfn(_d, gpfn)			\
   41.94 -    __gpfn_to_mfn_foreign((_d), (gpfn))
   41.95 +#define gmfn_to_mfn(_d, gpfn)			\
   41.96 +    gmfn_to_mfn_foreign((_d), (gpfn))
   41.97  
   41.98  #define __gpfn_invalid(_d, gpfn)			\
   41.99  	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK)
  41.100  
  41.101 -#define __gpfn_valid(_d, gpfn)	!__gpfn_invalid(_d, gpfn)
  41.102 +#define __gmfn_valid(_d, gpfn)	!__gpfn_invalid(_d, gpfn)
  41.103  
   41.104  /* Return I/O type if true */
  41.105  #define __gpfn_is_io(_d, gpfn)				\
  41.106 -	(__gpfn_valid(_d, gpfn) ? 			\
  41.107 +	(__gmfn_valid(_d, gpfn) ? 			\
  41.108  	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
  41.109  
  41.110  #define __gpfn_is_mem(_d, gpfn)				\
  41.111 -	(__gpfn_valid(_d, gpfn) ?			\
  41.112 +	(__gmfn_valid(_d, gpfn) ?			\
  41.113  	((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
  41.114  
  41.115  
  41.116  #define __gpa_to_mpa(_d, gpa)   \
  41.117 -    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
  41.118 +    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
  41.119  
  41.120  /* Arch-specific portion of memory_op hypercall. */
  41.121  #define arch_memory_op(op, arg) (-ENOSYS)
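A minimal illustrative sketch (not part of this changeset) of how the renamed translation macro above can be used; gpa_to_maddr() is a hypothetical helper name introduced only for this example.

    /* Sketch only: compose a machine address from a guest physical address
     * using the renamed gmfn_to_mfn() macro and the new paddr_t type.
     * Mirrors the __gpa_to_mpa() macro defined above. */
    static inline paddr_t gpa_to_maddr(struct domain *d, paddr_t gpa)
    {
        unsigned long gpfn = gpa >> PAGE_SHIFT;     /* frame number taken from the gpa */
        unsigned long mfn  = gmfn_to_mfn(d, gpfn);  /* corresponding machine frame */
        return ((paddr_t)mfn << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
    }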
    42.1 --- a/xen/include/asm-ia64/xenpage.h	Wed Feb 01 15:01:04 2006 +0000
    42.2 +++ b/xen/include/asm-ia64/xenpage.h	Wed Feb 01 16:28:50 2006 +0100
    42.3 @@ -5,20 +5,20 @@
    42.4  #error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM"
    42.5  #endif
    42.6  
    42.7 -#undef pfn_valid
    42.8 -#undef page_to_pfn
    42.9 -#undef pfn_to_page
   42.10 -# define pfn_valid(_pfn)	((_pfn) < max_page)
   42.11 -# define page_to_pfn(_page)	((unsigned long) ((_page) - frame_table))
   42.12 -# define pfn_to_page(_pfn)	(frame_table + (_pfn))
   42.13 +#undef mfn_valid
   42.14 +#undef page_to_mfn
   42.15 +#undef mfn_to_page
   42.16 +# define mfn_valid(_pfn)	((_pfn) < max_page)
   42.17 +# define page_to_mfn(_page)	((unsigned long) ((_page) - frame_table))
   42.18 +# define mfn_to_page(_pfn)	(frame_table + (_pfn))
   42.19  
   42.20 -#undef page_to_phys
   42.21 +#undef page_to_maddr
   42.22  #undef virt_to_page
   42.23 -#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   42.24 -#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   42.25 +#define page_to_maddr(page)	(page_to_mfn(page) << PAGE_SHIFT)
   42.26 +#define virt_to_page(kaddr)	mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   42.27  
   42.28 -#define page_to_virt(_page)	phys_to_virt(page_to_phys(_page))
   42.29 -#define phys_to_page(kaddr)	pfn_to_page(((kaddr) >> PAGE_SHIFT))
   42.30 +#define page_to_virt(_page)	maddr_to_virt(page_to_maddr(_page))
   42.31 +#define maddr_to_page(kaddr)	mfn_to_page(((kaddr) >> PAGE_SHIFT))
   42.32  
   42.33  #ifndef __ASSEMBLY__
   42.34  typedef union xen_va {
   42.35 @@ -30,7 +30,7 @@ typedef union xen_va {
   42.36  	void *p;
   42.37  } xen_va;
   42.38  
   42.39 -static inline int get_order_from_bytes(physaddr_t size)
   42.40 +static inline int get_order_from_bytes(paddr_t size)
   42.41  {
   42.42      int order;
   42.43      size = (size-1) >> PAGE_SHIFT;
    43.1 --- a/xen/include/asm-x86/grant_table.h	Wed Feb 01 15:01:04 2006 +0000
    43.2 +++ b/xen/include/asm-x86/grant_table.h	Wed Feb 01 16:28:50 2006 +0100
    43.3 @@ -19,22 +19,22 @@ int destroy_grant_host_mapping(
    43.4      unsigned long addr, unsigned long frame, unsigned int flags);
    43.5  
    43.6  int steal_page_for_grant_transfer(
    43.7 -    struct domain *d, struct pfn_info *page);
    43.8 +    struct domain *d, struct page_info *page);
    43.9  
   43.10 -#define gnttab_create_shared_mfn(d, t, i)                                \
   43.11 +#define gnttab_create_shared_page(d, t, i)                               \
   43.12      do {                                                                 \
   43.13          SHARE_PFN_WITH_DOMAIN(                                           \
   43.14              virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), (d)); \
   43.15          set_pfn_from_mfn(                                                \
   43.16 -            (virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i),             \
   43.17 +            (virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i),            \
   43.18              INVALID_M2P_ENTRY);                                          \
   43.19      } while ( 0 )
   43.20  
   43.21  #define gnttab_shared_mfn(d, t, i)                      \
   43.22 -    ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i))
   43.23 +    ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
   43.24  
   43.25 -#define gnttab_shared_gpfn(d, t, i)                     \
   43.26 -    (__mfn_to_gpfn(d, gnttab_shared_mfn(d, t, i)))
   43.27 +#define gnttab_shared_gmfn(d, t, i)                     \
   43.28 +    (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
   43.29  
   43.30  #define gnttab_log_dirty(d, f) mark_dirty((d), (f))
   43.31  
    44.1 --- a/xen/include/asm-x86/io.h	Wed Feb 01 15:01:04 2006 +0000
    44.2 +++ b/xen/include/asm-x86/io.h	Wed Feb 01 16:28:50 2006 +0100
    44.3 @@ -5,52 +5,6 @@
    44.4  #include <xen/types.h>
    44.5  #include <asm/page.h>
    44.6  
    44.7 -#define IO_SPACE_LIMIT 0xffff
    44.8 -
    44.9 -/**
   44.10 - *  virt_to_phys    -   map virtual addresses to physical
   44.11 - *  @address: address to remap
   44.12 - *
   44.13 - *  The returned physical address is the physical (CPU) mapping for
   44.14 - *  the memory address given. It is only valid to use this function on
   44.15 - *  addresses directly mapped or allocated via xmalloc.
   44.16 - *
   44.17 - *  This function does not give bus mappings for DMA transfers. In
   44.18 - *  almost all conceivable cases a device driver should not be using
   44.19 - *  this function
   44.20 - */
   44.21 -
   44.22 -static inline unsigned long virt_to_phys(volatile void * address)
   44.23 -{
   44.24 -    return __pa(address);
   44.25 -}
   44.26 -
   44.27 -/**
   44.28 - *  phys_to_virt    -   map physical address to virtual
   44.29 - *  @address: address to remap
   44.30 - *
   44.31 - *  The returned virtual address is a current CPU mapping for
   44.32 - *  the memory address given. It is only valid to use this function on
   44.33 - *  addresses that have a kernel mapping
   44.34 - *
   44.35 - *  This function does not handle bus mappings for DMA transfers. In
   44.36 - *  almost all conceivable cases a device driver should not be using
   44.37 - *  this function
   44.38 - */
   44.39 -
   44.40 -static inline void * phys_to_virt(unsigned long address)
   44.41 -{
   44.42 -    return __va(address);
   44.43 -}
   44.44 -
   44.45 -/*
   44.46 - * Change "struct pfn_info" to physical address.
   44.47 - */
   44.48 -#define page_to_phys(page)  ((physaddr_t)(page - frame_table) << PAGE_SHIFT)
   44.49 -
   44.50 -#define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))
   44.51 -#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
   44.52 -
   44.53  /* We don't need real ioremap() on Xen/x86. */
   44.54  #define ioremap(x,l) (__va(x))
   44.55  
   44.56 @@ -61,13 +15,6 @@ static inline void * phys_to_virt(unsign
   44.57  #define writew(d,x) (*(volatile short *)(x) = (d))
   44.58  #define writel(d,x) (*(volatile int *)(x) = (d))
   44.59  
   44.60 -/*
   44.61 - * IO bus memory addresses are also 1:1 with the physical address
   44.62 - */
   44.63 -#define virt_to_bus virt_to_phys
   44.64 -#define bus_to_virt phys_to_virt
   44.65 -#define page_to_bus page_to_phys
   44.66 -
   44.67  #define __OUT1(s,x) \
   44.68  static inline void out##s(unsigned x value, unsigned short port) {
   44.69  
    45.1 --- a/xen/include/asm-x86/mach-default/bios_ebda.h	Wed Feb 01 15:01:04 2006 +0000
    45.2 +++ b/xen/include/asm-x86/mach-default/bios_ebda.h	Wed Feb 01 16:28:50 2006 +0100
    45.3 @@ -7,7 +7,7 @@
    45.4   */
    45.5  static inline unsigned int get_bios_ebda(void)
    45.6  {
    45.7 -	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
    45.8 +	unsigned int address = *(unsigned short *)maddr_to_virt(0x40E);
    45.9  	address <<= 4;
   45.10  	return address;	/* 0 means none */
   45.11  }
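
For context, the 16-bit word at machine address 0x40E is the BIOS Data Area field that holds the EBDA base as a real-mode segment, which is why the value is shifted left by four bits to form a linear address; for example, a stored segment of 0x9FC0 places the EBDA at 0x9FC00.
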
    46.1 --- a/xen/include/asm-x86/mach-default/mach_wakecpu.h	Wed Feb 01 15:01:04 2006 +0000
    46.2 +++ b/xen/include/asm-x86/mach-default/mach_wakecpu.h	Wed Feb 01 16:28:50 2006 +0100
    46.3 @@ -8,8 +8,8 @@
    46.4  
    46.5  #define WAKE_SECONDARY_VIA_INIT
    46.6  
    46.7 -#define TRAMPOLINE_LOW phys_to_virt(0x467)
    46.8 -#define TRAMPOLINE_HIGH phys_to_virt(0x469)
    46.9 +#define TRAMPOLINE_LOW maddr_to_virt(0x467)
   46.10 +#define TRAMPOLINE_HIGH maddr_to_virt(0x469)
   46.11  
   46.12  #define boot_cpu_apicid boot_cpu_physical_apicid
   46.13  
    47.1 --- a/xen/include/asm-x86/mach-es7000/mach_wakecpu.h	Wed Feb 01 15:01:04 2006 +0000
    47.2 +++ b/xen/include/asm-x86/mach-es7000/mach_wakecpu.h	Wed Feb 01 16:28:50 2006 +0100
    47.3 @@ -23,8 +23,8 @@ wakeup_secondary_cpu(int phys_apicid, un
    47.4  }
    47.5  #endif
    47.6  
    47.7 -#define TRAMPOLINE_LOW phys_to_virt(0x467)
    47.8 -#define TRAMPOLINE_HIGH phys_to_virt(0x469)
    47.9 +#define TRAMPOLINE_LOW maddr_to_virt(0x467)
   47.10 +#define TRAMPOLINE_HIGH maddr_to_virt(0x469)
   47.11  
   47.12  #define boot_cpu_apicid boot_cpu_physical_apicid
   47.13  
    48.1 --- a/xen/include/asm-x86/mm.h	Wed Feb 01 15:01:04 2006 +0000
    48.2 +++ b/xen/include/asm-x86/mm.h	Wed Feb 01 16:28:50 2006 +0100
    48.3 @@ -12,12 +12,12 @@
    48.4   * Per-page-frame information.
    48.5   * 
    48.6   * Every architecture must ensure the following:
    48.7 - *  1. 'struct pfn_info' contains a 'struct list_head list'.
    48.8 + *  1. 'struct page_info' contains a 'struct list_head list'.
    48.9   *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
   48.10   */
   48.11  #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
   48.12  
   48.13 -struct pfn_info
   48.14 +struct page_info
   48.15  {
   48.16      /* Each frame can be threaded onto a doubly-linked list. */
   48.17      struct list_head list;
   48.18 @@ -121,7 +121,7 @@ struct pfn_info
   48.19  #define PageSetSlab(page)   ((void)0)
   48.20  #define PageClearSlab(page) ((void)0)
   48.21  
   48.22 -#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)
   48.23 +#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
   48.24  
   48.25  #if defined(__i386__)
   48.26  #define pickle_domptr(_d)   ((u32)(unsigned long)(_d))
   48.27 @@ -154,20 +154,20 @@ static inline u32 pickle_domptr(struct d
   48.28          spin_unlock(&(_dom)->page_alloc_lock);                              \
   48.29      } while ( 0 )
   48.30  
   48.31 -extern struct pfn_info *frame_table;
   48.32 +extern struct page_info *frame_table;
   48.33  extern unsigned long max_page;
   48.34  extern unsigned long total_pages;
   48.35  void init_frametable(void);
   48.36  
   48.37 -int alloc_page_type(struct pfn_info *page, unsigned long type);
   48.38 -void free_page_type(struct pfn_info *page, unsigned long type);
   48.39 +int alloc_page_type(struct page_info *page, unsigned long type);
   48.40 +void free_page_type(struct page_info *page, unsigned long type);
   48.41  extern void invalidate_shadow_ldt(struct vcpu *d);
   48.42  extern int shadow_remove_all_write_access(
   48.43 -    struct domain *d, unsigned long gpfn, unsigned long gmfn);
   48.44 +    struct domain *d, unsigned long gmfn, unsigned long mfn);
   48.45  extern u32 shadow_remove_all_access( struct domain *d, unsigned long gmfn);
   48.46  extern int _shadow_mode_refcounts(struct domain *d);
   48.47  
   48.48 -static inline void put_page(struct pfn_info *page)
   48.49 +static inline void put_page(struct page_info *page)
   48.50  {
   48.51      u32 nx, x, y = page->count_info;
   48.52  
   48.53 @@ -182,7 +182,7 @@ static inline void put_page(struct pfn_i
   48.54  }
   48.55  
   48.56  
   48.57 -static inline int get_page(struct pfn_info *page,
   48.58 +static inline int get_page(struct page_info *page,
   48.59                             struct domain *domain)
   48.60  {
   48.61      u32 x, nx, y = page->count_info;
   48.62 @@ -199,7 +199,7 @@ static inline int get_page(struct pfn_in
   48.63          {
   48.64              if ( !_shadow_mode_refcounts(domain) )
   48.65                  DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" PRtype_info "\n",
   48.66 -                        page_to_pfn(page), domain, unpickle_domptr(d),
   48.67 +                        page_to_mfn(page), domain, unpickle_domptr(d),
   48.68                          x, page->u.inuse.type_info);
   48.69              return 0;
   48.70          }
   48.71 @@ -214,19 +214,19 @@ static inline int get_page(struct pfn_in
   48.72      return 1;
   48.73  }
   48.74  
   48.75 -void put_page_type(struct pfn_info *page);
   48.76 -int  get_page_type(struct pfn_info *page, unsigned long type);
   48.77 +void put_page_type(struct page_info *page);
   48.78 +int  get_page_type(struct page_info *page, unsigned long type);
   48.79  int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
   48.80  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
   48.81  
   48.82 -static inline void put_page_and_type(struct pfn_info *page)
   48.83 +static inline void put_page_and_type(struct page_info *page)
   48.84  {
   48.85      put_page_type(page);
   48.86      put_page(page);
   48.87  }
   48.88  
   48.89  
   48.90 -static inline int get_page_and_type(struct pfn_info *page,
   48.91 +static inline int get_page_and_type(struct page_info *page,
   48.92                                      struct domain *domain,
   48.93                                      unsigned long type)
   48.94  {
    49.1 --- a/xen/include/asm-x86/page-guest32.h	Wed Feb 01 15:01:04 2006 +0000
    49.2 +++ b/xen/include/asm-x86/page-guest32.h	Wed Feb 01 16:28:50 2006 +0100
    49.3 @@ -34,9 +34,9 @@ typedef l2_pgentry_t root_pgentry_32_t;
    49.4  #define l2e_get_flags_32(x)           (get_pte_flags_32((x).l2))
    49.5  
    49.6  #define l1e_get_paddr_32(x)           \
    49.7 -    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
    49.8 +    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
    49.9  #define l2e_get_paddr_32(x)           \
   49.10 -    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
   49.11 +    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
   49.12  
   49.13  /* Construct an empty pte. */
   49.14  #define l1e_empty_32()                ((l1_pgentry_32_t) { 0 })
   49.15 @@ -50,12 +50,12 @@ typedef l2_pgentry_t root_pgentry_32_t;
   49.16  
   49.17  /* Construct a pte from a physical address and access flags. */
   49.18  #ifndef __ASSEMBLY__
   49.19 -static inline l1_pgentry_32_t l1e_from_paddr_32(physaddr_t pa, unsigned int flags)
   49.20 +static inline l1_pgentry_32_t l1e_from_paddr_32(paddr_t pa, unsigned int flags)
   49.21  {
   49.22      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   49.23      return (l1_pgentry_32_t) { pa | put_pte_flags_32(flags) };
   49.24  }
   49.25 -static inline l2_pgentry_32_t l2e_from_paddr_32(physaddr_t pa, unsigned int flags)
   49.26 +static inline l2_pgentry_32_t l2e_from_paddr_32(paddr_t pa, unsigned int flags)
   49.27  {
   49.28      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   49.29      return (l2_pgentry_32_t) { pa | put_pte_flags_32(flags) };
   49.30 @@ -64,8 +64,8 @@ static inline l2_pgentry_32_t l2e_from_p
   49.31  
   49.32  
   49.33  /* Construct a pte from a page pointer and access flags. */
   49.34 -#define l1e_from_page_32(page, flags) (l1e_from_pfn_32(page_to_pfn(page),(flags)))
   49.35 -#define l2e_from_page_32(page, flags) (l2e_from_pfn_32(page_to_pfn(page),(flags)))
   49.36 +#define l1e_from_page_32(page, flags) (l1e_from_pfn_32(page_to_mfn(page),(flags)))
   49.37 +#define l2e_from_page_32(page, flags) (l2e_from_pfn_32(page_to_mfn(page),(flags)))
   49.38  
   49.39  /* Add extra flags to an existing pte. */
   49.40  #define l1e_add_flags_32(x, flags)    ((x).l1 |= put_pte_flags_32(flags))
    50.1 --- a/xen/include/asm-x86/page.h	Wed Feb 01 15:01:04 2006 +0000
    50.2 +++ b/xen/include/asm-x86/page.h	Wed Feb 01 16:28:50 2006 +0100
    50.3 @@ -41,21 +41,21 @@
    50.4  #define l4e_get_pfn(x)             \
    50.5      ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
    50.6  
    50.7 -/* Get physical address of page mapped by pte (physaddr_t). */
    50.8 +/* Get physical address of page mapped by pte (paddr_t). */
    50.9  #define l1e_get_paddr(x)           \
   50.10 -    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
   50.11 +    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
   50.12  #define l2e_get_paddr(x)           \
   50.13 -    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
   50.14 +    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
   50.15  #define l3e_get_paddr(x)           \
   50.16 -    ((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
   50.17 +    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
   50.18  #define l4e_get_paddr(x)           \
   50.19 -    ((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
   50.20 +    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
   50.21  
   50.22 -/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
   50.23 -#define l1e_get_page(x)           (pfn_to_page(l1e_get_pfn(x)))
   50.24 -#define l2e_get_page(x)           (pfn_to_page(l2e_get_pfn(x)))
   50.25 -#define l3e_get_page(x)           (pfn_to_page(l3e_get_pfn(x)))
   50.26 -#define l4e_get_page(x)           (pfn_to_page(l4e_get_pfn(x)))
   50.27 +/* Get pointer to info structure of page mapped by pte (struct page_info *). */
   50.28 +#define l1e_get_page(x)           (mfn_to_page(l1e_get_pfn(x)))
   50.29 +#define l2e_get_page(x)           (mfn_to_page(l2e_get_pfn(x)))
   50.30 +#define l3e_get_page(x)           (mfn_to_page(l3e_get_pfn(x)))
   50.31 +#define l4e_get_page(x)           (mfn_to_page(l4e_get_pfn(x)))
   50.32  
   50.33  /* Get pte access flags (unsigned int). */
   50.34  #define l1e_get_flags(x)           (get_pte_flags((x).l1))
   50.35 @@ -81,25 +81,25 @@
   50.36  
   50.37  /* Construct a pte from a physical address and access flags. */
   50.38  #ifndef __ASSEMBLY__
   50.39 -static inline l1_pgentry_t l1e_from_paddr(physaddr_t pa, unsigned int flags)
   50.40 +static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
   50.41  {
   50.42      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   50.43      return (l1_pgentry_t) { pa | put_pte_flags(flags) };
   50.44  }
   50.45 -static inline l2_pgentry_t l2e_from_paddr(physaddr_t pa, unsigned int flags)
   50.46 +static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
   50.47  {
   50.48      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   50.49      return (l2_pgentry_t) { pa | put_pte_flags(flags) };
   50.50  }
   50.51  #if CONFIG_PAGING_LEVELS >= 3
   50.52 -static inline l3_pgentry_t l3e_from_paddr(physaddr_t pa, unsigned int flags)
   50.53 +static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
   50.54  {
   50.55      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   50.56      return (l3_pgentry_t) { pa | put_pte_flags(flags) };
   50.57  }
   50.58  #endif
   50.59  #if CONFIG_PAGING_LEVELS >= 4
   50.60 -static inline l4_pgentry_t l4e_from_paddr(physaddr_t pa, unsigned int flags)
   50.61 +static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
   50.62  {
   50.63      ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
   50.64      return (l4_pgentry_t) { pa | put_pte_flags(flags) };
   50.65 @@ -114,10 +114,10 @@ static inline l4_pgentry_t l4e_from_padd
   50.66  #define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })
   50.67  
   50.68  /* Construct a pte from a page pointer and access flags. */
   50.69 -#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
   50.70 -#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
   50.71 -#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
   50.72 -#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))
   50.73 +#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
   50.74 +#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
   50.75 +#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
   50.76 +#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))
   50.77  
   50.78  /* Add extra flags to an existing pte. */
   50.79  #define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
   50.80 @@ -172,7 +172,7 @@ typedef struct { u32 pfn; } pagetable_t;
   50.81  /* x86_64 */
   50.82  typedef struct { u64 pfn; } pagetable_t;
   50.83  #endif
   50.84 -#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
   50.85 +#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
   50.86  #define pagetable_get_pfn(x)   ((x).pfn)
   50.87  #define mk_pagetable(pa)       \
   50.88      ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
   50.89 @@ -181,16 +181,31 @@ typedef struct { u64 pfn; } pagetable_t;
   50.90  #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
   50.91  #define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
   50.92  
   50.93 +#define mfn_valid(mfn)      ((mfn) < max_page)
   50.94 +
   50.95 +/* Convert between Xen-heap virtual addresses and machine addresses. */
   50.96  #define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
   50.97 -#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
   50.98 -#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
   50.99 -#define pfn_to_page(_pfn)   (frame_table + (_pfn))
  50.100 -#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
  50.101 -#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
  50.102 -#define pfn_valid(_pfn)     ((_pfn) < max_page)
  50.103 +#define virt_to_maddr(va)   ((unsigned long)(va)-PAGE_OFFSET)
  50.104 +#define maddr_to_virt(ma)   ((void *)((unsigned long)(ma)+PAGE_OFFSET))
  50.105 +/* Shorthand versions of the above functions. */
  50.106 +#define __pa(x)             (virt_to_maddr(x))
  50.107 +#define __va(x)             (maddr_to_virt(x))
  50.108  
  50.109 -#define pfn_to_phys(pfn)    ((physaddr_t)(pfn) << PAGE_SHIFT)
  50.110 -#define phys_to_pfn(pa)     ((unsigned long)((pa) >> PAGE_SHIFT))
  50.111 +/* Convert between machine frame numbers and page-info structures. */
  50.112 +#define mfn_to_page(mfn)    (frame_table + (mfn))
  50.113 +#define page_to_mfn(pg)     ((unsigned long)((pg) - frame_table))
  50.114 +
  50.115 +/* Convert between machine addresses and page-info structures. */
  50.116 +#define maddr_to_page(ma)   (frame_table + ((ma) >> PAGE_SHIFT))
  50.117 +#define page_to_maddr(pg)   ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)
  50.118 +
  50.119 +/* Convert between Xen-heap virtual addresses and page-info structures. */
  50.120 +#define virt_to_page(va)    (frame_table + (__pa(va) >> PAGE_SHIFT))
  50.121 +#define page_to_virt(pg)    (maddr_to_virt(page_to_maddr(pg)))
  50.122 +
  50.123 +/* Convert between frame number and address formats.  */
  50.124 +#define pfn_to_paddr(pfn)   ((paddr_t)(pfn) << PAGE_SHIFT)
  50.125 +#define paddr_to_pfn(pa)    ((unsigned long)((pa) >> PAGE_SHIFT))
  50.126  
  50.127  /* High table entries are reserved by the hypervisor. */
  50.128  #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
  50.129 @@ -228,9 +243,9 @@ typedef struct { u64 pfn; } pagetable_t;
  50.130                       (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<2))))
  50.131  
  50.132  #define linear_pg_table linear_l1_table
  50.133 -#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
  50.134 -#define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
  50.135 -#define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
  50.136 +#define linear_l2_table(v) ((v)->arch.guest_vtable)
  50.137 +#define linear_l3_table(v) ((v)->arch.guest_vl3table)
  50.138 +#define linear_l4_table(v) ((v)->arch.guest_vl4table)
  50.139  
  50.140  #ifndef __ASSEMBLY__
  50.141  #if CONFIG_PAGING_LEVELS == 3
  50.142 @@ -298,7 +313,7 @@ extern void paging_init(void);
  50.143  
  50.144  #ifndef __ASSEMBLY__
  50.145  
  50.146 -static inline int get_order_from_bytes(physaddr_t size)
  50.147 +static inline int get_order_from_bytes(paddr_t size)
  50.148  {
  50.149      int order;
  50.150      size = (size-1) >> PAGE_SHIFT;
  50.151 @@ -317,17 +332,17 @@ static inline int get_order_from_pages(u
  50.152  }
  50.153  
  50.154  /* Allocator functions for Xen pagetables. */
  50.155 -struct pfn_info *alloc_xen_pagetable(void);
  50.156 -void free_xen_pagetable(struct pfn_info *pg);
  50.157 +struct page_info *alloc_xen_pagetable(void);
  50.158 +void free_xen_pagetable(struct page_info *pg);
  50.159  l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
  50.160  
  50.161 -/* Map physical page range in Xen virtual address space. */
  50.162 +/* Map machine page range in Xen virtual address space. */
  50.163  #define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
  50.164  int
  50.165  map_pages_to_xen(
  50.166      unsigned long virt,
  50.167 -    unsigned long pfn,
  50.168 -    unsigned long nr_pfns,
  50.169 +    unsigned long mfn,
  50.170 +    unsigned long nr_mfns,
  50.171      unsigned long flags);
  50.172  
  50.173  #endif /* !__ASSEMBLY__ */
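
The conversion macros introduced above are related by a few simple identities; the following sketch spells them out (example_conversions is a hypothetical helper, not part of the patch, and assumes a Xen-heap page):

    /* Hypothetical helper illustrating the new conversion macros.
     * Only valid for Xen-heap pages, which have a direct virtual mapping. */
    static void example_conversions(struct page_info *pg)
    {
        unsigned long mfn = page_to_mfn(pg);    /* machine frame number     */
        paddr_t       ma  = page_to_maddr(pg);  /* == pfn_to_paddr(mfn)     */
        void         *va  = page_to_virt(pg);   /* Xen-heap mapping of ma   */

        ASSERT(mfn_to_page(mfn)  == pg);
        ASSERT(maddr_to_page(ma) == pg);
        ASSERT(virt_to_maddr(va) == ma);
    }
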
    51.1 --- a/xen/include/asm-x86/shadow.h	Wed Feb 01 15:01:04 2006 +0000
    51.2 +++ b/xen/include/asm-x86/shadow.h	Wed Feb 01 16:28:50 2006 +0100
    51.3 @@ -133,10 +133,10 @@ extern int set_p2m_entry(
    51.4  extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype);
    51.5  
    51.6  extern void shadow_l1_normal_pt_update(struct domain *d,
    51.7 -                                       physaddr_t pa, l1_pgentry_t l1e,
    51.8 +                                       paddr_t pa, l1_pgentry_t l1e,
    51.9                                         struct domain_mmap_cache *cache);
   51.10  extern void shadow_l2_normal_pt_update(struct domain *d,
   51.11 -                                       physaddr_t pa, l2_pgentry_t l2e,
   51.12 +                                       paddr_t pa, l2_pgentry_t l2e,
   51.13                                         struct domain_mmap_cache *cache);
   51.14  #if CONFIG_PAGING_LEVELS >= 3
   51.15  #include <asm/page-guest32.h>
   51.16 @@ -150,12 +150,12 @@ extern void shadow_l2_normal_pt_update(s
   51.17  
   51.18  extern unsigned long gva_to_gpa(unsigned long gva);
   51.19  extern void shadow_l3_normal_pt_update(struct domain *d,
   51.20 -                                       physaddr_t pa, l3_pgentry_t l3e,
   51.21 +                                       paddr_t pa, l3_pgentry_t l3e,
   51.22                                         struct domain_mmap_cache *cache);
   51.23  #endif
   51.24  #if CONFIG_PAGING_LEVELS >= 4
   51.25  extern void shadow_l4_normal_pt_update(struct domain *d,
   51.26 -                                       physaddr_t pa, l4_pgentry_t l4e,
   51.27 +                                       paddr_t pa, l4_pgentry_t l4e,
   51.28                                         struct domain_mmap_cache *cache);
   51.29  #endif
   51.30  extern int shadow_do_update_va_mapping(unsigned long va,
   51.31 @@ -170,7 +170,7 @@ static inline unsigned long __shadow_sta
   51.32  static inline void update_hl2e(struct vcpu *v, unsigned long va);
   51.33  #endif
   51.34  
   51.35 -static inline int page_is_page_table(struct pfn_info *page)
   51.36 +static inline int page_is_page_table(struct page_info *page)
   51.37  {
   51.38      struct domain *owner = page_get_owner(page);
   51.39      u32 type_info;
   51.40 @@ -184,23 +184,23 @@ static inline int page_is_page_table(str
   51.41  
   51.42  static inline int mfn_is_page_table(unsigned long mfn)
   51.43  {
   51.44 -    if ( !pfn_valid(mfn) )
   51.45 +    if ( !mfn_valid(mfn) )
   51.46          return 0;
   51.47  
   51.48 -    return page_is_page_table(pfn_to_page(mfn));
   51.49 +    return page_is_page_table(mfn_to_page(mfn));
   51.50  }
   51.51  
   51.52 -static inline int page_out_of_sync(struct pfn_info *page)
   51.53 +static inline int page_out_of_sync(struct page_info *page)
   51.54  {
   51.55      return page->count_info & PGC_out_of_sync;
   51.56  }
   51.57  
   51.58  static inline int mfn_out_of_sync(unsigned long mfn)
   51.59  {
   51.60 -    if ( !pfn_valid(mfn) )
   51.61 +    if ( !mfn_valid(mfn) )
   51.62          return 0;
   51.63  
   51.64 -    return page_out_of_sync(pfn_to_page(mfn));
   51.65 +    return page_out_of_sync(mfn_to_page(mfn));
   51.66  }
   51.67  
   51.68  
   51.69 @@ -283,12 +283,12 @@ static inline void shadow_mode_disable(s
   51.70  
   51.71  /************************************************************************/
   51.72  
   51.73 -#define __mfn_to_gpfn(_d, mfn)                         \
   51.74 +#define mfn_to_gmfn(_d, mfn)                         \
   51.75      ( (shadow_mode_translate(_d))                      \
   51.76        ? get_pfn_from_mfn(mfn)                          \
   51.77        : (mfn) )
   51.78  
   51.79 -#define __gpfn_to_mfn(_d, gpfn)                        \
   51.80 +#define gmfn_to_mfn(_d, gpfn)                        \
   51.81      ({                                                 \
   51.82          unlikely(shadow_mode_translate(_d))            \
   51.83          ? (likely(current->domain == (_d))             \
   51.84 @@ -317,7 +317,7 @@ struct out_of_sync_entry {
   51.85      unsigned long gpfn;    /* why is this here? */
   51.86      unsigned long gmfn;
   51.87      unsigned long snapshot_mfn;
   51.88 -    physaddr_t writable_pl1e; /* NB: this is a machine address */
   51.89 +    paddr_t writable_pl1e; /* NB: this is a machine address */
   51.90      unsigned long va;
   51.91  };
   51.92  
   51.93 @@ -401,8 +401,8 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
   51.94      if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
   51.95           !(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) &&
   51.96           (mfn = l1e_get_pfn(nl1e)) &&
   51.97 -         pfn_valid(mfn) &&
   51.98 -         (owner = page_get_owner(pfn_to_page(mfn))) &&
   51.99 +         mfn_valid(mfn) &&
  51.100 +         (owner = page_get_owner(mfn_to_page(mfn))) &&
  51.101           (d != owner) )
  51.102      {
  51.103          res = get_page_from_l1e(nl1e, owner);
  51.104 @@ -432,7 +432,7 @@ shadow_put_page_from_l1e(l1_pgentry_t l1
  51.105  }
  51.106  
  51.107  static inline void
  51.108 -shadow_put_page_type(struct domain *d, struct pfn_info *page)
  51.109 +shadow_put_page_type(struct domain *d, struct page_info *page)
  51.110  {
  51.111      if ( !shadow_mode_refcounts(d) )
  51.112          return;
  51.113 @@ -441,7 +441,7 @@ shadow_put_page_type(struct domain *d, s
  51.114  }
  51.115  
  51.116  static inline int shadow_get_page(struct domain *d,
  51.117 -                                  struct pfn_info *page,
  51.118 +                                  struct page_info *page,
  51.119                                    struct domain *owner)
  51.120  {
  51.121      if ( !shadow_mode_refcounts(d) )
  51.122 @@ -450,7 +450,7 @@ static inline int shadow_get_page(struct
  51.123  }
  51.124  
  51.125  static inline void shadow_put_page(struct domain *d,
  51.126 -                                   struct pfn_info *page)
  51.127 +                                   struct page_info *page)
  51.128  {
  51.129      if ( !shadow_mode_refcounts(d) )
  51.130          return;
  51.131 @@ -493,9 +493,9 @@ static inline void __mark_dirty(struct d
  51.132          SH_VLOG("mark_dirty OOR! mfn=%lx pfn=%lx max=%x (dom %p)",
  51.133                 mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
  51.134          SH_VLOG("dom=%p caf=%08x taf=%" PRtype_info, 
  51.135 -                page_get_owner(pfn_to_page(mfn)),
  51.136 -                pfn_to_page(mfn)->count_info, 
  51.137 -                pfn_to_page(mfn)->u.inuse.type_info );
  51.138 +                page_get_owner(mfn_to_page(mfn)),
  51.139 +                mfn_to_page(mfn)->count_info, 
  51.140 +                mfn_to_page(mfn)->u.inuse.type_info );
  51.141      }
  51.142  #endif
  51.143  }
  51.144 @@ -577,12 +577,12 @@ update_hl2e(struct vcpu *v, unsigned lon
  51.145      if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
  51.146      {
  51.147          if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
  51.148 -             !shadow_get_page(v->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
  51.149 +             !shadow_get_page(v->domain, mfn_to_page(l1e_get_pfn(new_hl2e)),
  51.150                                v->domain) )
  51.151              new_hl2e = l1e_empty();
  51.152          if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
  51.153          {
  51.154 -            shadow_put_page(v->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
  51.155 +            shadow_put_page(v->domain, mfn_to_page(l1e_get_pfn(old_hl2e)));
  51.156              need_flush = 1;
  51.157          }
  51.158  
  51.159 @@ -598,7 +598,7 @@ update_hl2e(struct vcpu *v, unsigned lon
  51.160  }
  51.161  
  51.162  static inline void shadow_drop_references(
  51.163 -    struct domain *d, struct pfn_info *page)
  51.164 +    struct domain *d, struct page_info *page)
  51.165  {
  51.166      if ( likely(!shadow_mode_refcounts(d)) ||
  51.167           ((page->u.inuse.type_info & PGT_count_mask) == 0) )
  51.168 @@ -606,21 +606,21 @@ static inline void shadow_drop_reference
  51.169  
  51.170      /* XXX This needs more thought... */
  51.171      printk("%s: needing to call shadow_remove_all_access for mfn=%lx\n",
  51.172 -           __func__, page_to_pfn(page));
  51.173 -    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
  51.174 +           __func__, page_to_mfn(page));
  51.175 +    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
  51.176             page->count_info, page->u.inuse.type_info);
  51.177  
  51.178      shadow_lock(d);
  51.179 -    shadow_remove_all_access(d, page_to_pfn(page));
  51.180 +    shadow_remove_all_access(d, page_to_mfn(page));
  51.181      shadow_unlock(d);
  51.182  
  51.183 -    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
  51.184 +    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
  51.185             page->count_info, page->u.inuse.type_info);
  51.186  }
  51.187  
  51.188  /* XXX Needs more thought. Neither pretty nor fast: a place holder. */
  51.189  static inline void shadow_sync_and_drop_references(
  51.190 -    struct domain *d, struct pfn_info *page)
  51.191 +    struct domain *d, struct page_info *page)
  51.192  {
  51.193      if ( likely(!shadow_mode_refcounts(d)) )
  51.194          return;
  51.195 @@ -628,9 +628,9 @@ static inline void shadow_sync_and_drop_
  51.196      shadow_lock(d);
  51.197  
  51.198      if ( page_out_of_sync(page) )
  51.199 -        __shadow_sync_mfn(d, page_to_pfn(page));
  51.200 +        __shadow_sync_mfn(d, page_to_mfn(page));
  51.201  
  51.202 -    shadow_remove_all_access(d, page_to_pfn(page));
  51.203 +    shadow_remove_all_access(d, page_to_mfn(page));
  51.204  
  51.205      shadow_unlock(d);
  51.206  }
  51.207 @@ -647,7 +647,7 @@ static inline void guest_physmap_add_pag
  51.208      domain_mmap_cache_init(&c1);
  51.209      domain_mmap_cache_init(&c2);
  51.210      shadow_lock(d);
  51.211 -    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
  51.212 +    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
  51.213      set_p2m_entry(d, gpfn, mfn, &c1, &c2);
  51.214      set_pfn_from_mfn(mfn, gpfn);
  51.215      shadow_unlock(d);
  51.216 @@ -666,7 +666,7 @@ static inline void guest_physmap_remove_
  51.217      domain_mmap_cache_init(&c1);
  51.218      domain_mmap_cache_init(&c2);
  51.219      shadow_lock(d);
  51.220 -    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
  51.221 +    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
  51.222      set_p2m_entry(d, gpfn, -1, &c1, &c2);
  51.223      set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
  51.224      shadow_unlock(d);
  51.225 @@ -684,22 +684,22 @@ get_shadow_ref(unsigned long smfn)
  51.226  {
  51.227      u32 x, nx;
  51.228  
  51.229 -    ASSERT(pfn_valid(smfn));
  51.230 +    ASSERT(mfn_valid(smfn));
  51.231  
  51.232 -    x = pfn_to_page(smfn)->count_info;
  51.233 +    x = mfn_to_page(smfn)->count_info;
  51.234      nx = x + 1;
  51.235  
  51.236      if ( unlikely(nx == 0) )
  51.237      {
  51.238          printk("get_shadow_ref overflow, gmfn=%" PRtype_info  " smfn=%lx\n",
  51.239 -               pfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
  51.240 +               mfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
  51.241                 smfn);
  51.242          BUG();
  51.243      }
  51.244      
  51.245      // Guarded by the shadow lock...
  51.246      //
  51.247 -    pfn_to_page(smfn)->count_info = nx;
  51.248 +    mfn_to_page(smfn)->count_info = nx;
  51.249  
  51.250      return 1;
  51.251  }
  51.252 @@ -714,9 +714,9 @@ put_shadow_ref(unsigned long smfn)
  51.253  {
  51.254      u32 x, nx;
  51.255  
  51.256 -    ASSERT(pfn_valid(smfn));
  51.257 +    ASSERT(mfn_valid(smfn));
  51.258  
  51.259 -    x = pfn_to_page(smfn)->count_info;
  51.260 +    x = mfn_to_page(smfn)->count_info;
  51.261      nx = x - 1;
  51.262  
  51.263      if ( unlikely(x == 0) )
  51.264 @@ -724,14 +724,14 @@ put_shadow_ref(unsigned long smfn)
  51.265          printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%" 
  51.266                 PRtype_info "\n",
  51.267                 smfn,
  51.268 -               pfn_to_page(smfn)->count_info,
  51.269 -               pfn_to_page(smfn)->u.inuse.type_info);
  51.270 +               mfn_to_page(smfn)->count_info,
  51.271 +               mfn_to_page(smfn)->u.inuse.type_info);
  51.272          BUG();
  51.273      }
  51.274  
  51.275      // Guarded by the shadow lock...
  51.276      //
  51.277 -    pfn_to_page(smfn)->count_info = nx;
  51.278 +    mfn_to_page(smfn)->count_info = nx;
  51.279  
  51.280      if ( unlikely(nx == 0) )
  51.281      {
  51.282 @@ -742,9 +742,9 @@ put_shadow_ref(unsigned long smfn)
  51.283  static inline void
  51.284  shadow_pin(unsigned long smfn)
  51.285  {
  51.286 -    ASSERT( !(pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
  51.287 +    ASSERT( !(mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
  51.288  
  51.289 -    pfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
  51.290 +    mfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
  51.291      if ( unlikely(!get_shadow_ref(smfn)) )
  51.292          BUG();
  51.293  }
  51.294 @@ -752,9 +752,9 @@ shadow_pin(unsigned long smfn)
  51.295  static inline void
  51.296  shadow_unpin(unsigned long smfn)
  51.297  {
  51.298 -    ASSERT( (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
  51.299 +    ASSERT( (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
  51.300  
  51.301 -    pfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
  51.302 +    mfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
  51.303      put_shadow_ref(smfn);
  51.304  }
  51.305  
  51.306 @@ -770,9 +770,9 @@ static inline void set_guest_back_ptr(
  51.307  
  51.308          ASSERT(shadow_lock_is_acquired(d));
  51.309          gmfn = l1e_get_pfn(spte);
  51.310 -        pfn_to_page(gmfn)->tlbflush_timestamp = smfn;
  51.311 -        pfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
  51.312 -        pfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
  51.313 +        mfn_to_page(gmfn)->tlbflush_timestamp = smfn;
  51.314 +        mfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
  51.315 +        mfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
  51.316      }
  51.317  }
  51.318  
  51.319 @@ -790,7 +790,7 @@ static inline int l1pte_write_fault(
  51.320      l1_pgentry_t gpte = *gpte_p;
  51.321      l1_pgentry_t spte;
  51.322      unsigned long gpfn = l1e_get_pfn(gpte);
  51.323 -    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
  51.324 +    unsigned long gmfn = gmfn_to_mfn(d, gpfn);
  51.325  
  51.326      //printk("l1pte_write_fault gmfn=%lx\n", gmfn);
  51.327  
  51.328 @@ -825,7 +825,7 @@ static inline int l1pte_read_fault(
  51.329      l1_pgentry_t gpte = *gpte_p;
  51.330      l1_pgentry_t spte = *spte_p;
  51.331      unsigned long pfn = l1e_get_pfn(gpte);
  51.332 -    unsigned long mfn = __gpfn_to_mfn(d, pfn);
  51.333 +    unsigned long mfn = gmfn_to_mfn(d, pfn);
  51.334  
  51.335      if ( unlikely(!VALID_MFN(mfn)) )
  51.336      {
  51.337 @@ -862,7 +862,7 @@ static inline void l1pte_propagate_from_
  51.338  
  51.339      if ( ((guest_l1e_get_flags(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
  51.340            (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
  51.341 -         VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
  51.342 +         VALID_MFN(mfn = gmfn_to_mfn(d, l1e_get_pfn(gpte))) )
  51.343      {
  51.344          spte = l1e_from_pfn(
  51.345              mfn, guest_l1e_get_flags(gpte) & ~(_PAGE_GLOBAL | _PAGE_AVAIL));
  51.346 @@ -893,7 +893,7 @@ static inline void hl2e_propagate_from_g
  51.347  
  51.348      if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
  51.349      {
  51.350 -        mfn = __gpfn_to_mfn(d, pfn);
  51.351 +        mfn = gmfn_to_mfn(d, pfn);
  51.352          if ( VALID_MFN(mfn) && (mfn < max_page) )
  51.353              hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
  51.354      }
  51.355 @@ -979,7 +979,7 @@ validate_pte_change(
  51.356              //
  51.357              perfc_incrc(validate_pte_changes2);
  51.358              if ( likely(l1e_get_flags(new_spte) & _PAGE_PRESENT) )
  51.359 -                shadow_put_page_type(d, pfn_to_page(l1e_get_pfn(new_spte)));
  51.360 +                shadow_put_page_type(d, mfn_to_page(l1e_get_pfn(new_spte)));
  51.361          }
  51.362          else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
  51.363                     _PAGE_PRESENT ) &&
  51.364 @@ -1035,11 +1035,11 @@ validate_hl2e_change(
  51.365          perfc_incrc(validate_hl2e_changes);
  51.366  
  51.367          if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
  51.368 -             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), d) )
  51.369 +             !get_page(mfn_to_page(l1e_get_pfn(new_hl2e)), d) )
  51.370              new_hl2e = l1e_empty();
  51.371          if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
  51.372          {
  51.373 -            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
  51.374 +            put_page(mfn_to_page(l1e_get_pfn(old_hl2e)));
  51.375              need_flush = 1;
  51.376          }
  51.377      }
  51.378 @@ -1234,7 +1234,7 @@ static inline unsigned long __shadow_sta
  51.379      struct domain *d, unsigned long gpfn, unsigned long stype)
  51.380  {
  51.381      unsigned long gmfn = ((current->domain == d)
  51.382 -                          ? __gpfn_to_mfn(d, gpfn)
  51.383 +                          ? gmfn_to_mfn(d, gpfn)
  51.384                            : INVALID_MFN);
  51.385  
  51.386      ASSERT(shadow_lock_is_acquired(d));
  51.387 @@ -1254,8 +1254,8 @@ static inline unsigned long __shadow_sta
  51.388              printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info " "
  51.389                     "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
  51.390                     d->domain_id, gpfn, gmfn, stype,
  51.391 -                   pfn_to_page(gmfn)->count_info,
  51.392 -                   pfn_to_page(gmfn)->u.inuse.type_info,
  51.393 +                   mfn_to_page(gmfn)->count_info,
  51.394 +                   mfn_to_page(gmfn)->u.inuse.type_info,
  51.395                     mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
  51.396              BUG();
  51.397          }
  51.398 @@ -1407,7 +1407,7 @@ static inline void delete_shadow_status(
  51.399   found:
  51.400      // release ref to page
  51.401      if ( stype != PGT_writable_pred )
  51.402 -        put_page(pfn_to_page(gmfn));
  51.403 +        put_page(mfn_to_page(gmfn));
  51.404  
  51.405      shadow_audit(d, 0);
  51.406  }
  51.407 @@ -1446,7 +1446,7 @@ static inline void set_shadow_status(
  51.408      //       is given away by the domain?
  51.409      //
  51.410      if ( stype != PGT_writable_pred )
  51.411 -        get_page(pfn_to_page(gmfn), d);
  51.412 +        get_page(mfn_to_page(gmfn), d);
  51.413  
  51.414      /*
  51.415       * STEP 1. If page is already in the table, update it in place.
  51.416 @@ -1459,7 +1459,7 @@ static inline void set_shadow_status(
  51.417                  BUG(); // we should never replace entries into the hash table
  51.418              x->smfn = smfn;
  51.419              if ( stype != PGT_writable_pred )
  51.420 -                put_page(pfn_to_page(gmfn)); // already had a ref...
  51.421 +                put_page(mfn_to_page(gmfn)); // already had a ref...
  51.422              goto done;
  51.423          }
  51.424  
  51.425 @@ -1535,7 +1535,7 @@ static inline void set_shadow_status(
  51.426  void static inline
  51.427  shadow_update_min_max(unsigned long smfn, int index)
  51.428  {
  51.429 -    struct pfn_info *sl1page = pfn_to_page(smfn);
  51.430 +    struct page_info *sl1page = mfn_to_page(smfn);
  51.431      u32 min_max = sl1page->tlbflush_timestamp;
  51.432      int min = SHADOW_MIN(min_max);
  51.433      int max = SHADOW_MAX(min_max);
  51.434 @@ -1634,8 +1634,8 @@ shadow_mode_page_writable(unsigned long 
  51.435  {
  51.436      struct vcpu *v = current;
  51.437      struct domain *d = v->domain;
  51.438 -    unsigned long mfn = __gpfn_to_mfn(d, gpfn);
  51.439 -    u32 type = pfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
  51.440 +    unsigned long mfn = gmfn_to_mfn(d, gpfn);
  51.441 +    u32 type = mfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
  51.442  
  51.443      if ( shadow_mode_refcounts(d) &&
  51.444           (type == PGT_writable_page) )
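
Written out as a plain function, the translation performed by the renamed mfn_to_gmfn macro is simply the following (a hypothetical illustration, not part of the patch):

    /* Hypothetical illustration of mfn_to_gmfn's behaviour. */
    static unsigned long example_mfn_to_gmfn(struct domain *d, unsigned long mfn)
    {
        /* Translated (shadow_mode_translate) guests name pages by
         * pseudophysical frame; other guests see machine frames directly. */
        return shadow_mode_translate(d) ? get_pfn_from_mfn(mfn) : mfn;
    }
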
    52.1 --- a/xen/include/asm-x86/shadow_public.h	Wed Feb 01 15:01:04 2006 +0000
    52.2 +++ b/xen/include/asm-x86/shadow_public.h	Wed Feb 01 16:28:50 2006 +0100
    52.3 @@ -22,14 +22,14 @@
    52.4  #ifndef _XEN_SHADOW_PUBLIC_H
    52.5  #define _XEN_SHADOW_PUBLIC_H
    52.6  #if CONFIG_PAGING_LEVELS >= 3
    52.7 -#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
    52.8 +#define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
    52.9  
   52.10  extern int alloc_p2m_table(struct domain *d);
   52.11  
   52.12  extern void shadow_sync_and_drop_references(
   52.13 -      struct domain *d, struct pfn_info *page);
   52.14 +      struct domain *d, struct page_info *page);
   52.15  extern void shadow_drop_references(
   52.16 -      struct domain *d, struct pfn_info *page);
   52.17 +      struct domain *d, struct page_info *page);
   52.18  
   52.19  extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
   52.20  
    53.1 --- a/xen/include/asm-x86/types.h	Wed Feb 01 15:01:04 2006 +0000
    53.2 +++ b/xen/include/asm-x86/types.h	Wed Feb 01 16:28:50 2006 +0100
    53.3 @@ -37,17 +37,17 @@ typedef unsigned int u32;
    53.4  typedef signed long long s64;
    53.5  typedef unsigned long long u64;
    53.6  #if defined(CONFIG_X86_PAE)
    53.7 -typedef u64 physaddr_t;
    53.8 -#define PRIphysaddr "016llx"
    53.9 +typedef u64 paddr_t;
   53.10 +#define PRIpaddr "016llx"
   53.11  #else
   53.12 -typedef unsigned long physaddr_t;
   53.13 -#define PRIphysaddr "08lx"
   53.14 +typedef unsigned long paddr_t;
   53.15 +#define PRIpaddr "08lx"
   53.16  #endif
   53.17  #elif defined(__x86_64__)
   53.18  typedef signed long s64;
   53.19  typedef unsigned long u64;
   53.20 -typedef unsigned long physaddr_t;
   53.21 -#define PRIphysaddr "016lx"
   53.22 +typedef unsigned long paddr_t;
   53.23 +#define PRIpaddr "016lx"
   53.24  #endif
   53.25  
   53.26  typedef unsigned long size_t;
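
Call sites print paddr_t values with the renamed format macro; a minimal sketch (example_print_maddr is illustrative only, not part of the patch):

    /* Illustrative only: PRIpaddr hides the 32-bit/64-bit width difference
     * between the PAE and non-PAE definitions of paddr_t. */
    static void example_print_maddr(void *va)
    {
        paddr_t ma = virt_to_maddr(va);
        printk("machine address = %" PRIpaddr "\n", ma);
    }
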
    54.1 --- a/xen/include/xen/domain_page.h	Wed Feb 01 15:01:04 2006 +0000
    54.2 +++ b/xen/include/xen/domain_page.h	Wed Feb 01 16:28:50 2006 +0100
    54.3 @@ -96,10 +96,10 @@ domain_mmap_cache_destroy(struct domain_
    54.4  
    54.5  #else /* !CONFIG_DOMAIN_PAGE */
    54.6  
    54.7 -#define map_domain_page(pfn)                phys_to_virt((pfn)<<PAGE_SHIFT)
    54.8 +#define map_domain_page(pfn)                maddr_to_virt((pfn)<<PAGE_SHIFT)
    54.9  #define unmap_domain_page(va)               ((void)(va))
   54.10  
   54.11 -#define map_domain_page_global(pfn)         phys_to_virt((pfn)<<PAGE_SHIFT)
   54.12 +#define map_domain_page_global(pfn)         maddr_to_virt((pfn)<<PAGE_SHIFT)
   54.13  #define unmap_domain_page_global(va)        ((void)(va))
   54.14  
   54.15  struct domain_mmap_cache { 
    55.1 --- a/xen/include/xen/mm.h	Wed Feb 01 15:01:04 2006 +0000
    55.2 +++ b/xen/include/xen/mm.h	Wed Feb 01 16:28:50 2006 +0100
    55.3 @@ -1,3 +1,29 @@
    55.4 +/******************************************************************************
    55.5 + * include/xen/mm.h
    55.6 + * 
    55.7 + * Definitions for memory pages, frame numbers, addresses, allocations, etc.
    55.8 + * 
    55.9 + * Note that Xen must handle several different physical 'address spaces' and
   55.10 + * there is a consistent terminology for these:
   55.11 + * 
   55.12 + * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
   55.13 + * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
   55.14 + * 3. mfn/maddr:   A real machine frame number or address.
   55.15 + * 4. pfn/paddr:   Used in 'polymorphic' functions that work across all
   55.16 + *                 address spaces, depending on context. See the pagetable
   55.17 + *                 conversion macros in asm-x86/page.h for examples.
   55.18 + *                 Also 'paddr_t' is big enough to store any physical address.
   55.19 + * 
   55.20 + * This scheme provides consistent function and variable names even when
   55.21 + * different guests are running in different memory-management modes.
   55.22 + * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
   55.23 + *    will have gpfn == gmfn and gmfn != mfn.
   55.24 + * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
   55.25 + * 3. A paravirtualised guest with no pseudophysical overlay will have
   55.26 + *    gpfn == gpmfn == mfn.
    55.27 + *    gpfn == gmfn == mfn.
   55.28 + * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
   55.29 + */
   55.30  
   55.31  #ifndef __XEN_MM_H__
   55.32  #define __XEN_MM_H__
   55.33 @@ -8,34 +34,34 @@
   55.34  #include <xen/spinlock.h>
   55.35  
   55.36  struct domain;
   55.37 -struct pfn_info;
   55.38 +struct page_info;
   55.39  
   55.40  /* Boot-time allocator. Turns into generic allocator after bootstrap. */
   55.41 -physaddr_t init_boot_allocator(physaddr_t bitmap_start);
   55.42 -void init_boot_pages(physaddr_t ps, physaddr_t pe);
   55.43 +paddr_t init_boot_allocator(paddr_t bitmap_start);
   55.44 +void init_boot_pages(paddr_t ps, paddr_t pe);
   55.45  unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
   55.46  void end_boot_allocator(void);
   55.47  
   55.48  /* Generic allocator. These functions are *not* interrupt-safe. */
   55.49  void init_heap_pages(
   55.50 -    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
   55.51 -struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
   55.52 +    unsigned int zone, struct page_info *pg, unsigned long nr_pages);
   55.53 +struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order);
   55.54  void free_heap_pages(
   55.55 -    unsigned int zone, struct pfn_info *pg, unsigned int order);
   55.56 +    unsigned int zone, struct page_info *pg, unsigned int order);
   55.57  void scrub_heap_pages(void);
   55.58  
   55.59  /* Xen suballocator. These functions are interrupt-safe. */
   55.60 -void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
   55.61 +void init_xenheap_pages(paddr_t ps, paddr_t pe);
   55.62  void *alloc_xenheap_pages(unsigned int order);
   55.63  void free_xenheap_pages(void *v, unsigned int order);
   55.64  #define alloc_xenheap_page() (alloc_xenheap_pages(0))
   55.65  #define free_xenheap_page(v) (free_xenheap_pages(v,0))
   55.66  
   55.67  /* Domain suballocator. These functions are *not* interrupt-safe.*/
   55.68 -void init_domheap_pages(physaddr_t ps, physaddr_t pe);
   55.69 -struct pfn_info *alloc_domheap_pages(
   55.70 +void init_domheap_pages(paddr_t ps, paddr_t pe);
   55.71 +struct page_info *alloc_domheap_pages(
   55.72      struct domain *d, unsigned int order, unsigned int flags);
   55.73 -void free_domheap_pages(struct pfn_info *pg, unsigned int order);
   55.74 +void free_domheap_pages(struct page_info *pg, unsigned int order);
   55.75  unsigned long avail_domheap_pages(void);
   55.76  #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
   55.77  #define free_domheap_page(p)  (free_domheap_pages(p,0))