ia64/xen-unstable

changeset 8778:9eb9fa8a9933

[IA64] fix name conflict (map_domain_page)

One is defined in xen/include/xen/domain_page.h, and another is
defined in xen/arch/ia64/xen/domain.c. This patch renames the one
defined in xen/arch/ia64/xen/domain.c.

For consistency its family is also renamed.
map_new_domain_page() -> assign_new_domain_page()
map_domain_page() -> assign_domain_page()
map_domain_io_page() -> assign_domain_io_page()

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Thu Feb 09 13:35:11 2006 -0700 (2006-02-09)
parents 9c6cd777259b
children 7b2399cb6508
files xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/domain.c xen/include/asm-ia64/grant_table.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Feb 09 12:59:50 2006 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Feb 09 13:35:11 2006 -0700
     1.3 @@ -317,7 +317,7 @@ int vmx_alloc_contig_pages(struct domain
     1.4  	    for (j = io_ranges[i].start;
     1.5  		 j < io_ranges[i].start + io_ranges[i].size;
     1.6  		 j += PAGE_SIZE)
     1.7 -		map_domain_page(d, j, io_ranges[i].type);
     1.8 +		assign_domain_page(d, j, io_ranges[i].type);
     1.9  	}
    1.10  
    1.11  	conf_nr = VMX_CONFIG_PAGES(d);
    1.12 @@ -334,14 +334,14 @@ int vmx_alloc_contig_pages(struct domain
    1.13  	for (i = 0;
    1.14  	     i < (end < MMIO_START ? end : MMIO_START);
    1.15  	     i += PAGE_SIZE, pgnr++)
    1.16 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.17 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.18  
    1.19  	/* Map normal memory beyond 4G */
    1.20  	if (unlikely(end > MMIO_START)) {
    1.21  	    start = 4 * MEM_G;
    1.22  	    end = start + (end - 3 * MEM_G);
    1.23  	    for (i = start; i < end; i += PAGE_SIZE, pgnr++)
    1.24 -		map_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.25 +		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.26  	}
    1.27  
    1.28  	d->arch.max_pfn = end >> PAGE_SHIFT;
    1.29 @@ -356,7 +356,7 @@ int vmx_alloc_contig_pages(struct domain
    1.30  	/* Map guest firmware */
    1.31  	pgnr = page_to_mfn(page);
    1.32  	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
    1.33 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.34 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
    1.35  
    1.36  	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
    1.37  	    printk("Could not allocate order=1 pages for vmx contig alloc\n");
    1.38 @@ -365,9 +365,9 @@ int vmx_alloc_contig_pages(struct domain
    1.39  
    1.40  	/* Map for shared I/O page and xenstore */
    1.41  	pgnr = page_to_mfn(page);
    1.42 -	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    1.43 +	assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    1.44  	pgnr++;
    1.45 -	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
    1.46 +	assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
    1.47  
    1.48  	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
    1.49  	return 0;
     2.1 --- a/xen/arch/ia64/xen/domain.c	Thu Feb 09 12:59:50 2006 -0700
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Feb 09 13:35:11 2006 -0700
     2.3 @@ -389,7 +389,7 @@ printk("map_new_domain0_page: start=%p,e
     2.4  }
     2.5  
     2.6  /* allocate new page for domain and map it to the specified metaphysical addr */
     2.7 -struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
     2.8 +struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
     2.9  {
    2.10  	struct mm_struct *mm = d->arch.mm;
    2.11  	struct page *p = (struct page *)0;
    2.12 @@ -400,7 +400,7 @@ struct page * map_new_domain_page(struct
    2.13  extern unsigned long vhpt_paddr, vhpt_pend;
    2.14  
    2.15  	if (!mm->pgd) {
    2.16 -		printk("map_new_domain_page: domain pgd must exist!\n");
    2.17 +		printk("assign_new_domain_page: domain pgd must exist!\n");
    2.18  		return(p);
    2.19  	}
    2.20  	pgd = pgd_offset(mm,mpaddr);
    2.21 @@ -428,21 +428,21 @@ extern unsigned long vhpt_paddr, vhpt_pe
    2.22  			if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
    2.23  		}
    2.24  		if (unlikely(!p)) {
    2.25 -printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    2.26 +printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
    2.27  			return(p);
    2.28  		}
    2.29  if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
    2.30 -  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
    2.31 +  printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
    2.32  }
    2.33  		set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
    2.34  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    2.35  	}
    2.36 -	else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.37 +	else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.38  	return p;
    2.39  }
    2.40  
    2.41  /* map a physical address to the specified metaphysical addr */
    2.42 -void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
    2.43 +void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
    2.44  {
    2.45  	struct mm_struct *mm = d->arch.mm;
    2.46  	pgd_t *pgd;
    2.47 @@ -451,7 +451,7 @@ void map_domain_page(struct domain *d, u
    2.48  	pte_t *pte;
    2.49  
    2.50  	if (!mm->pgd) {
    2.51 -		printk("map_domain_page: domain pgd must exist!\n");
    2.52 +		printk("assign_domain_page: domain pgd must exist!\n");
    2.53  		return;
    2.54  	}
    2.55  	pgd = pgd_offset(mm,mpaddr);
    2.56 @@ -472,11 +472,11 @@ void map_domain_page(struct domain *d, u
    2.57  		set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
    2.58  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    2.59  	}
    2.60 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.61 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.62  }
    2.63  #if 0
    2.64  /* map a physical address with specified I/O flag */
    2.65 -void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
    2.66 +void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
    2.67  {
    2.68  	struct mm_struct *mm = d->arch.mm;
    2.69  	pgd_t *pgd;
    2.70 @@ -486,7 +486,7 @@ void map_domain_io_page(struct domain *d
    2.71  	pte_t io_pte;
    2.72  
    2.73  	if (!mm->pgd) {
    2.74 -		printk("map_domain_page: domain pgd must exist!\n");
    2.75 +		printk("assign_domain_page: domain pgd must exist!\n");
    2.76  		return;
    2.77  	}
    2.78  	ASSERT(flags & GPFN_IO_MASK);
    2.79 @@ -509,7 +509,7 @@ void map_domain_io_page(struct domain *d
    2.80  		pte_val(io_pte) = flags;
    2.81  		set_pte(pte, io_pte);
    2.82  	}
    2.83 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.84 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
    2.85  }
    2.86  #endif
    2.87  void mpafoo(unsigned long mpaddr)
    2.88 @@ -557,7 +557,7 @@ tryagain:
    2.89  	}
    2.90  	/* if lookup fails and mpaddr is "legal", "create" the page */
    2.91  	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
    2.92 -		if (map_new_domain_page(d,mpaddr)) goto tryagain;
    2.93 +		if (assign_new_domain_page(d,mpaddr)) goto tryagain;
    2.94  	}
    2.95  	printk("lookup_domain_mpa: bad mpa %p (> %p\n",
    2.96  		mpaddr,d->max_pages<<PAGE_SHIFT);
    2.97 @@ -655,7 +655,7 @@ void loaddomainelfimage(struct domain *d
    2.98  	else
    2.99  #endif
   2.100  	while (memsz > 0) {
   2.101 -		p = map_new_domain_page(d,dom_mpaddr);
   2.102 +		p = assign_new_domain_page(d,dom_mpaddr);
   2.103  		if (unlikely(!p)) BUG();
   2.104  		dom_imva = __va(page_to_maddr(p));
   2.105  		if (filesz > 0) {
     3.1 --- a/xen/include/asm-ia64/grant_table.h	Thu Feb 09 12:59:50 2006 -0700
     3.2 +++ b/xen/include/asm-ia64/grant_table.h	Thu Feb 09 13:35:11 2006 -0700
     3.3 @@ -17,7 +17,7 @@
     3.4  #define gnttab_shared_gmfn(d, t, i)                                     \
     3.5      ( ((d) == dom0) ?                                                   \
     3.6        ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
     3.7 -      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
     3.8 +      (assign_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
     3.9         1UL << (40 - PAGE_SHIFT))                                        \
    3.10      )
    3.11