ia64/xen-unstable

changeset 10423:d60da6514d65

[IA64] fix races caused by p2m entry update

Fix some races in ia64_do_page_fault(), vcpu_itc_i(), vcpu_itc_d() and vcpu_fc().
Introduce struct p2m_entry to record the p2m entry used during translation; after the TLB insert, re-check the recorded entry and, if it has changed, flush the stale mapping and retry.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Mon Jun 19 13:00:37 2006 -0600 (2006-06-19)
parents 7e26d6ffdde7
children 535b466ee1ef
files xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/fw_emul.c xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/mm.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Mon Jun 19 12:54:34 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Mon Jun 19 13:00:37 2006 -0600
     1.3 @@ -58,7 +58,6 @@
     1.4  
     1.5  extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
     1.6  extern void rnat_consumption (VCPU *vcpu);
     1.7 -extern unsigned long translate_domain_mpaddr(unsigned long mpaddr);
     1.8  extern void alt_itlb (VCPU *vcpu, u64 vadr);
     1.9  extern void itlb_fault (VCPU *vcpu, u64 vadr);
    1.10  extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
     2.1 --- a/xen/arch/ia64/vmx/vtlb.c	Mon Jun 19 12:54:34 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Mon Jun 19 13:00:37 2006 -0600
     2.3 @@ -425,7 +425,7 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
     2.4      phy_pte.val = *pte;
     2.5      addr = *pte;
     2.6      addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
     2.7 -    addr = lookup_domain_mpa(v->domain, addr);
     2.8 +    addr = lookup_domain_mpa(v->domain, addr, NULL);
     2.9      if(addr & GPFN_IO_MASK){
    2.10          *pte |= VTLB_PTE_IO;
    2.11          return -1;
     3.1 --- a/xen/arch/ia64/xen/faults.c	Mon Jun 19 12:54:34 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/faults.c	Mon Jun 19 13:00:37 2006 -0600
     3.3 @@ -236,9 +236,10 @@ void ia64_do_page_fault (unsigned long a
     3.4  	fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
     3.5  	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
     3.6  		u64 logps;
     3.7 -		pteval = translate_domain_pte(pteval, address, itir, &logps);
     3.8 +		struct p2m_entry entry;
     3.9 +		pteval = translate_domain_pte(pteval, address, itir, &logps, &entry);
    3.10  		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
    3.11 -		if (read_seqretry(vtlb_lock, seq)) {
    3.12 +		if (read_seqretry(vtlb_lock, seq) || p2m_entry_retry(&entry)) {
    3.13  			vcpu_flush_tlb_vhpt_range(address & ((1 << logps) - 1),
    3.14  			                          logps);
    3.15  			goto again;
     4.1 --- a/xen/arch/ia64/xen/fw_emul.c	Mon Jun 19 12:54:34 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/fw_emul.c	Mon Jun 19 13:00:37 2006 -0600
     4.3 @@ -370,7 +370,7 @@ efi_translate_domain_addr(unsigned long 
     4.4  		if (*fault != IA64_NO_FAULT) return 0;
     4.5  	}
     4.6  
     4.7 -	return ((unsigned long) __va(translate_domain_mpaddr(mpaddr)));
     4.8 +	return ((unsigned long) __va(translate_domain_mpaddr(mpaddr, NULL)));
     4.9  }
    4.10  
    4.11  static efi_status_t
    4.12 @@ -549,7 +549,7 @@ do_ssc(unsigned long ssc, struct pt_regs
    4.13  	    case SSC_WAIT_COMPLETION:
    4.14  		if (arg0) {	// metaphysical address
    4.15  
    4.16 -			arg0 = translate_domain_mpaddr(arg0);
    4.17 +			arg0 = translate_domain_mpaddr(arg0, NULL);
    4.18  /**/			stat = (struct ssc_disk_stat *)__va(arg0);
    4.19  ///**/			if (stat->fd == last_fd) stat->count = last_count;
    4.20  /**/			stat->count = last_count;
    4.21 @@ -564,7 +564,7 @@ do_ssc(unsigned long ssc, struct pt_regs
    4.22  		arg1 = vcpu_get_gr(current,33);	// access rights
    4.23  if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
    4.24  		if (arg0) {	// metaphysical address
    4.25 -			arg0 = translate_domain_mpaddr(arg0);
    4.26 +			arg0 = translate_domain_mpaddr(arg0, NULL);
    4.27  			retval = ia64_ssc(arg0,arg1,0,0,ssc);
    4.28  		}
    4.29  		else retval = -1L;
    4.30 @@ -581,7 +581,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
    4.31  			unsigned long mpaddr;
    4.32  			long len;
    4.33  
    4.34 -			arg2 = translate_domain_mpaddr(arg2);
    4.35 +			arg2 = translate_domain_mpaddr(arg2, NULL);
    4.36  			req = (struct ssc_disk_req *) __va(arg2);
    4.37  			req->len &= 0xffffffffL;	// avoid strange bug
    4.38  			len = req->len;
    4.39 @@ -592,7 +592,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
    4.40  			retval = 0;
    4.41  			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
    4.42  				// do partial page first
    4.43 -				req->addr = translate_domain_mpaddr(mpaddr);
    4.44 +				req->addr = translate_domain_mpaddr(mpaddr, NULL);
    4.45  				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
    4.46  				len -= req->len; mpaddr += req->len;
    4.47  				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
    4.48 @@ -602,7 +602,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
    4.49  //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
    4.50  			}
    4.51  			if (retval >= 0) while (len > 0) {
    4.52 -				req->addr = translate_domain_mpaddr(mpaddr);
    4.53 +				req->addr = translate_domain_mpaddr(mpaddr, NULL);
    4.54  				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
    4.55  				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
    4.56  				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
     5.1 --- a/xen/arch/ia64/xen/mm.c	Mon Jun 19 12:54:34 2006 -0600
     5.2 +++ b/xen/arch/ia64/xen/mm.c	Mon Jun 19 13:00:37 2006 -0600
     5.3 @@ -245,7 +245,7 @@ gmfn_to_mfn_foreign(struct domain *d, un
     5.4  	if (d == dom0)
     5.5  		return(gpfn);
     5.6  #endif
     5.7 -	pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
     5.8 +	pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
     5.9  	if (!pte) {
    5.10  		panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
    5.11  	}
    5.12 @@ -256,7 +256,8 @@ gmfn_to_mfn_foreign(struct domain *d, un
    5.13  // address, convert the pte for a physical address for (possibly different)
    5.14  // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
    5.15  // PAGE_SIZE!)
    5.16 -u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
    5.17 +u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
    5.18 +                         struct p2m_entry* entry)
    5.19  {
    5.20  	struct domain *d = current->domain;
    5.21  	ia64_itir_t itir = {.itir = itir__};
    5.22 @@ -298,7 +299,7 @@ u64 translate_domain_pte(u64 pteval, u64
    5.23  			       address, pteval, itir.itir);
    5.24  	}
    5.25  #endif
    5.26 -	pteval2 = lookup_domain_mpa(d,mpaddr);
    5.27 +	pteval2 = lookup_domain_mpa(d, mpaddr, entry);
    5.28  	arflags  = pteval  & _PAGE_AR_MASK;
    5.29  	arflags2 = pteval2 & _PAGE_AR_MASK;
    5.30  	if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
    5.31 @@ -311,7 +312,7 @@ u64 translate_domain_pte(u64 pteval, u64
    5.32  		        pteval2, arflags2, mpaddr);
    5.33  #endif
    5.34  		pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
    5.35 -}
    5.36 +    }
    5.37  
    5.38  	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    5.39  	pteval2 |= (pteval & _PAGE_ED);
    5.40 @@ -321,7 +322,8 @@ u64 translate_domain_pte(u64 pteval, u64
    5.41  }
    5.42  
    5.43  // given a current domain metaphysical address, return the physical address
    5.44 -unsigned long translate_domain_mpaddr(unsigned long mpaddr)
    5.45 +unsigned long translate_domain_mpaddr(unsigned long mpaddr,
    5.46 +                                      struct p2m_entry* entry)
    5.47  {
    5.48  	unsigned long pteval;
    5.49  
    5.50 @@ -333,7 +335,7 @@ unsigned long translate_domain_mpaddr(un
    5.51  		}
    5.52  	}
    5.53  #endif
    5.54 -	pteval = lookup_domain_mpa(current->domain,mpaddr);
    5.55 +	pteval = lookup_domain_mpa(current->domain, mpaddr, entry);
    5.56  	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
    5.57  }
    5.58  
    5.59 @@ -484,23 +486,10 @@ unsigned long
    5.60          return GPFN_INV_MASK;
    5.61      return INVALID_MFN;
    5.62  }
    5.63 -
    5.64 -unsigned long
    5.65 -__lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
    5.66 -{
    5.67 -    unsigned long machine = ____lookup_domain_mpa(d, mpaddr);
    5.68 -    if (machine != INVALID_MFN)
    5.69 -        return machine;
    5.70 -
    5.71 -    printk("%s: d 0x%p id %d current 0x%p id %d\n",
    5.72 -           __func__, d, d->domain_id, current, current->vcpu_id);
    5.73 -    printk("%s: bad mpa 0x%lx (max_pages 0x%lx)\n",
    5.74 -           __func__, mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
    5.75 -    return INVALID_MFN;
    5.76 -}
    5.77  #endif
    5.78  
    5.79 -unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
    5.80 +unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
    5.81 +                                struct p2m_entry* entry)
    5.82  {
    5.83      volatile pte_t *pte;
    5.84  
    5.85 @@ -521,6 +510,8 @@ unsigned long lookup_domain_mpa(struct d
    5.86          pte_t tmp_pte = *pte;// pte is volatile. copy the value.
    5.87          if (pte_present(tmp_pte)) {
    5.88  //printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
    5.89 +            if (entry != NULL)
    5.90 +                p2m_entry_set(entry, pte, tmp_pte);
    5.91              return pte_val(tmp_pte);
    5.92          } else if (VMX_DOMAIN(d->vcpu[0]))
    5.93              return GPFN_INV_MASK;
    5.94 @@ -535,6 +526,8 @@ unsigned long lookup_domain_mpa(struct d
    5.95          printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
    5.96                 mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
    5.97  
    5.98 +    if (entry != NULL)
    5.99 +        p2m_entry_set(entry, NULL, __pte(0));
   5.100      //XXX This is a work around until the emulation memory access to a region
   5.101      //    where memory or device are attached is implemented.
   5.102      return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   5.103 @@ -544,7 +537,7 @@ unsigned long lookup_domain_mpa(struct d
   5.104  #if 1
   5.105  void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
   5.106  {
   5.107 -    unsigned long pte = lookup_domain_mpa(d,mpaddr);
   5.108 +    unsigned long pte = lookup_domain_mpa(d, mpaddr, NULL);
   5.109      unsigned long imva;
   5.110  
   5.111      pte &= _PAGE_PPN_MASK;
     6.1 --- a/xen/arch/ia64/xen/vcpu.c	Mon Jun 19 12:54:34 2006 -0600
     6.2 +++ b/xen/arch/ia64/xen/vcpu.c	Mon Jun 19 13:00:37 2006 -0600
     6.3 @@ -29,7 +29,6 @@ extern void getfpreg (unsigned long regn
     6.4  extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
     6.5  
     6.6  extern void panic_domain(struct pt_regs *, const char *, ...);
     6.7 -extern unsigned long translate_domain_mpaddr(unsigned long);
     6.8  extern IA64_BUNDLE __get_domain_bundle(UINT64);
     6.9  
    6.10  typedef	union {
    6.11 @@ -1978,18 +1977,24 @@ IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 
    6.12  {
    6.13  	unsigned long pteval, logps = itir_ps(itir);
    6.14  	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
    6.15 +	struct p2m_entry entry;
    6.16  
    6.17  	if (logps < PAGE_SHIFT) {
    6.18  		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
    6.19  		//FIXME: kill domain here
    6.20  		while(1);
    6.21  	}
    6.22 +again:
    6.23  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    6.24 -	pteval = translate_domain_pte(pte, ifa, itir, &logps);
    6.25 +	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
    6.26  	if (!pteval) return IA64_ILLOP_FAULT;
    6.27  	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
    6.28  	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
    6.29  	if (swap_rr0) set_metaphysical_rr0();
    6.30 +	if (p2m_entry_retry(&entry)) {
    6.31 +		vcpu_flush_tlb_vhpt_range(ifa & ((1 << logps) - 1), logps);
    6.32 +		goto again;
    6.33 +	}
    6.34  	return IA64_NO_FAULT;
    6.35  }
    6.36  
    6.37 @@ -1997,6 +2002,7 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 
    6.38  {
    6.39  	unsigned long pteval, logps = itir_ps(itir);
    6.40  	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
    6.41 +	struct p2m_entry entry;
    6.42  
    6.43  	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
    6.44  	if (logps < PAGE_SHIFT) {
    6.45 @@ -2004,13 +2010,18 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 
    6.46  		//FIXME: kill domain here
    6.47  		while(1);
    6.48  	}
    6.49 +again:
    6.50  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    6.51 -	pteval = translate_domain_pte(pte, ifa, itir, &logps);
    6.52 +	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
    6.53  	// FIXME: what to do if bad physical address? (machine check?)
    6.54  	if (!pteval) return IA64_ILLOP_FAULT;
    6.55  	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
    6.56  	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
    6.57  	if (swap_rr0) set_metaphysical_rr0();
    6.58 +	if (p2m_entry_retry(&entry)) {
    6.59 +		vcpu_flush_tlb_vhpt_range(ifa & ((1 << logps) - 1), logps);
    6.60 +		goto again;
    6.61 +	}
    6.62  	return IA64_NO_FAULT;
    6.63  }
    6.64  
    6.65 @@ -2040,10 +2051,14 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
    6.66  	UINT64 mpaddr, paddr;
    6.67  	IA64FAULT fault;
    6.68  
    6.69 +again:
    6.70  	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
    6.71  	if (fault == IA64_NO_FAULT) {
    6.72 -		paddr = translate_domain_mpaddr(mpaddr);
    6.73 +		struct p2m_entry entry;
    6.74 +		paddr = translate_domain_mpaddr(mpaddr, &entry);
    6.75  		ia64_fc(__va(paddr));
    6.76 +		if (p2m_entry_retry(&entry))
    6.77 +			goto again;
    6.78  	}
    6.79  	return fault;
    6.80  }
     7.1 --- a/xen/include/asm-ia64/domain.h	Mon Jun 19 12:54:34 2006 -0600
     7.2 +++ b/xen/include/asm-ia64/domain.h	Mon Jun 19 13:00:37 2006 -0600
     7.3 @@ -12,10 +12,34 @@
     7.4  #include <xen/cpumask.h>
     7.5  #include <asm/fpswa.h>
     7.6  
     7.7 +struct p2m_entry {
     7.8 +    volatile pte_t*     pte;
     7.9 +    pte_t               used;
    7.10 +};
    7.11 +
    7.12 +static inline void
    7.13 +p2m_entry_set(struct p2m_entry* entry, volatile pte_t* pte, pte_t used)
    7.14 +{
    7.15 +    entry->pte  = pte;
    7.16 +    entry->used = used;
    7.17 +}
    7.18 +
    7.19 +static inline int
    7.20 +p2m_entry_retry(struct p2m_entry* entry)
    7.21 +{
     7.22 +    //XXX see lookup_domain_pte().
    7.23 +    //    NULL is set for invalid gpaddr for the time being.
    7.24 +    if (entry->pte == NULL)
    7.25 +        return 0;
    7.26 +
    7.27 +    return (pte_val(*entry->pte) != pte_val(entry->used));
    7.28 +}
    7.29 +
    7.30  extern void domain_relinquish_resources(struct domain *);
    7.31  
    7.32  /* given a current domain metaphysical address, return the physical address */
    7.33 -extern unsigned long translate_domain_mpaddr(unsigned long mpaddr);
    7.34 +extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
    7.35 +                                             struct p2m_entry* entry);
    7.36  
    7.37  /* Flush cache of domain d.
    7.38     If sync_only is true, only synchronize I&D caches,
     8.1 --- a/xen/include/asm-ia64/mm.h	Mon Jun 19 12:54:34 2006 -0600
     8.2 +++ b/xen/include/asm-ia64/mm.h	Mon Jun 19 13:00:37 2006 -0600
     8.3 @@ -150,8 +150,6 @@ extern unsigned long max_page;
     8.4  extern void __init init_frametable(void);
     8.5  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
     8.6  
     8.7 -extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
     8.8 -
     8.9  static inline void put_page(struct page_info *page)
    8.10  {
    8.11      u32 nx, x, y = page->count_info;
    8.12 @@ -428,7 +426,8 @@ extern void assign_new_domain0_page(stru
    8.13  extern void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
    8.14  extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
    8.15  extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
    8.16 -extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
    8.17 +struct p2m_entry;
    8.18 +extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
    8.19  extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
    8.20  
    8.21  #ifdef CONFIG_XEN_IA64_DOM0_VP
    8.22 @@ -436,7 +435,6 @@ extern unsigned long assign_domain_mmio_
    8.23  extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
    8.24  int domain_page_mapped(struct domain *d, unsigned long mpaddr);
    8.25  int efi_mmio(unsigned long physaddr, unsigned long size);
    8.26 -extern unsigned long __lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
    8.27  extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
    8.28  extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
    8.29  extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
    8.30 @@ -445,7 +443,7 @@ extern unsigned long dom0vp_add_physmap(
    8.31  
    8.32  extern volatile unsigned long *mpt_table;
    8.33  extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
    8.34 -extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps);
    8.35 +extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
    8.36  #define machine_to_phys_mapping	mpt_table
    8.37  
    8.38  #define INVALID_M2P_ENTRY        (~0UL)
    8.39 @@ -466,7 +464,7 @@ extern u64 translate_domain_pte(u64 ptev
    8.40      gmfn_to_mfn_foreign((_d), (gpfn))
    8.41  
    8.42  #define __gpfn_invalid(_d, gpfn)			\
    8.43 -	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK)
    8.44 +	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) & GPFN_INV_MASK)
    8.45  
    8.46  #define __gmfn_valid(_d, gpfn)	!__gpfn_invalid(_d, gpfn)
    8.47  
    8.48 @@ -474,7 +472,7 @@ extern u64 translate_domain_pte(u64 ptev
    8.49  #define __gpfn_is_io(_d, gpfn)				\
    8.50  ({                                          \
    8.51      u64 pte, ret=0;                                \
    8.52 -    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
    8.53 +    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);	\
    8.54      if(!(pte&GPFN_INV_MASK))        \
    8.55          ret = pte & GPFN_IO_MASK;        \
    8.56      ret;                \
    8.57 @@ -483,7 +481,7 @@ extern u64 translate_domain_pte(u64 ptev
    8.58  #define __gpfn_is_mem(_d, gpfn)				\
    8.59  ({                                          \
    8.60      u64 pte, ret=0;                                \
    8.61 -    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
    8.62 +    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);		   \
    8.63      if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM))   \
    8.64          ret = 1;             \
    8.65      ret;                \