Switch the definition of INVALID_GFN to the typesafe gfn_t (i.e. _gfn(~0UL)); callers now compare against it with gfn_eq() and unbox it with gfn_x() wherever a raw unsigned long is still required.

Also take the opportunity to convert arch/x86/debug.c to the typesafe gfn.
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Kevin Tian <kevin.tian@intel.com>
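For reference, the conversion follows a single idiom throughout: comparisons against INVALID_GFN become gfn_eq(), and interfaces that still take a raw unsigned long receive gfn_x(). Below is a minimal standalone sketch of that idiom; the gfn_t/_gfn()/gfn_x()/gfn_eq() stand-ins only approximate the debug-build wrappers, and put_frame() is purely hypothetical.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins approximating the debug-build typesafe wrappers. */
typedef struct { unsigned long gfn; } gfn_t;
static inline gfn_t _gfn(unsigned long n) { return (gfn_t){ .gfn = n }; }
static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
static inline bool gfn_eq(gfn_t x, gfn_t y) { return gfn_x(x) == gfn_x(y); }

#define INVALID_GFN _gfn(~0UL)              /* was: (~0UL) */

/* Hypothetical legacy interface that still takes a raw frame number. */
static void put_frame(unsigned long raw_gfn)
{
    printf("put_frame(%#lx)\n", raw_gfn);
}

int main(void)
{
    gfn_t gfn = _gfn(42);

    /* Before: if ( gfn == INVALID_GFN ) -- no longer compiles with gfn_t. */
    if ( !gfn_eq(gfn, INVALID_GFN) )        /* typesafe comparison */
        put_frame(gfn_x(gfn));              /* unbox for legacy callers */

    return 0;
}
```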
}
/* If request to get default access. */
- if ( gfn_x(gfn) == INVALID_GFN )
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
*access = memaccess[p2m->default_access];
return 0;
p2m->mem_access_enabled = true;
/* If request to set default access. */
- if ( gfn_x(gfn) == INVALID_GFN )
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
p2m->default_access = a;
return 0;
/* Returns: mfn for the given (hvm guest) vaddr */
static mfn_t
-dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
- unsigned long *gfn)
+dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
{
mfn_t mfn;
uint32_t pfec = PFEC_page_present;
DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);
- *gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
- if ( *gfn == INVALID_GFN )
+ *gfn = _gfn(paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec));
+ if ( gfn_eq(*gfn, INVALID_GFN) )
{
DBGP2("kdb:bad gfn from gva_to_gfn\n");
return INVALID_MFN;
}
- mfn = get_gfn(dp, *gfn, &gfntype);
+ mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);
if ( p2m_is_readonly(gfntype) && toaddr )
{
DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
if ( mfn_eq(mfn, INVALID_MFN) )
{
- put_gfn(dp, *gfn);
+ put_gfn(dp, gfn_x(*gfn));
*gfn = INVALID_GFN;
}
char *va;
unsigned long addr = (unsigned long)gaddr;
mfn_t mfn;
- unsigned long gfn = INVALID_GFN, pagecnt;
+ gfn_t gfn = INVALID_GFN;
+ unsigned long pagecnt;
pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
}
unmap_domain_page(va);
- if ( gfn != INVALID_GFN )
- put_gfn(dp, gfn);
+ if ( !gfn_eq(gfn, INVALID_GFN) )
+ put_gfn(dp, gfn_x(gfn));
addr += pagecnt;
buf += pagecnt;
* gfn == INVALID_GFN indicates that the shared_info page was never mapped
* to the domain's address space and there is nothing to replace.
*/
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
goto exit_put_page;
if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) != mfn )
return rc;
pfn = _paddr >> PAGE_SHIFT;
}
- else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
+ else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
{
if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
return X86EMUL_RETRY;
npfn = paging_gva_to_gfn(curr, addr, &pfec);
/* Is it contiguous with the preceding PFNs? If not then we're done. */
- if ( (npfn == INVALID_GFN) || (npfn != (pfn + (reverse ? -i : i))) )
+ if ( (npfn == gfn_x(INVALID_GFN)) ||
+ (npfn != (pfn + (reverse ? -i : i))) )
{
if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
return X86EMUL_RETRY;
if ( done == 0 )
{
ASSERT(!reverse);
- if ( npfn != INVALID_GFN )
+ if ( npfn != gfn_x(INVALID_GFN) )
return X86EMUL_UNHANDLEABLE;
hvm_inject_page_fault(pfec, addr & PAGE_MASK);
return X86EMUL_EXCEPTION;
if ( flags & HVMCOPY_virt )
{
gfn = paging_gva_to_gfn(curr, addr, &pfec);
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
{
if ( pfec & PFEC_page_paged )
return HVMCOPY_gfn_paged_out;
count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
gfn = paging_gva_to_gfn(curr, addr, &pfec);
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
{
if ( pfec & PFEC_page_paged )
return HVMCOPY_gfn_paged_out;
a.u.enable_notify.vcpu_id != curr->vcpu_id )
rc = -EINVAL;
- if ( (gfn_x(vcpu_altp2m(curr).veinfo_gfn) != INVALID_GFN) ||
+ if ( !gfn_eq(vcpu_altp2m(curr).veinfo_gfn, INVALID_GFN) ||
mfn_eq(get_gfn_query_unlocked(curr->domain,
a.u.enable_notify.gfn, &p2mt), INVALID_MFN) )
return -EINVAL;
{
unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn.base;
- if ( gmfn != INVALID_GFN )
+ if ( gmfn != gfn_x(INVALID_GFN) )
set_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
}
if ( rc )
return rc;
- if ( bufioreq_pfn != INVALID_GFN )
+ if ( bufioreq_pfn != gfn_x(INVALID_GFN) )
rc = hvm_map_ioreq_page(s, 1, bufioreq_pfn);
if ( rc )
bool_t handle_bufioreq)
{
struct domain *d = s->domain;
- unsigned long ioreq_pfn = INVALID_GFN;
- unsigned long bufioreq_pfn = INVALID_GFN;
+ unsigned long ioreq_pfn = gfn_x(INVALID_GFN);
+ unsigned long bufioreq_pfn = gfn_x(INVALID_GFN);
int rc;
if ( is_default )
/* Walk the guest-supplied NPT table, just as if it were a pagetable */
gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
return NESTEDHVM_PAGEFAULT_INJECT;
*L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK);
static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
{
bool_t rc = 0, writable;
- unsigned long gfn = gfn_x(vcpu_altp2m(v).veinfo_gfn);
+ gfn_t gfn = vcpu_altp2m(v).veinfo_gfn;
ve_info_t *veinfo;
- if ( gfn == INVALID_GFN )
+ if ( gfn_eq(gfn, INVALID_GFN) )
return 0;
- veinfo = hvm_map_guest_frame_rw(gfn, 0, &writable);
+ veinfo = hvm_map_guest_frame_rw(gfn_x(gfn), 0, &writable);
if ( !veinfo )
return 0;
if ( !writable || veinfo->semaphore != 0 )
struct altp2mvcpu *av = &vcpu_altp2m(v);
av->p2midx = INVALID_ALTP2M;
- av->veinfo_gfn = _gfn(INVALID_GFN);
+ av->veinfo_gfn = INVALID_GFN;
}
void
if ( top_page )
put_page(top_page);
p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
if ( p2m_is_shared(p2mt) )
{
pfec[0] = PFEC_page_shared;
if ( top_page )
put_page(top_page);
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
if ( !top_page )
{
ASSERT(p2m_is_hostp2m(p2m));
pfec[0] = PFEC_page_paged;
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
if ( p2m_is_shared(p2mt) )
{
pfec[0] = PFEC_page_shared;
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
if ( page_order )
if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
pfec[0] &= ~PFEC_insn_fetch;
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
ept_walk_t gw;
rwx_acc &= EPTE_RWX_MASK;
- *l1gfn = INVALID_GFN;
+ *l1gfn = gfn_x(INVALID_GFN);
rc = nept_walk_tables(v, l2ga, &gw);
switch ( rc )
unsigned int idx = (mrp->idx + i++) % ARRAY_SIZE(mrp->list);
unsigned long gfn = mrp->list[idx];
- if ( gfn != INVALID_GFN )
+ if ( gfn != gfn_x(INVALID_GFN) )
{
if ( gfn & POD_LAST_SUPERPAGE )
{
else
p2m_pod_zero_check(p2m, &gfn, 1);
- mrp->list[idx] = INVALID_GFN;
+ mrp->list[idx] = gfn_x(INVALID_GFN);
}
} while ( (p2m->pod.count == 0) && (i < ARRAY_SIZE(mrp->list)) );
{
struct pod_mrp_list *mrp = &p2m->pod.mrp;
- ASSERT(gfn != INVALID_GFN);
+ ASSERT(gfn != gfn_x(INVALID_GFN));
mrp->list[mrp->idx++] =
gfn | (order == PAGE_ORDER_2M ? POD_LAST_SUPERPAGE : 0);
p2m->np2m_base = P2M_BASE_EADDR;
for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
- p2m->pod.mrp.list[i] = INVALID_GFN;
+ p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
if ( hap_enabled(d) && cpu_has_vmx )
ret = ept_p2m_init(p2m);
}
/* If request to set default access. */
- if ( gfn_x(gfn) == INVALID_GFN )
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
p2m->default_access = a;
return 0;
};
/* If request to get default access. */
- if ( gfn_x(gfn) == INVALID_GFN )
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
*access = memaccess[p2m->default_access];
return 0;
mode = paging_get_nestedmode(v);
l2_gfn = mode->gva_to_gfn(v, p2m, va, pfec);
- if ( l2_gfn == INVALID_GFN )
- return INVALID_GFN;
+ if ( l2_gfn == gfn_x(INVALID_GFN) )
+ return gfn_x(INVALID_GFN);
/* translate l2 guest gfn into l1 guest gfn */
rv = nestedhap_walk_L1_p2m(v, l2_gfn, &l1_gfn, &l1_page_order, &l1_p2ma,
!!(*pfec & PFEC_insn_fetch));
if ( rv != NESTEDHVM_PAGEFAULT_DONE )
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
/*
* Sanity check that l1_gfn can be used properly as a 4K mapping, even
struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
struct ept_data *ept;
- p2m->min_remapped_gfn = INVALID_GFN;
+ p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
p2m->max_remapped_gfn = 0;
ept = &p2m->ept;
ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
mfn = ap2m->get_entry(ap2m, gfn_x(old_gfn), &t, &a, 0, NULL, NULL);
- if ( gfn_x(new_gfn) == INVALID_GFN )
+ if ( gfn_eq(new_gfn, INVALID_GFN) )
{
if ( mfn_valid(mfn) )
p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
/* Uninit and reinit ept to force TLB shootdown */
ept_p2m_uninit(p2m);
ept_p2m_init(p2m);
- p2m->min_remapped_gfn = INVALID_GFN;
+ p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
p2m->max_remapped_gfn = 0;
}
/* Translate the VA to a GFN. */
gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
{
if ( is_hvm_vcpu(v) )
hvm_inject_page_fault(pfec, vaddr);
*/
if ( is_hvm_vcpu(v) && !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
pfec[0] &= ~PFEC_insn_fetch;
- return INVALID_GFN;
+ return gfn_x(INVALID_GFN);
}
gfn = guest_walk_to_gfn(&gw);
unsigned long va, uint32_t pfec)
{
unsigned long page_number = va >> PAGE_SHIFT;
- unsigned long frame_number = INVALID_GFN;
+ unsigned long frame_number = gfn_x(INVALID_GFN);
int i = vtlb_hash(page_number);
spin_lock(&v->arch.paging.vtlb_lock);
unsigned long old_root_mfn;
struct domain_iommu *hd = dom_iommu(d);
- if ( gfn == INVALID_GFN )
+ if ( gfn == gfn_x(INVALID_GFN) )
return -EADDRNOTAVAIL;
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
if ( iommu_domid == -1 )
continue;
- if ( page_count != 1 || gfn == INVALID_GFN )
+ if ( page_count != 1 || gfn == gfn_x(INVALID_GFN) )
rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
0, flush_dev_iotlb);
else
static int __must_check iommu_flush_iotlb_all(struct domain *d)
{
- return iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
+ return iommu_flush_iotlb(d, gfn_x(INVALID_GFN), 0, 0);
}
/* clear one page's page table */
unsigned long mfn = page_to_mfn(page);
unsigned long gfn = mfn_to_gmfn(d, mfn);
- if ( gfn != INVALID_GFN )
+ if ( gfn != gfn_x(INVALID_GFN) )
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
#error GUEST_PAGING_LEVELS not defined
#endif
-#define VALID_GFN(m) (m != INVALID_GFN)
+#define VALID_GFN(m) (m != gfn_x(INVALID_GFN))
static inline int
valid_gfn(gfn_t m)
guest_walk_to_gfn(walk_t *gw)
{
if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
- return _gfn(INVALID_GFN);
+ return INVALID_GFN;
return guest_l1e_get_gfn(gw->l1e);
}
#define NR_POD_MRP_ENTRIES 32
/* Encode ORDER_2M superpage in top bit of GFN */
-#define POD_LAST_SUPERPAGE (INVALID_GFN & ~(INVALID_GFN >> 1))
+#define POD_LAST_SUPERPAGE (gfn_x(INVALID_GFN) & ~(gfn_x(INVALID_GFN) >> 1))
unsigned long list[NR_POD_MRP_ENTRIES];
unsigned int idx;
TYPE_SAFE(unsigned long, gfn);
#define PRI_gfn "05lx"
-#define INVALID_GFN (~0UL)
+#define INVALID_GFN _gfn(~0UL)
#ifndef gfn_t
#define gfn_t /* Grep fodder: gfn_t, _gfn() and gfn_x() are defined above */
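For readers unfamiliar with the TYPE_SAFE() machinery the last hunk relies on, here is a rough sketch of what it expands to; this only approximates xen/include/xen/typesafe.h and the exact macro may differ.

```c
/* Approximate expansion of TYPE_SAFE(unsigned long, gfn). */
#ifndef NDEBUG
/* Debug builds: a struct wrapper, so mixing gfn_t and raw longs fails to compile. */
typedef struct { unsigned long gfn; } gfn_t;
static inline gfn_t _gfn(unsigned long n) { return (gfn_t){ .gfn = n }; }
static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
#else
/* Release builds: a plain typedef, so the abstraction costs nothing at runtime. */
typedef unsigned long gfn_t;
static inline gfn_t _gfn(unsigned long n) { return n; }
static inline unsigned long gfn_x(gfn_t g) { return g; }
#endif
```

This is also why the new INVALID_GFN definition forces the gfn_x(INVALID_GFN) unboxing seen above at sites that still store raw unsigned longs (the PoD MRP list, the IOMMU flush paths, and so on).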