return page;
}
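
The hunks below mechanically replace Xen's legacy bool_t with C99 bool and the matching true/false literals. For readers unfamiliar with the old type: bool_t was a plain char typedef, so non-zero values were stored verbatim rather than normalized. A standalone illustration, not part of the patch (the typedef is reproduced from Xen's historical types.h):

#include <stdbool.h>
#include <stdio.h>

typedef char bool_t;    /* Xen's legacy definition, replaced by this patch */

int main(void)
{
    bool_t legacy = 2;  /* char: the value 2 is stored as-is */
    bool   modern = 2;  /* C99 _Bool: any non-zero value normalizes to 1 */

    printf("%d %d\n", legacy, modern);    /* prints "2 1" */
    return 0;
}
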
-bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
+bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
{
int rc;
- bool_t violation;
+ bool violation;
xenmem_access_t xma;
vm_event_request_t *req;
struct vcpu *v = current;
bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const vm_event_response_t *rsp)
{
xenmem_access_t access;
- bool violation = 1;
+ bool violation = true;
const struct vm_event_mem_access *data = &rsp->u.mem_access;
struct domain *d = v->domain;
struct p2m_domain *p2m = NULL;
break;
case XENMEM_access_rwx:
- violation = 0;
+ violation = false;
break;
}
}
return violation;
}
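
For orientation, the switch this hunk touches folds the page's xenmem_access_t into a single violation flag; only the rwx arm is visible above. A hedged sketch of the full mapping (the helper name and the r/w/x parameters are illustrative; xenmem_access_t comes from Xen's public memory.h, so this is a fragment, not a standalone program):

/*
 * Sketch only: how a page's xenmem_access_t maps onto the violation
 * flag for a fault described by read/write/execute bits.
 */
static bool access_is_violation(xenmem_access_t xma, bool r, bool w, bool x)
{
    switch ( xma )
    {
    case XENMEM_access_n:
        return r || w || x;   /* nothing is permitted */
    case XENMEM_access_r:
        return w || x;        /* reads alone are fine */
    case XENMEM_access_rw:
        return x;             /* only execution faults */
    case XENMEM_access_rwx:
        return false;         /* full access: never a violation */
    default:
        return true;          /* stay conservative for the other types */
    }
}
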
-bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
- struct npfec npfec,
- vm_event_request_t **req_ptr)
+bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct npfec npfec,
+ vm_event_request_t **req_ptr)
{
struct vcpu *v = current;
unsigned long gfn = gpa >> PAGE_SHIFT;
rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
ASSERT(rc == 0);
gfn_unlock(p2m, gfn, 0);
- return 1;
+ return true;
}
else if ( p2ma == p2m_access_n2rwx )
{
"no vm_event listener VCPU %d, dom %d\n",
v->vcpu_id, d->domain_id);
domain_crash(v->domain);
- return 0;
+ return false;
}
else
{
ASSERT(rc == 0);
}
gfn_unlock(p2m, gfn, 0);
- return 1;
+ return true;
}
}
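
The returns converted above sit in the two special-case paths of the x86 handler. A condensed sketch of that flow, reconstructed around the lines shown (the rx2rw condition and the locals p2m, gfn, mfn, p2mt, p2ma, rc and npfec are assumed from the surrounding function; this is a summary, not a drop-in replacement):

if ( npfec.write_access && p2ma == p2m_access_rx2rw )
{
    /* rx2rw: promote to rw on the first write and retry silently. */
    rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt,
                        p2m_access_rw, -1);
    ASSERT(rc == 0);
    gfn_unlock(p2m, gfn, 0);
    return true;              /* handled; no event is sent */
}
else if ( p2ma == p2m_access_n2rwx )
    /* n2rwx: open the page up to rwx, but still notify the listener. */
    rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt,
                        p2m_access_rwx, -1);
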
static inline
bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const vm_event_response_t *rsp)
{
/* Not supported on ARM. */
- return 0;
+ return false;
}
/* vm_event and mem_access are supported on any ARM guest */
-static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+static inline bool p2m_mem_access_sanity_check(struct domain *d)
{
- return 1;
+ return true;
}
/*
* Send mem event based on the access. Boolean return value indicates if trap
* needs to be injected into guest.
*/
-bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
+bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
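
A hedged sketch of how an ARM fault handler would consume that boolean, per the comment above (the injector name is hypothetical; only p2m_mem_access_check() is from this patch):

if ( p2m_mem_access_check(gpa, gva, npfec) )
    inject_guest_abort(regs, gva);  /* hypothetical: reinject the trap */
/* else: a listener took the event (or rights were relaxed); just retry */
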
struct page_info*
p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
* ring. Once having released get_gfn* locks caller must also xfree the
* request.
*/
-bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
- struct npfec npfec,
- vm_event_request_t **req_ptr);
+bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct npfec npfec,
+ vm_event_request_t **req_ptr);
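
The ownership rule in the comment above (the caller queues the request and frees it) would look roughly like this at a call site. vm_event_put_request() is the existing enqueue primitive; the vm_event_monitor field name and the surrounding locals are assumptions for the example:

vm_event_request_t *req = NULL;
bool handled = p2m_mem_access_check(gpa, gla, npfec, &req);

/* Drop the get_gfn* locks before touching the ring, per the comment. */
if ( req )
{
    vm_event_put_request(d, d->vm_event_monitor, req);
    xfree(req);          /* the request is always the caller's to free */
}
if ( !handled )
    return;              /* vCPU paused or domain already crashed */
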
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const vm_event_response_t *rsp);
/* Sanity check for mem_access hardware support */
-static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+static inline bool p2m_mem_access_sanity_check(struct domain *d)
{
return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
}
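
Since the x86 predicate ties mem_access to HVM guests running with VMX and HAP (i.e. an EPT-backed p2m), enable paths would typically gate on it; a minimal sketch (the error code and surrounding context are illustrative, not from this patch):

if ( !p2m_mem_access_sanity_check(d) )
    return -EOPNOTSUPP;  /* no hardware support for access restrictions */
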