--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ ... @@ static int hvm_memory_event_traps(long p, uint32_t reason,
 {
     struct vcpu* v = current;
     struct domain *d = v->domain;
-    mem_event_request_t req;
+    mem_event_request_t req = { .reason = reason };
     int rc;
 
     if ( !(p & HVMPME_MODE_MASK) )
@@ ... @@ static int hvm_memory_event_traps(long p, uint32_t reason,
     else if ( rc < 0 )
         return rc;
 
-    memset(&req, 0, sizeof(req));
-    req.reason = reason;
-
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ ... @@
 void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
                               p2m_type_t p2mt)
 {
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
 
     /* We allow no ring in this unique case, because it won't affect
      * correctness of the guest execution at this point. If this is the only
@@ ... @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
         return;
 
     /* Send release notification to pager */
-    memset(&req, 0, sizeof(req));
-    req.gfn = gfn;
     req.flags = MEM_EVENT_FLAG_DROP_PAGE;
 
     /* Update stats unless the page hasn't yet been evicted */
@@ ... @@
 void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
     p2m_type_t p2mt;
     p2m_access_t a;
     mfn_t mfn;
@@ ... @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     else if ( rc < 0 )
         return;
 
-    memset(&req, 0, sizeof(req));
-
     /* Fix p2m mapping */
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ ... @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     }
 
     /* Send request to pager */
-    req.gfn = gfn;
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
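
The entire patch turns on one C99 guarantee: in a designated initializer, every member that is not named explicitly is zero-initialized, as if it had static storage duration, so the memset()-then-assign pairs removed above were redundant. Below is a minimal standalone sketch of that equivalence; the struct is a hypothetical stand-in whose field names mirror the patch, not the real mem_event_request_t layout.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in; field names borrowed from the patch above. */
typedef struct {
    unsigned int reason;
    unsigned int flags;
    unsigned long gfn;
    int vcpu_id;
} request_t;

int main(void)
{
    /* Old style: clear the whole struct, then set the one live field. */
    request_t a;
    memset(&a, 0, sizeof(a));
    a.reason = 3;

    /* New style: members not named (.flags, .gfn, .vcpu_id) are
     * implicitly zero-initialized by C99. */
    request_t b = { .reason = 3 };

    /* Field by field, the two objects are identical. */
    printf("%u %u %lu %d\n", a.reason, a.flags, a.gfn, a.vcpu_id);
    printf("%u %u %lu %d\n", b.reason, b.flags, b.gfn, b.vcpu_id);

    /* Caveat: unlike memset(), an initializer does not guarantee that
     * padding bytes are zeroed, which can matter if the struct is
     * later copied out wholesale. */
    return 0;
}

Hoisting req.gfn into the declaration also shortens the distance between declaring the request and filling in its one mandatory field; in the last hunk that assignment previously sat all the way down at the "Send request to pager" step.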