unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
(linear >> PAGE_SHIFT) + 1;
unsigned int i;
+ gfn_t gfn;
/*
 * mfn points to the next free slot. All used slots have a page reference
 * held on them.
 */
ASSERT(mfn_x(*mfn) == 0);
res = hvm_translate_get_page(curr, addr, true, pfec,
- &pfinfo, &page, NULL, &p2mt);
+ &pfinfo, &page, &gfn, &p2mt);
switch ( res )
{
err = NULL;
goto out;
+ case HVMTRANS_need_retry:
+ /*
+ * hvm_translate_get_page() does not currently return
+ * HVMTRANS_need_retry.
+ */
+ ASSERT_UNREACHABLE();
+ /* fall through */
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
err = ERR_PTR(~X86EMUL_RETRY);
ASSERT(p2mt == p2m_ram_logdirty || !p2m_is_readonly(p2mt));
}
+
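+ /*
+ * If the monitor asked for events on emulated accesses (send_event)
+ * and this access violates the p2m access rights, a mem_access
+ * vm_event has been sent; bail out with RETRY so the access is only
+ * completed after the monitor has handled the event.
+ */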
+ if ( unlikely(curr->arch.vm_event) &&
+ curr->arch.vm_event->send_event &&
+ hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
+ {
+ err = ERR_PTR(~X86EMUL_RETRY);
+ goto out;
+ }
}
/* Entire access within a single frame? */
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
+ case HVMTRANS_need_retry:
return X86EMUL_RETRY;
}
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
+ case HVMTRANS_need_retry:
return X86EMUL_RETRY;
}
xfree(buf);
- if ( rc == HVMTRANS_gfn_paged_out )
- return X86EMUL_RETRY;
- if ( rc == HVMTRANS_gfn_shared )
- return X86EMUL_RETRY;
- if ( rc != HVMTRANS_okay )
+ switch ( rc )
{
- gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
- PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
- sgpa, dgpa, *reps, bytes_per_rep);
- return X86EMUL_UNHANDLEABLE;
+ case HVMTRANS_need_retry:
+ /*
+ * hvm_copy_{from,to}_guest_phys() do not currently return
+ * HVMTRANS_need_retry.
+ */
+ ASSERT_UNREACHABLE();
+ /* fall through */
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
+ return X86EMUL_RETRY;
+ case HVMTRANS_okay:
+ return X86EMUL_OKAY;
}
- return X86EMUL_OKAY;
+ gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
+ PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
+ sgpa, dgpa, *reps, bytes_per_rep);
+
+ return X86EMUL_UNHANDLEABLE;
}
static int hvmemul_rep_stos(
switch ( rc )
{
+ case HVMTRANS_need_retry:
+ /*
+ * hvm_copy_to_guest_phys() does not currently return
+ * HVMTRANS_need_retry.
+ */
+ ASSERT_UNREACHABLE();
+ /* fall through */
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
return X86EMUL_RETRY;
return HVMTRANS_bad_gfn_to_mfn;
}
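+ /*
+ * p2m access rights are only checked for linear (not physical) copies
+ * done while the monitor has requested events; on a violation the
+ * page reference taken above is dropped and the caller is told to
+ * retry once the vm_event has been handled.
+ */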
+ if ( unlikely(v->arch.vm_event) &&
+ (flags & HVMCOPY_linear) &&
+ v->arch.vm_event->send_event &&
+ hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
+ {
+ put_page(page);
+ return HVMTRANS_need_retry;
+ }
+
p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( flags & HVMCOPY_to_guest )
case HVMTRANS_bad_linear_to_gfn:
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
+ case HVMTRANS_need_retry:
ASSERT_UNREACHABLE();
/* fall through */
default:
case HVMTRANS_bad_linear_to_gfn:
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
+ case HVMTRANS_need_retry:
ASSERT_UNREACHABLE();
/* fall through */
default:
*/
#include <xen/vm_event.h>
+#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <asm/hvm/monitor.h>
+#include <asm/altp2m.h>
#include <asm/monitor.h>
#include <asm/paging.h>
#include <asm/vm_event.h>
monitor_traps(current, 1, &req);
}
+/*
+ * Send a memory access vm_event based on pfec. Returns true if the event was
+ * sent, and false on p2m_get_mem_access() error, if there was no violation,
+ * or if sending the event failed. Assumes the caller will enable/disable
+ * arch.vm_event->send_event.
+ */
+bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
+ uint16_t kind)
+{
+ xenmem_access_t access;
+ struct vcpu *curr = current;
+ vm_event_request_t req = {};
+ paddr_t gpa = (gfn_to_gaddr(gfn) | (gla & ~PAGE_MASK));
+ int rc;
+
+ ASSERT(curr->arch.vm_event->send_event);
+
+ /*
+ * p2m_get_mem_access() can fail for an invalid MFN and return -ESRCH,
+ * in which case access must be restricted.
+ */
+ rc = p2m_get_mem_access(curr->domain, gfn, &access, altp2m_vcpu_idx(curr));
+
+ if ( rc == -ESRCH )
+ access = XENMEM_access_n;
+ else if ( rc )
+ return false;
+
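+ /*
+ * Flag the access as a violation when the operation indicated by pfec
+ * is not permitted by the current access rights: e.g. a write to an
+ * execute-only or read/execute page, or an instruction fetch from a
+ * page that is only readable or writable.
+ */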
+ switch ( access )
+ {
+ case XENMEM_access_x:
+ case XENMEM_access_rx:
+ if ( pfec & PFEC_write_access )
+ req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+ break;
+
+ case XENMEM_access_w:
+ case XENMEM_access_rw:
+ if ( pfec & PFEC_insn_fetch )
+ req.u.mem_access.flags = MEM_ACCESS_X;
+ break;
+
+ case XENMEM_access_r:
+ case XENMEM_access_n:
+ if ( pfec & PFEC_write_access )
+ req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+ if ( pfec & PFEC_insn_fetch )
+ req.u.mem_access.flags |= MEM_ACCESS_X;
+ break;
+
+ case XENMEM_access_wx:
+ case XENMEM_access_rwx:
+ case XENMEM_access_rx2rw:
+ case XENMEM_access_n2rwx:
+ case XENMEM_access_default:
+ break;
+ }
+
+ if ( !req.u.mem_access.flags )
+ return false; /* no violation */
+
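+ /*
+ * Record how the fault relates to the guest linear address: the access
+ * itself faulted at gla (npfec_kind_with_gla), or the fault was taken
+ * while walking the guest page tables (npfec_kind_in_gpt).
+ */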
+ if ( kind == npfec_kind_with_gla )
+ req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA |
+ MEM_ACCESS_GLA_VALID;
+ else if ( kind == npfec_kind_in_gpt )
+ req.u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT |
+ MEM_ACCESS_GLA_VALID;
+
+ req.reason = VM_EVENT_REASON_MEM_ACCESS;
+ req.u.mem_access.gfn = gfn_x(gfn);
+ req.u.mem_access.gla = gla;
+ req.u.mem_access.offset = gpa & ~PAGE_MASK;
+
+ return monitor_traps(curr, true, &req) >= 0;
+}
+
/*
* Local variables:
* mode: C
return true;
}
}
+
+ /*
+ * Try to avoid sending a mem event. Suppress events caused by page walks
+ * by emulating the access, but still check for mem_access violations.
+ */
if ( vm_event_check_ring(d->vm_event_monitor) &&
d->arch.monitor.inguest_pagefault_disabled &&
- npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
+ npfec.kind == npfec_kind_in_gpt )
{
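+ /*
+ * Make the emulation path send mem_access events for violating
+ * accesses (see hvm_monitor_check_p2m()); the flag is cleared again
+ * once emulation completes.
+ */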
+ v->arch.vm_event->send_event = true;
hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+ v->arch.vm_event->send_event = false;
return true;
}
return X86EMUL_UNHANDLEABLE;
case HVMTRANS_gfn_paged_out:
case HVMTRANS_gfn_shared:
+ case HVMTRANS_need_retry:
return X86EMUL_RETRY;
}
unsigned int err, uint64_t cr2);
bool hvm_monitor_emul_unimplemented(void);
+bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
+ uint16_t kind);
+
#endif /* __ASM_X86_HVM_MONITOR_H__ */
/*
HVMTRANS_unhandleable,
HVMTRANS_gfn_paged_out,
HVMTRANS_gfn_shared,
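+ /* A mem_access vm_event was sent; retry once it has been handled. */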
+ HVMTRANS_need_retry,
};
/*
bool set_gprs;
/* A sync vm_event has been sent and we're not done handling it. */
bool sync_event;
+ /* Send mem access events from the emulator. */
+ bool send_event;
};
int vm_event_init_domain(struct domain *d);