mfn_t gmfn, sl1mfn = _mfn(0);
shadow_l1e_t sl1e, *ptr_sl1e;
paddr_t gpa;
+#ifdef CONFIG_HVM
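+ /* Emulation state, used only by the HVM-only emulation paths below. */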
struct sh_emulate_ctxt emul_ctxt;
const struct x86_emulate_ops *emul_ops;
+#endif
int r;
p2m_type_t p2mt;
uint32_t rc, error_code;
* caught by user-mode page-table check above.
*/
emulate_readonly:
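+ /*
+ * The emulation code below is HVM-only (and compiled out for
+ * !CONFIG_HVM builds), so a PV guest must never reach this point.
+ */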
+ if ( !is_hvm_domain(d) )
+ {
+ ASSERT_UNREACHABLE();
+ goto not_a_shadow_fault;
+ }
+#ifdef CONFIG_HVM
/* Unshadow if we are writing to a toplevel pagetable that is
* flagged as a dying process, and that is not currently used. */
if ( sh_mfn_is_a_page_table(gmfn) && is_hvm_domain(d) &&
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
early_emulation:
#endif
- if ( is_hvm_domain(d) )
+ /*
+ * If we are in the middle of injecting an exception or interrupt then
+ * we should not emulate: the fault is a side effect of the processor
+ * trying to deliver the exception (e.g. IDT/GDT accesses, pushing the
+ * exception frame onto the stack). Furthermore it is almost
+ * certainly the case that the handler stack is currently considered
+ * to be a page table, so we should unshadow the faulting page before
+ * exiting.
+ */
+ if ( unlikely(hvm_event_pending(v)) )
{
- /*
- * If we are in the middle of injecting an exception or interrupt then
- * we should not emulate: the fault is a side effect of the processor
- * trying to deliver the exception (e.g. IDT/GDT accesses, pushing the
- * exception frame onto the stack). Furthermore it is almost
- * certainly the case the handler stack is currently considered to be
- * a page table, so we should unshadow the faulting page before
- * exiting.
- */
- if ( unlikely(hvm_event_pending(v)) )
- {
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
- if ( fast_emul )
- {
- perfc_incr(shadow_fault_fast_emulate_fail);
- v->arch.paging.last_write_emul_ok = 0;
- }
-#endif
- sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
- trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
- va, gfn);
- return EXCRET_fault_fixed;
+ if ( fast_emul )
+ {
+ perfc_incr(shadow_fault_fast_emulate_fail);
+ v->arch.paging.last_write_emul_ok = 0;
}
+#endif
+ sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
+ trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
+ va, gfn);
+ return EXCRET_fault_fixed;
}
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", regs->rip, regs->rsp);
emul_ops = shadow_init_emulation(&emul_ctxt, regs, GUEST_PTE_SIZE);
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
-
-#ifdef CONFIG_HVM
if ( r == X86EMUL_EXCEPTION )
{
- ASSERT(is_hvm_domain(d));
/*
* This emulation covers writes to shadow pagetables. We tolerate #PF
* (from accesses spanning pages, concurrent paging updated from
r = X86EMUL_UNHANDLEABLE;
}
}
-#endif
/*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
emulate_done:
SHADOW_PRINTK("emulated\n");
return EXCRET_fault_fixed;
+#endif /* CONFIG_HVM */
mmio:
if ( !guest_mode(regs) )
int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
+#ifdef CONFIG_HVM
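+ /* Only used by the HVM-only pagetable_dying snapshot below. */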
struct vcpu *curr = current;
+#endif
int r;
shadow_l1e_t *sl1p, sl1e;
struct page_info *sp;
ASSERT(mfn_valid(gmfn));
ASSERT(mfn_valid(smfn));
+#ifdef CONFIG_HVM
/* Remember if we've been told that this process is being torn down */
if ( curr->domain == d && is_hvm_domain(d) )
curr->arch.paging.shadow.pagetable_dying
= mfn_to_page(gmfn)->pagetable_dying;
+#endif
sp = mfn_to_page(smfn);
}
#endif /* 64bit guest */
+#ifdef CONFIG_HVM
/**************************************************************************/
/* Function for the guest to inform us that a process is being torn
* down. We remember that as a hint to unshadow its pagetables soon,
put_gfn(d, gpa >> PAGE_SHIFT);
}
#endif
+#endif /* CONFIG_HVM */
/**************************************************************************/
/* Audit tools */
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
.shadow.guess_wrmap = sh_guess_wrmap,
#endif
-#endif /* CONFIG_HVM */
.shadow.pagetable_dying = sh_pagetable_dying,
+#endif /* CONFIG_HVM */
.shadow.trace_emul_write_val = trace_emulate_write_val,
.shadow.shadow_levels = SHADOW_PAGING_LEVELS,
};
#define SHOPT_FAST_EMULATION 0x80 /* Fast write emulation */
#define SHOPT_OUT_OF_SYNC 0x100 /* Allow guest writes to L1 PTs */
+#ifdef CONFIG_HVM
#define SHADOW_OPTIMIZATIONS 0x1ff
+#else
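+/* Fast write emulation is part of the HVM-only write emulation path. */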
+#define SHADOW_OPTIMIZATIONS (0x1ff & ~SHOPT_FAST_EMULATION)
+#endif
/******************************************************************************
#endif
};
-#ifdef CONFIG_HVM
const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
unsigned int pte_size);
void shadow_continue_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
-#else
-static inline const struct x86_emulate_ops *shadow_init_emulation(
- struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
- unsigned int pte_size)
-{
- BUG();
- return NULL;
-}
-static inline void shadow_continue_emulation(
- struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
-{
- BUG();
-}
-#endif
/* Stop counting towards early unshadows, as we've seen a real page fault */
static inline void sh_reset_early_unshadow(struct vcpu *v)
/* OOS */
bool_t oos_active;
+#ifdef CONFIG_HVM
/* Has this domain ever used HVMOP_pagetable_dying? */
bool_t pagetable_dying_op;
+#endif
#ifdef CONFIG_PV
/* PV L1 Terminal Fault mitigation. */
unsigned long last_emulated_mfn_for_unshadow;
/* MFN of the last shadow that we shot a writeable mapping in */
unsigned long last_writeable_pte_smfn;
+#ifdef CONFIG_HVM
/* Last frame number that we emulated a write to. */
unsigned long last_emulated_frame;
/* Last MFN that we emulated a write successfully */
unsigned long last_emulated_mfn;
+#endif
/* Shadow out-of-sync: pages that this vcpu has let go out of sync */
mfn_t oos[SHADOW_OOS_PAGES];
unsigned long off[SHADOW_OOS_FIXUPS];
} oos_fixup[SHADOW_OOS_PAGES];
+#ifdef CONFIG_HVM
bool_t pagetable_dying;
#endif
+#endif
};
/************************************************/
const struct paging_mode *mode;
/* Nested Virtualization: paging mode of nested guest */
const struct paging_mode *nestedmode;
+#ifdef CONFIG_HVM
/* HVM guest: last emulate was to a pagetable */
unsigned int last_write_was_pt:1;
/* HVM guest: last write emulation succeeds */
unsigned int last_write_emul_ok:1;
+#endif
/* Translated guest: virtual TLB */
struct shadow_vtlb *vtlb;
spinlock_t vtlb_lock;