#endif /* SHADOW_AUDIT */
+#ifdef CONFIG_HVM
/**************************************************************************/
/* x86 emulator support for the shadow code
*/
.cmpxchg = hvm_emulate_cmpxchg,
.cpuid = hvmemul_cpuid,
};
+#endif
const struct x86_emulate_ops *shadow_init_emulation(
struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
unsigned int pte_size)
{
+#ifdef CONFIG_HVM
struct segment_register *creg, *sreg;
struct vcpu *v = current;
unsigned long addr;
? sizeof(sh_ctxt->insn_buf) : 0;
return &hvm_shadow_emulator_ops;
+#else
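+ /* Shadow-mode emulation is only ever invoked for HVM guests. */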
+ BUG();
+ return NULL;
+#endif
}
/* Update an initialized emulation context to prepare for the next
void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
struct cpu_user_regs *regs)
{
+#ifdef CONFIG_HVM
struct vcpu *v = current;
unsigned long addr, diff;
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->rip;
}
+#else
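+ /* As above: shadow emulation contexts only exist for HVM guests. */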
+ BUG();
+#endif
}
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
+#ifdef CONFIG_HVM
/**************************************************************************/
/* Handling guest writes to pagetables. */
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}
+#endif
/**************************************************************************/
/* Hash table for storing the guest->shadow mappings.
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
== (is_xen_heap_page(page) ||
- is_ioreq_server_page(d, page)))) )
+ (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
{
SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
"c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn),
page->count_info, page->u.inuse.type_info,
- !!is_xen_heap_page(page), is_ioreq_server_page(d, page));
+ !!is_xen_heap_page(page),
+ is_hvm_domain(d) && is_ioreq_server_page(d, page));
}
}
trace_shadow_gen(TRC_SHADOW_FAST_PROPAGATE, va);
return 0;
}
- else
- {
- /* Magic MMIO marker: extract gfn for MMIO address */
- ASSERT(sh_l1e_is_mmio(sl1e));
- gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
- << PAGE_SHIFT)
- | (va & ~PAGE_MASK);
- }
+#ifdef CONFIG_HVM
+ /* Magic MMIO marker: extract gfn for MMIO address */
+ ASSERT(sh_l1e_is_mmio(sl1e));
+ ASSERT(is_hvm_vcpu(v));
+ gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
+ << PAGE_SHIFT) | (va & ~PAGE_MASK);
perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
sh_reset_early_unshadow(v);
trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
- return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
- ? EXCRET_fault_fixed : 0);
+ return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
+ ? EXCRET_fault_fixed : 0;
+#else
+ /* When HVM is not enabled, there should not be any MMIO marker. */
+ BUG();
+#endif
}
else
{
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+#ifdef CONFIG_HVM
if ( r == X86EMUL_EXCEPTION )
{
+ ASSERT(is_hvm_domain(d));
/*
* This emulation covers writes to shadow pagetables. We tolerate #PF
* (from accesses spanning pages, concurrent paging updates from
r = X86EMUL_UNHANDLEABLE;
}
}
+#endif
/*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
mmio:
if ( !guest_mode(regs) )
goto not_a_shadow_fault;
+#ifdef CONFIG_HVM
+ ASSERT(is_hvm_vcpu(v));
perfc_incr(shadow_fault_mmio);
sh_audit_gw(v, &gw);
SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
trace_shadow_gen(TRC_SHADOW_MMIO, va);
return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0);
+#else
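+ /* MMIO faults can only occur for HVM guests. */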
+ BUG();
+#endif
not_a_shadow_fault:
sh_audit_gw(v, &gw);