The hook is already in use for other purposes, and emulating e.g.
CLFLUSH by issuing WBINVD is, well, not very nice. Rename the hook and
add parameters. Use lighter-weight flushing insns when possible in
hvmemul_cache_op().
hvmemul_cache_op() treats x86emul_invd the same as x86emul_wbinvd in
order to retain the original behavior, but I'm not sure this is what we
want in the long run.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Paul Durrant <paul.durrant@citrix.com>
return maybe_fail(ctxt, "invlpg", false);
}
-static int fuzz_wbinvd(
+static int fuzz_cache_op(
+ enum x86emul_cache_op op,
+ enum x86_segment seg,
+ unsigned long offset,
struct x86_emulate_ctxt *ctxt)
{
- return maybe_fail(ctxt, "wbinvd", true);
+ return maybe_fail(ctxt, "cache-management", true);
}
static int fuzz_write_io(
SET(read_xcr),
SET(read_msr),
SET(write_msr),
- SET(wbinvd),
+ SET(cache_op),
SET(invlpg),
.get_fpu = emul_test_get_fpu,
.put_fpu = emul_test_put_fpu,
HOOK_read_xcr,
HOOK_read_msr,
HOOK_write_msr,
- HOOK_wbinvd,
+ HOOK_cache_op,
HOOK_cpuid,
HOOK_inject_hw_exception,
HOOK_inject_sw_interrupt,
MAYBE_DISABLE_HOOK(read_xcr);
MAYBE_DISABLE_HOOK(read_msr);
MAYBE_DISABLE_HOOK(write_msr);
- MAYBE_DISABLE_HOOK(wbinvd);
+ MAYBE_DISABLE_HOOK(cache_op);
MAYBE_DISABLE_HOOK(cpuid);
MAYBE_DISABLE_HOOK(get_fpu);
MAYBE_DISABLE_HOOK(invlpg);
$(call as-option-add,CFLAGS,CC,"invept (%rax)$$(comma)%rax",-DHAVE_AS_EPT)
$(call as-option-add,CFLAGS,CC,"rdrand %eax",-DHAVE_AS_RDRAND)
$(call as-option-add,CFLAGS,CC,"rdfsbase %rax",-DHAVE_AS_FSGSBASE)
+$(call as-option-add,CFLAGS,CC,"xsaveopt (%rax)",-DHAVE_AS_XSAVEOPT)
$(call as-option-add,CFLAGS,CC,"rdseed %eax",-DHAVE_AS_RDSEED)
+$(call as-option-add,CFLAGS,CC,"clwb (%rax)",-DHAVE_AS_CLWB)
$(call as-option-add,CFLAGS,CC,".equ \"x\"$$(comma)1", \
-U__OBJECT_LABEL__ -DHAVE_AS_QUOTED_SYM \
'-D__OBJECT_LABEL__=$(subst $(BASEDIR)/,,$(CURDIR))/$$@')
#include <asm/hvm/trace.h>
#include <asm/hvm/support.h>
#include <asm/hvm/svm/svm.h>
+#include <asm/iocap.h>
#include <asm/vm_event.h>
static void hvmtrace_io_assist(const ioreq_t *p)
mfn_t *mfn = &hvmemul_ctxt->mfn[0];
/*
- * The caller has no legitimate reason for trying a zero-byte write, but
- * all other code here is written to work if the check below was dropped.
- *
- * The maximum write size depends on the number of adjacent mfns[] which
+ * The maximum access size depends on the number of adjacent mfns[] which
* can be vmap()'d, accounting for possible misalignment within the region.
* The higher level emulation callers are responsible for ensuring that
- * mfns[] is large enough for the requested write size.
+ * mfns[] is large enough for the requested access size.
*/
- if ( bytes == 0 ||
- nr_frames > ARRAY_SIZE(hvmemul_ctxt->mfn) )
+ if ( nr_frames > ARRAY_SIZE(hvmemul_ctxt->mfn) )
{
ASSERT_UNREACHABLE();
goto unhandleable;
unsigned int i;
mfn_t *mfn = &hvmemul_ctxt->mfn[0];
- ASSERT(bytes > 0);
-
if ( nr_frames == 1 )
unmap_domain_page(mapping);
else
return X86EMUL_OKAY;
}
-static int hvmemul_wbinvd_discard(
+static int hvmemul_cache_op_discard(
+ enum x86emul_cache_op op,
+ enum x86_segment seg,
+ unsigned long offset,
struct x86_emulate_ctxt *ctxt)
{
return X86EMUL_OKAY;
return rc;
}
-static int hvmemul_wbinvd(
+static int hvmemul_cache_op(
+ enum x86emul_cache_op op,
+ enum x86_segment seg,
+ unsigned long offset,
struct x86_emulate_ctxt *ctxt)
{
- alternative_vcall(hvm_funcs.wbinvd_intercept);
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ uint32_t pfec = PFEC_page_present;
+
+ if ( !cache_flush_permitted(current->domain) )
+ return X86EMUL_OKAY;
+
+ switch ( op )
+ {
+ unsigned long addr;
+ int rc;
+ void *mapping;
+
+ case x86emul_clflush:
+ case x86emul_clflushopt:
+ case x86emul_clwb:
+ ASSERT(!is_x86_system_segment(seg));
+
+ rc = hvmemul_virtual_to_linear(seg, offset, 0, NULL,
+ hvm_access_read, hvmemul_ctxt, &addr);
+ if ( rc != X86EMUL_OKAY )
+ break;
+
+ if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+ pfec |= PFEC_user_mode;
+
+ mapping = hvmemul_map_linear_addr(addr, 0, pfec, hvmemul_ctxt);
+ if ( mapping == ERR_PTR(~X86EMUL_EXCEPTION) )
+ return X86EMUL_EXCEPTION;
+ if ( IS_ERR_OR_NULL(mapping) )
+ break;
+
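+        /* Without CLFLUSH at all, fall back to the WBINVD intercept below. */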
+ if ( cpu_has_clflush )
+ {
+ if ( op == x86emul_clwb && cpu_has_clwb )
+ clwb(mapping);
+ else if ( op == x86emul_clflushopt && cpu_has_clflushopt )
+ clflushopt(mapping);
+ else
+ clflush(mapping);
+
+ hvmemul_unmap_linear_addr(mapping, addr, 0, hvmemul_ctxt);
+ break;
+ }
+
+ hvmemul_unmap_linear_addr(mapping, addr, 0, hvmemul_ctxt);
+ /* fall through */
+ case x86emul_invd:
+ case x86emul_wbinvd:
+ alternative_vcall(hvm_funcs.wbinvd_intercept);
+ break;
+ }
+
return X86EMUL_OKAY;
}
.write_xcr = hvmemul_write_xcr,
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr,
- .wbinvd = hvmemul_wbinvd,
+ .cache_op = hvmemul_cache_op,
.cpuid = x86emul_cpuid,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
.write_xcr = hvmemul_write_xcr,
.read_msr = hvmemul_read_msr,
.write_msr = hvmemul_write_msr_discard,
- .wbinvd = hvmemul_wbinvd_discard,
+ .cache_op = hvmemul_cache_op_discard,
.cpuid = x86emul_cpuid,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
return X86EMUL_UNHANDLEABLE;
}
-/* Name it differently to avoid clashing with wbinvd() */
-static int _wbinvd(struct x86_emulate_ctxt *ctxt)
+static int cache_op(enum x86emul_cache_op op, enum x86_segment seg,
+ unsigned long offset, struct x86_emulate_ctxt *ctxt)
{
+ ASSERT(op == x86emul_wbinvd);
+
/* Ignore the instruction if unprivileged. */
if ( !cache_flush_permitted(current->domain) )
/*
.read_msr = read_msr,
.write_msr = write_msr,
.cpuid = x86emul_cpuid,
- .wbinvd = _wbinvd,
+ .cache_op = cache_op,
};
int pv_emulate_privileged_op(struct cpu_user_regs *regs)
case X86EMUL_OPC(0x0f, 0x08): /* invd */
case X86EMUL_OPC(0x0f, 0x09): /* wbinvd */
generate_exception_if(!mode_ring0(), EXC_GP, 0);
- fail_if(ops->wbinvd == NULL);
- if ( (rc = ops->wbinvd(ctxt)) != 0 )
+ fail_if(!ops->cache_op);
+ if ( (rc = ops->cache_op(b == 0x09 ? x86emul_wbinvd
+ : x86emul_invd,
+ x86_seg_none, 0,
+ ctxt)) != X86EMUL_OKAY )
goto done;
break;
/* else clwb */
fail_if(!vex.pfx);
vcpu_must_have(clwb);
- fail_if(!ops->wbinvd);
- if ( (rc = ops->wbinvd(ctxt)) != X86EMUL_OKAY )
+ fail_if(!ops->cache_op);
+ if ( (rc = ops->cache_op(x86emul_clwb, ea.mem.seg, ea.mem.off,
+ ctxt)) != X86EMUL_OKAY )
goto done;
break;
case 7:
vcpu_must_have(clflush);
else
vcpu_must_have(clflushopt);
- fail_if(ops->wbinvd == NULL);
- if ( (rc = ops->wbinvd(ctxt)) != 0 )
+ fail_if(!ops->cache_op);
+ if ( (rc = ops->cache_op(vex.pfx ? x86emul_clflushopt
+ : x86emul_clflush,
+ ea.mem.seg, ea.mem.off,
+ ctxt)) != X86EMUL_OKAY )
goto done;
break;
default:
X86EMUL_FPU_none
};
+enum x86emul_cache_op {
+ x86emul_clflush,
+ x86emul_clflushopt,
+ x86emul_clwb,
+ x86emul_invd,
+ x86emul_wbinvd,
+};
+
struct x86_emulate_state;
/*
uint64_t val,
struct x86_emulate_ctxt *ctxt);
- /* wbinvd: Write-back and invalidate cache contents. */
- int (*wbinvd)(
+ /*
+ * cache_op: Write-back and/or invalidate cache contents.
+ *
+ * @seg:@offset applicable only to some of enum x86emul_cache_op.
+ */
+ int (*cache_op)(
+ enum x86emul_cache_op op,
+ enum x86_segment seg,
+ unsigned long offset,
struct x86_emulate_ctxt *ctxt);
/* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
#define cpu_has_rdseed boot_cpu_has(X86_FEATURE_RDSEED)
#define cpu_has_smap boot_cpu_has(X86_FEATURE_SMAP)
#define cpu_has_avx512_ifma boot_cpu_has(X86_FEATURE_AVX512_IFMA)
+#define cpu_has_clflushopt boot_cpu_has(X86_FEATURE_CLFLUSHOPT)
+#define cpu_has_clwb boot_cpu_has(X86_FEATURE_CLWB)
#define cpu_has_avx512er boot_cpu_has(X86_FEATURE_AVX512ER)
#define cpu_has_avx512cd boot_cpu_has(X86_FEATURE_AVX512CD)
#define cpu_has_sha boot_cpu_has(X86_FEATURE_SHA)
asm volatile ( "clflush %0" :: "m" (*(const char *)p) );
}
+static inline void clflushopt(const void *p)
+{
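+    /* CLFLUSHOPT is encoded as CLFLUSH with an operand-size (66) prefix. */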
+ asm volatile ( "data16 clflush %0" :: "m" (*(const char *)p) );
+}
+
+static inline void clwb(const void *p)
+{
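+    /*
+     * CLWB is encoded as 66 0f ae /6.  Assemblers lacking CLWB support can
+     * produce the same bytes via a 66-prefixed XSAVEOPT (also /6); failing
+     * that, emit the encoding directly, using (%rdx) as the memory operand
+     * to match the "d" constraint below.
+     */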
+#if defined(HAVE_AS_CLWB)
+ asm volatile ( "clwb %0" :: "m" (*(const char *)p) );
+#elif defined(HAVE_AS_XSAVEOPT)
+ asm volatile ( "data16 xsaveopt %0" :: "m" (*(const char *)p) );
+#else
+ asm volatile ( ".byte 0x66, 0x0f, 0xae, 0x32"
+ :: "d" (p), "m" (*(const char *)p) );
+#endif
+}
+
#define xchg(ptr,v) \
((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))