/* Update per-VCPU guest runstate shared memory area (if registered). */
static void update_runstate_area(struct vcpu *v)
{
+ void __user *guest_handle = NULL;
+
if ( guest_handle_is_null(runstate_guest(v)) )
return;
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+ guest_handle--;
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+ __raw_copy_to_guest(guest_handle,
+ (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ smp_wmb();
+ }
+
__copy_to_guest(runstate_guest(v), &v->runstate, 1);
+
+ if ( guest_handle )
+ {
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+ __raw_copy_to_guest(guest_handle,
+ (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ }
}
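The two single-byte __raw_copy_to_guest() calls above deliberately touch only the final byte of state_entry_time, which on the little-endian targets involved is the byte holding XEN_RUNSTATE_UPDATE (bit 63), so the flag can be raised and lowered without rewriting the whole field. The guest is expected to treat the flag like the read side of a seqlock. A minimal guest-side sketch, not part of this patch; rmb() is a compiler-barrier placeholder for whatever read barrier the guest kernel provides, and Xen's public vcpu.h is assumed to be on the include path for struct vcpu_runstate_info and XEN_RUNSTATE_UPDATE:

#include <stdint.h>
#include <string.h>

/* Placeholder: a real guest must use a proper read memory barrier. */
#define rmb() __asm__ __volatile__ ("" ::: "memory")

static void runstate_snapshot(const volatile struct vcpu_runstate_info *area,
                              struct vcpu_runstate_info *out)
{
    uint64_t entry;

    do {
        entry = area->state_entry_time;
        rmb();                  /* read the flag before the payload */
        memcpy(out, (const void *)area, sizeof(*out));
        rmb();                  /* read the payload before re-checking */
    } while ( (entry & XEN_RUNSTATE_UPDATE) ||
              area->state_entry_time != entry );
}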
bool_t update_runstate_area(struct vcpu *v)
{
bool_t rc;
smap_check_policy_t smap_policy;
+ void __user *guest_handle = NULL;

if ( guest_handle_is_null(runstate_guest(v)) )
return 1;

smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ guest_handle = has_32bit_shinfo(v->domain)
+ ? &v->runstate_guest.compat.p->state_entry_time + 1
+ : &v->runstate_guest.native.p->state_entry_time + 1;
+ guest_handle--;
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+ __raw_copy_to_guest(guest_handle,
+ (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ smp_wmb();
+ }
+
if ( has_32bit_shinfo(v->domain) )
{
struct compat_vcpu_runstate_info info;

XLAT_vcpu_runstate_info(&info, &v->runstate);
__copy_to_guest(v->runstate_guest.compat, &info, 1);
rc = 1;
}
else
rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
sizeof(v->runstate);
+ if ( guest_handle )
+ {
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+ __raw_copy_to_guest(guest_handle,
+ (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ }
+
smap_policy_change(v, smap_policy);

return rc;
}
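Both implementations compute guest_handle with the same idiom: take the address one past state_entry_time, then step back a single byte (void pointer arithmetic is byte-granular here, a GCC extension), which yields the address of the field's last byte. On little-endian x86 and ARM that byte contains bit 63, i.e. XEN_RUNSTATE_UPDATE. A standalone illustration of the idiom, for exposition only:

#include <stdint.h>
#include <stdio.h>

#define XEN_RUNSTATE_UPDATE (1ULL << 63)

int main(void)
{
    uint64_t state_entry_time = 0x0123456789abcdefULL;
    /* One element past the field, then back one byte: its last byte. */
    uint8_t *last_byte = (uint8_t *)(&state_entry_time + 1) - 1;

    state_entry_time |= XEN_RUNSTATE_UPDATE;
    printf("%#x\n", *last_byte); /* 0x81 on little-endian: 0x01 | 0x80 */
    return 0;
}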
#define watchdog_disable() ((void)0)
#define watchdog_enable() ((void)0)
-#define VM_ASSIST_VALID (0)
+#define VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
#endif /* __ARM_CONFIG_H__ */
#define NATIVE_VM_ASSIST_VALID ((1UL << VMASST_TYPE_4gb_segments) | \
(1UL << VMASST_TYPE_4gb_segments_notify) | \
(1UL << VMASST_TYPE_writable_pagetables) | \
(1UL << VMASST_TYPE_pae_extended_cr3) | \
(1UL << VMASST_TYPE_architectural_iopl) | \
+ (1UL << VMASST_TYPE_runstate_update_flag) | \
(1UL << VMASST_TYPE_m2p_strict))
#define VM_ASSIST_VALID NATIVE_VM_ASSIST_VALID
#define COMPAT_VM_ASSIST_VALID (NATIVE_VM_ASSIST_VALID & \
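VM_ASSIST_VALID (and its compat variant) gate which assist types a guest may toggle. For orientation only, the common handler and the VM_ASSIST() test look roughly as follows in this area of the tree; treat the exact signatures as assumptions rather than part of this patch:

/* Roughly xen/include/xen/sched.h: */
#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))

/* Roughly xen/common/domain.c: reject anything outside the valid mask. */
int vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type >= BITS_PER_LONG || !(VM_ASSIST_VALID & (1UL << type)) )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}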
int state;
/* When was current state entered (system time, ns)? */
uint64_t state_entry_time;
+ /*
+ * Update indicator set in state_entry_time:
+ * When activated via VMASST_TYPE_runstate_update_flag, this bit is set
+ * while the guest memory mapped copy of vcpu_runstate_info is being
+ * updated.
+ */
+#define XEN_RUNSTATE_UPDATE (1ULL << 63)
/*
* Time spent in each RUNSTATE_* (ns). The sum of these times is
* guaranteed not to drift from system time.
*/
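In the full header the time[] accumulators follow this comment; guests typically fold them into scheduler accounting, e.g. stolen time is time spent runnable-but-not-scheduled plus offline. A hedged helper over a snapshot taken with a consistent-read loop like the one sketched earlier (RUNSTATE_runnable and RUNSTATE_offline are defined in this header):

static uint64_t stolen_ns(const struct vcpu_runstate_info *r)
{
    return r->time[RUNSTATE_runnable] + r->time[RUNSTATE_offline];
}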
#define VMASST_TYPE_architectural_iopl 4
+/*
+ * All guests: activate update indicator in vcpu_runstate_info.
+ * Enables setting the XEN_RUNSTATE_UPDATE flag in the guest memory mapped
+ * vcpu_runstate_info while the runstate information is being updated.
+ */
+#define VMASST_TYPE_runstate_update_flag 5
+
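A guest pairs this assist with a previously registered runstate area. The sketch below loosely follows what Linux does in xen_setup_runstate_info(); the HYPERVISOR_* wrappers and the runstate variable are assumptions about the guest environment, not part of this patch:

static struct vcpu_runstate_info runstate; /* one per vCPU in practice */

static void setup_runstate(int cpu)
{
    struct vcpu_register_runstate_memory_area area = {
        .addr.v = &runstate,
    };

    /* Tell Xen where to mirror this vCPU's runstate information. */
    if ( HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area) )
        return; /* no runstate area; fall back to coarser accounting */

    /* Opt in to the XEN_RUNSTATE_UPDATE indicator introduced here. */
    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_runstate_update_flag);
}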
/*
* x86/64 guests: strictly hide M2P from user mode.
* This allows the guest to control respective hypervisor behavior: