}
/* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
+/*
+ * Widened from a static void helper: now exported and returning a
+ * success indicator so callers can notice when the guest area could
+ * not be written and defer the update (see _update_runstate_area()).
+ * Returns 1 on success or when no area is registered, 0 on failure.
+ */
+bool_t update_runstate_area(const struct vcpu *v)
{
if ( guest_handle_is_null(runstate_guest(v)) )
-        return;
+        return 1; /* No area registered: trivially "successful". */
if ( has_32bit_shinfo(v->domain) )
{
XLAT_vcpu_runstate_info(&info, &v->runstate);
__copy_to_guest(v->runstate_guest.compat, &info, 1);
+        /*
+         * NOTE(review): the compat (32-bit shinfo) path keeps the old
+         * fire-and-forget behavior and never reports failure -- confirm
+         * deferral is intentionally 64-bit-PV-only.
+         */
-        return;
+        return 1;
}
-    __copy_to_guest(runstate_guest(v), &v->runstate, 1);
+    /*
+     * NOTE(review): assumes __copy_to_guest() returns the number of
+     * bytes NOT copied, so the result equals sizeof(v->runstate) only
+     * when nothing at all was written -- confirm against Xen's guest
+     * access primitives.
+     */
+    return __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
+           sizeof(v->runstate);
+}
+
+/*
+ * Context-switch path wrapper: if the direct update failed for a PV
+ * vCPU currently executing in user mode (guest kernel mappings, and
+ * hence the registered VA, not accessible), flag the area as needing
+ * a deferred update instead of losing it.
+ */
+static void _update_runstate_area(struct vcpu *v)
+{
+    if ( !update_runstate_area(v) && is_pv_vcpu(v) &&
+         !(v->arch.flags & TF_kernel_mode) )
+        v->arch.pv_vcpu.need_update_runstate_area = 1;
}
static inline int need_full_gdt(struct vcpu *v)
flush_tlb_mask(&dirty_mask);
}
-    if (prev != next)
-        update_runstate_area(prev);
+    /* Xen coding style fix; route through the deferring wrapper. */
+    if ( prev != next )
+        _update_runstate_area(prev);
if ( is_hvm_vcpu(prev) )
{
context_saved(prev);
-    if (prev != next)
-        update_runstate_area(next);
+        if ( prev != next )
+            _update_runstate_area(next);
/* Ensure that the vcpu has an up-to-date time base. */
update_vcpu_system_time(next);
{
struct cpu_time *t;
struct vcpu_time_info *u, _u;
-    XEN_GUEST_HANDLE(vcpu_time_info_t) user_u;
struct domain *d = v->domain;
s_time_t tsc_stamp = 0;
/* 3. Update guest kernel version. */
u->version = version_update_end(u->version);
-    user_u = v->arch.time_info_guest;
-    if ( !guest_handle_is_null(user_u) )
-    {
-        /* 1. Update userspace version. */
-        __copy_field_to_guest(user_u, &_u, version);
-        wmb();
-        /* 2. Update all other userspavce fields. */
-        __copy_to_guest(user_u, &_u, 1);
-        wmb();
-        /* 3. Update userspace version. */
-        _u.version = version_update_end(_u.version);
-        __copy_field_to_guest(user_u, &_u, version);
-    }
+    /*
+     * The userspace copy is now factored out into
+     * update_secondary_system_time().  If it fails for a 64-bit PV
+     * vCPU running in user mode, stash the snapshot so it can be
+     * replayed once the vCPU switches back to kernel mode.
+     */
+    if ( !update_secondary_system_time(v, &_u) && is_pv_domain(d) &&
+         !is_pv_32bit_domain(d) && !(v->arch.flags & TF_kernel_mode) )
+        v->arch.pv_vcpu.pending_system_time = _u;
+}
+
+/*
+ * Copy the given time snapshot to the secondary (userspace-registered)
+ * vcpu_time_info area, if one is registered.
+ * Returns 1 on success or when no area is registered, 0 when the guest
+ * area could not be written (caller may stash the snapshot and retry).
+ * The version field is written before and after the payload --
+ * presumably the usual Xen time-info seqlock protocol (odd version
+ * while an update is in flight); confirm against the public
+ * vcpu_time_info ABI.
+ */
+bool_t update_secondary_system_time(const struct vcpu *v,
+                                    struct vcpu_time_info *u)
+{
+    XEN_GUEST_HANDLE(vcpu_time_info_t) user_u = v->arch.time_info_guest;
+
+    if ( guest_handle_is_null(user_u) )
+        return 1;
+
+    /* 1. Update userspace version. */
+    /*
+     * NOTE(review): a return of sizeof(u->version) is taken to mean
+     * "nothing copied", i.e. the area is currently inaccessible, so
+     * bail out before publishing a torn update.
+     */
+    if ( __copy_field_to_guest(user_u, u, version) == sizeof(u->version) )
+        return 0;
+    wmb();
+    /* 2. Update all other userspace fields. */
+    __copy_to_guest(user_u, u, 1);
+    wmb();
+    /* 3. Update userspace version. */
+    u->version = version_update_end(u->version);
+    __copy_field_to_guest(user_u, u, version);
+
+    return 1;
}
void update_vcpu_system_time(struct vcpu *v)
#else
write_ptbase(v);
#endif
+
+    /*
+     * Deferred-update replay.  NOTE(review): the enclosing function's
+     * head is outside this hunk -- presumably the PV kernel/user mode
+     * toggle path; verify.  Only once the vCPU is back in guest kernel
+     * mode are the VA-registered areas expected to be mapped, so retry
+     * any updates that failed earlier and clear the pending markers on
+     * success (leaving them set keeps the retry armed).
+     */
+    if ( !(v->arch.flags & TF_kernel_mode) )
+        return;
+
+    if ( v->arch.pv_vcpu.need_update_runstate_area &&
+         update_runstate_area(v) )
+        v->arch.pv_vcpu.need_update_runstate_area = 0;
+
+    /* A zero version doubles as the "nothing pending" marker. */
+    if ( v->arch.pv_vcpu.pending_system_time.version &&
+         update_secondary_system_time(v,
+                                      &v->arch.pv_vcpu.pending_system_time) )
+        v->arch.pv_vcpu.pending_system_time.version = 0;
}
unsigned long do_iret(void)
/* Current LDT details. */
unsigned long shadow_ldt_mapcnt;
spinlock_t shadow_ldt_lock;
+
+    /* Deferred VA-based update state. */
+    /* Set when update_runstate_area() failed while in user mode. */
+    bool_t need_update_runstate_area;
+    /* Snapshot to replay; .version == 0 means nothing is pending. */
+    struct vcpu_time_info pending_system_time;
};
struct arch_vcpu
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm
+
+/*
+ * Both return 1 on success (or when no guest area is registered) and
+ * 0 when the guest-VA based area could not be written, allowing the
+ * caller to defer the update until the mapping is accessible again.
+ */
+bool_t update_runstate_area(const struct vcpu *);
+bool_t update_secondary_system_time(const struct vcpu *,
+                                    struct vcpu_time_info *);
+
void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);