WRITE_SYSREG(n->arch.mdcr_el2, MDCR_EL2);
}
-/* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
-{
-    void __user *guest_handle = NULL;
-    struct vcpu_runstate_info runstate;
-
-    if ( guest_handle_is_null(runstate_guest(v)) )
-        return;
-
-    memcpy(&runstate, &v->runstate, sizeof(runstate));
-
-    if ( VM_ASSIST(v->domain, runstate_update_flag) )
-    {
-        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
-        guest_handle--;
-        runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
-        smp_wmb();
-    }
-
-    __copy_to_guest(runstate_guest(v), &runstate, 1);
-
-    if ( guest_handle )
-    {
-        runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-        smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
-    }
-}
-
static void schedule_tail(struct vcpu *prev)
{
ASSERT(prev != current);
struct instr_details dabt_instr; /* when the instruction is decoded */
};
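+/*
+ * Arm has no per-vCPU guest memory access policy to switch around guest
+ * copies, but common code (update_runstate_area()) expects these hooks to
+ * exist, so provide empty stubs.
+ */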
+struct guest_memory_policy {};
+static inline void update_guest_memory_policy(struct vcpu *v,
+                                              struct guest_memory_policy *gmp)
+{}
+
#endif /* __ASM_DOMAIN_H__ */
/*
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
-/* Update per-VCPU guest runstate shared memory area (if registered). */
-bool update_runstate_area(struct vcpu *v)
-{
-    bool rc;
-    struct guest_memory_policy policy = { .nested_guest_mode = false };
-    void __user *guest_handle = NULL;
-    struct vcpu_runstate_info runstate;
-
-    if ( guest_handle_is_null(runstate_guest(v)) )
-        return true;
-
-    update_guest_memory_policy(v, &policy);
-
-    memcpy(&runstate, &v->runstate, sizeof(runstate));
-
-    if ( VM_ASSIST(v->domain, runstate_update_flag) )
-    {
-#ifdef CONFIG_COMPAT
-        guest_handle = has_32bit_shinfo(v->domain)
-            ? &v->runstate_guest.compat.p->state_entry_time + 1
-            : &v->runstate_guest.native.p->state_entry_time + 1;
-#else
-        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
-#endif
-        guest_handle--;
-        runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
-        smp_wmb();
-    }
-
-#ifdef CONFIG_COMPAT
-    if ( has_32bit_shinfo(v->domain) )
-    {
-        struct compat_vcpu_runstate_info info;
-
-        XLAT_vcpu_runstate_info(&info, &runstate);
-        __copy_to_guest(v->runstate_guest.compat, &info, 1);
-        rc = true;
-    }
-    else
-#endif
-        rc = __copy_to_guest(runstate_guest(v), &runstate, 1) !=
-             sizeof(runstate);
-
-    if ( guest_handle )
-    {
-        runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-        smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
-    }
-
-    update_guest_memory_policy(v, &policy);
-
-    return rc;
-}
-
static void _update_runstate_area(struct vcpu *v)
{
if ( !update_runstate_area(v) && is_pv_vcpu(v) &&
void domain_cpu_policy_changed(struct domain *d);
-bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
struct vcpu_time_info *);
return rc;
}
+/* Update per-VCPU guest runstate shared memory area (if registered). */
+bool update_runstate_area(struct vcpu *v)
+{
+    bool rc;
+    struct guest_memory_policy policy = { };
+    void __user *guest_handle = NULL;
+    struct vcpu_runstate_info runstate;
+
+    if ( guest_handle_is_null(runstate_guest(v)) )
+        return true;
+
+    update_guest_memory_policy(v, &policy);
+
+    memcpy(&runstate, &v->runstate, sizeof(runstate));
+
+    if ( VM_ASSIST(v->domain, runstate_update_flag) )
+    {
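+        /*
+         * XEN_RUNSTATE_UPDATE is the top bit of state_entry_time, so point
+         * guest_handle at the field's final byte and toggle the flag with
+         * single-byte copies around the main update below.
+         */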
+#ifdef CONFIG_COMPAT
+        guest_handle = has_32bit_shinfo(v->domain)
+            ? &v->runstate_guest.compat.p->state_entry_time + 1
+            : &v->runstate_guest.native.p->state_entry_time + 1;
+#else
+        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+#endif
+        guest_handle--;
+        runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+        __raw_copy_to_guest(guest_handle,
+                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
+        smp_wmb();
+    }
+
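+    /* 32-bit guests need the runstate translated to the compat layout. */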
+#ifdef CONFIG_COMPAT
+    if ( has_32bit_shinfo(v->domain) )
+    {
+        struct compat_vcpu_runstate_info info;
+
+        XLAT_vcpu_runstate_info(&info, &runstate);
+        __copy_to_guest(v->runstate_guest.compat, &info, 1);
+        rc = true;
+    }
+    else
+#endif
+        rc = __copy_to_guest(runstate_guest(v), &runstate, 1) !=
+             sizeof(runstate);
+
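+    /* Main copy done: clear XEN_RUNSTATE_UPDATE in the guest's copy. */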
+    if ( guest_handle )
+    {
+        runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        smp_wmb();
+        __raw_copy_to_guest(guest_handle,
+                            (void *)(&runstate.state_entry_time + 1) - 1, 1);
+    }
+
+    update_guest_memory_policy(v, &policy);
+
+    return rc;
+}
+
long common_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
{
long rc = 0;
int arch_get_paging_mempool_size(struct domain *d, uint64_t *size /* bytes */);
int arch_set_paging_mempool_size(struct domain *d, uint64_t size /* bytes */);
+bool update_runstate_area(struct vcpu *);
+
int domain_relinquish_resources(struct domain *d);
void dump_pageframe_info(struct domain *d);