     return 0;
 }
-void arch_vcpu_reset(struct vcpu *v)
+int arch_vcpu_reset(struct vcpu *v)
 {
     vcpu_end_shutdown_deferral(v);
+    return 0;
 }
 static int relinquish_memory(struct domain *d, struct page_list_head *list)
 #undef c
 }
-void arch_vcpu_reset(struct vcpu *v)
+int arch_vcpu_reset(struct vcpu *v)
 {
     if ( !is_hvm_vcpu(v) )
     {
         destroy_gdt(v);
-        vcpu_destroy_pagetables(v, 0);
-    }
-    else
-    {
-        vcpu_end_shutdown_deferral(v);
+        return vcpu_destroy_pagetables(v);
     }
+
+    vcpu_end_shutdown_deferral(v);
+    return 0;
 }
 /*
         /* Drop the in-use references to page-table bases. */
         for_each_vcpu ( d, v )
         {
-            ret = vcpu_destroy_pagetables(v, 1);
+            ret = vcpu_destroy_pagetables(v);
             if ( ret )
                 return ret;
     for_each_vcpu ( d, v )
     {
+        int rc;
+
         vlapic_reset(vcpu_vlapic(v));
-        vcpu_reset(v);
+        rc = vcpu_reset(v);
+        ASSERT(!rc);
     }
     vpic_reset(d);
 {
     case APIC_DM_INIT: {
         bool_t fpu_initialised;
+        int rc;
+
         /* No work on INIT de-assert for P4-type APIC. */
         if ( (icr & (APIC_INT_LEVELTRIG | APIC_INT_ASSERT)) ==
              APIC_INT_LEVELTRIG )
         domain_lock(target->domain);
         /* Reset necessary VCPU state. This does not include FPU state. */
         fpu_initialised = target->fpu_initialised;
-        vcpu_reset(target);
+        rc = vcpu_reset(target);
+        ASSERT(!rc);
         target->fpu_initialised = fpu_initialised;
         vlapic_reset(vcpu_vlapic(target));
         domain_unlock(target->domain);
     return rc;
 }
-int vcpu_destroy_pagetables(struct vcpu *v, bool_t preemptible)
+int vcpu_destroy_pagetables(struct vcpu *v)
 {
     unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
     struct page_info *page;
         if ( paging_mode_refcounts(v->domain) )
             put_page(page);
         else
-            rc = put_page_and_type_preemptible(page, preemptible);
+            rc = put_page_and_type_preemptible(page, 1);
     }
     if ( l4tab )
         if ( paging_mode_refcounts(v->domain) )
             put_page(page);
         else
-            rc = put_page_and_type_preemptible(page, preemptible);
+            rc = put_page_and_type_preemptible(page, 1);
     }
     if ( !rc )
         v->arch.guest_table_user = pagetable_null();
         domain_unpause(d);
 }
-void vcpu_reset(struct vcpu *v)
+int vcpu_reset(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    int rc;
     vcpu_pause(v);
     domain_lock(d);
-    arch_vcpu_reset(v);
+    set_bit(_VPF_in_reset, &v->pause_flags);
+    rc = arch_vcpu_reset(v);
+    if ( rc )
+        goto out_unlock;
     set_bit(_VPF_down, &v->pause_flags);
 #endif
     cpumask_clear(v->cpu_affinity_tmp);
     clear_bit(_VPF_blocked, &v->pause_flags);
+    clear_bit(_VPF_in_reset, &v->pause_flags);
+ out_unlock:
     domain_unlock(v->domain);
     vcpu_unpause(v);
+
+    return rc;
 }
         ret = -EINVAL;
         if ( (d == current->domain) || /* no domain_pause() */
              (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
-            goto svc_out;
+            break;
         if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
         {
-            vcpu_reset(v);
-            ret = 0;
-            goto svc_out;
+            ret = vcpu_reset(v);
+            if ( ret == -EAGAIN )
+                ret = hypercall_create_continuation(
+                    __HYPERVISOR_domctl, "h", u_domctl);
+            break;
         }
 #ifdef CONFIG_COMPAT
 #endif
         ret = -ENOMEM;
         if ( (c.nat = alloc_vcpu_guest_context()) == NULL )
-            goto svc_out;
+            break;
 #ifdef CONFIG_COMPAT
         if ( !is_pv_32on64_vcpu(v) )
             domain_unpause(d);
         }
-    svc_out:
         free_vcpu_guest_context(c.nat);
     }
     break;
 int new_guest_cr3(unsigned long pfn);
 void make_cr3(struct vcpu *v, unsigned long mfn);
 void update_cr3(struct vcpu *v);
-int vcpu_destroy_pagetables(struct vcpu *, bool_t preemptible);
+int vcpu_destroy_pagetables(struct vcpu *);
 void propagate_page_fault(unsigned long addr, u16 error_code);
 void *do_page_walk(struct vcpu *v, unsigned long addr);
 struct vcpu *alloc_vcpu(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
 struct vcpu *alloc_dom0_vcpu0(void);
-void vcpu_reset(struct vcpu *v);
+int vcpu_reset(struct vcpu *);
 struct xen_domctl_getdomaininfo;
 void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
 void arch_dump_domain_info(struct domain *d);
-void arch_vcpu_reset(struct vcpu *v);
+int arch_vcpu_reset(struct vcpu *);
 extern spinlock_t vcpu_alloc_lock;
 bool_t domctl_lock_acquire(void);
  /* VCPU is blocked due to missing mem_sharing ring. */
 #define _VPF_mem_sharing 6
 #define VPF_mem_sharing (1UL<<_VPF_mem_sharing)
+ /* VCPU is being reset. */
+#define _VPF_in_reset 7
+#define VPF_in_reset (1UL<<_VPF_in_reset)
 static inline int vcpu_runnable(struct vcpu *v)
 {