ia64/xen-unstable
changeset 16333:828cb584c1cc
[IA64] vti save-restore: reap ioreq after resume.
Otherwise the ioreq is left as STATE_IORESP_READY,
so the domain will be panicked.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author      Alex Williamson <alex.williamson@hp.com>
date        Wed Nov 07 10:10:20 2007 -0700 (2007-11-07)
parents     166bf3b04495
children    74b40a9f4c0a
files       xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/domain.c xen/include/asm-ia64/vmx.h
line diff
--- a/xen/arch/ia64/vmx/vmx_init.c    Wed Nov 07 10:07:06 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_init.c    Wed Nov 07 10:10:20 2007 -0700
@@ -436,7 +436,32 @@ int vmx_setup_platform(struct domain *d)
     return 0;
 }
 
-void vmx_do_launch(struct vcpu *v)
+void vmx_do_resume(struct vcpu *v)
 {
+    ioreq_t *p;
+
     vmx_load_all_rr(v);
+    migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
+
+    /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
+    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
+    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    while (p->state != STATE_IOREQ_NONE) {
+        switch (p->state) {
+        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+            vmx_io_assist(v);
+            break;
+        case STATE_IOREQ_READY:
+        case STATE_IOREQ_INPROCESS:
+            /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+            wait_on_xen_event_channel(v->arch.arch_vmx.xen_port,
+                                      (p->state != STATE_IOREQ_READY) &&
+                                      (p->state != STATE_IOREQ_INPROCESS));
+            break;
+        default:
+            gdprintk(XENLOG_ERR,
+                     "Weird HVM iorequest state %d.\n", p->state);
+            domain_crash_synchronous();
+        }
+    }
 }
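For context, the new loop in vmx_do_resume() above drains the vcpu's shared ioreq before the guest runs again. The following standalone sketch (not Xen code; the enum values and the drain_step() helper are illustrative stand-ins for the shared ioreq_t state and for vmx_io_assist()/wait_on_xen_event_channel()) models the state transitions the loop relies on, including the save/restore case where a leftover STATE_IORESP_READY must be reaped back to STATE_IOREQ_NONE.

#include <stdio.h>

/* Illustrative stand-ins for the shared ioreq states; in Xen these come
 * from the ioreq_t structure shared with the device model. */
enum ioreq_state {
    STATE_IOREQ_NONE,       /* no request pending; the vcpu may run          */
    STATE_IOREQ_READY,      /* request posted, device model not yet done     */
    STATE_IOREQ_INPROCESS,  /* device model is handling the request          */
    STATE_IORESP_READY      /* response ready; must be reaped before running */
};

/* One drain step, mirroring the switch in vmx_do_resume():
 * IORESP_READY is reaped (vmx_io_assist() in the real code), while
 * IOREQ_READY/INPROCESS block on the event channel until the device
 * model produces a response. */
static enum ioreq_state drain_step(enum ioreq_state s)
{
    switch (s) {
    case STATE_IORESP_READY:
        return STATE_IOREQ_NONE;        /* IORESP_READY -> NONE */
    case STATE_IOREQ_READY:
    case STATE_IOREQ_INPROCESS:
        return STATE_IORESP_READY;      /* wait, then a response appears */
    default:
        return STATE_IOREQ_NONE;
    }
}

int main(void)
{
    /* After save/restore the state can be IORESP_READY; without the drain
     * loop the guest would resume with a stale response and be panicked. */
    enum ioreq_state s = STATE_IORESP_READY;

    while (s != STATE_IOREQ_NONE)
        s = drain_step(s);

    printf("ioreq drained, vcpu can run\n");
    return 0;
}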
--- a/xen/arch/ia64/xen/domain.c    Wed Nov 07 10:07:06 2007 -0700
+++ b/xen/arch/ia64/xen/domain.c    Wed Nov 07 10:10:20 2007 -0700
@@ -203,11 +203,9 @@ void schedule_tail(struct vcpu *prev)
     context_saved(prev);
     ia64_disable_vhpt_walker();
 
-    if (VMX_DOMAIN(current)) {
-        vmx_do_launch(current);
-        migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
-                      current->processor);
-    } else {
+    if (VMX_DOMAIN(current))
+        vmx_do_resume(current);
+    else {
         if (VMX_DOMAIN(prev))
             ia64_set_iva(&ia64_ivt);
         load_region_regs(current);
--- a/xen/include/asm-ia64/vmx.h    Wed Nov 07 10:07:06 2007 -0700
+++ b/xen/include/asm-ia64/vmx.h    Wed Nov 07 10:10:20 2007 -0700
@@ -34,7 +34,7 @@ extern int vmx_final_setup_guest(struct 
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern int vmx_setup_platform(struct domain *d);
-extern void vmx_do_launch(struct vcpu *v);
+extern void vmx_do_resume(struct vcpu *v);
 extern void vmx_io_assist(struct vcpu *v);
 extern int ia64_hypercall (struct pt_regs *regs);
 extern void vmx_save_state(struct vcpu *v);