ia64/xen-unstable

changeset 12349:6c975e642719

[HVM] Clean up IOREQ state management and evtchn notifications.
Based on a patch from Xin Li <xin.b.li@intel.com>. Changed
significantly on the Xen side -- not as cut down as the original
patch: this one keeps the xen_event_channel 'API' unchanged.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Nov 09 17:23:58 2006 +0000 (2006-11-09)
parents 7968f02ede38
children 5a9b79981a35
files tools/ioemu/target-i386-dm/cpu.h tools/ioemu/target-i386-dm/helper2.c xen/arch/ia64/vmx/vmx_support.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/include/public/hvm/ioreq.h
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/cpu.h	Thu Nov 09 16:50:44 2006 +0000
     1.2 +++ b/tools/ioemu/target-i386-dm/cpu.h	Thu Nov 09 17:23:58 2006 +0000
     1.3 @@ -55,8 +55,6 @@ typedef struct CPUX86State {
     1.4      int interrupt_request;
     1.5  
     1.6      CPU_COMMON
     1.7 -
     1.8 -    int send_event;
     1.9  } CPUX86State;
    1.10  
    1.11  CPUX86State *cpu_x86_init(void);
     2.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Thu Nov 09 16:50:44 2006 +0000
     2.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Thu Nov 09 17:23:58 2006 +0000
     2.3 @@ -506,10 +506,10 @@ void cpu_handle_ioreq(void *opaque)
     2.4  
     2.5          /* No state change if state = STATE_IORESP_HOOK */
     2.6          if (req->state == STATE_IOREQ_INPROCESS) {
     2.7 -            mb();
     2.8              req->state = STATE_IORESP_READY;
     2.9 -        }
    2.10 -        env->send_event = 1;
    2.11 +            xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    2.12 +        } else
    2.13 +            destroy_hvm_domain();
    2.14      }
    2.15  }
    2.16  
    2.17 @@ -526,8 +526,6 @@ int main_loop(void)
    2.18  
    2.19      qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
    2.20  
    2.21 -    env->send_event = 0;
    2.22 -
    2.23      while (1) {
    2.24          if (vm_running) {
    2.25              if (shutdown_requested)
    2.26 @@ -540,11 +538,6 @@ int main_loop(void)
    2.27  
    2.28          /* Wait up to 10 msec. */
    2.29          main_loop_wait(10);
    2.30 -
    2.31 -        if (env->send_event) {
    2.32 -            env->send_event = 0;
    2.33 -            xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    2.34 -        }
    2.35      }
    2.36      destroy_hvm_domain();
    2.37      return 0;
     3.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Thu Nov 09 16:50:44 2006 +0000
     3.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Thu Nov 09 17:23:58 2006 +0000
     3.3 @@ -49,7 +49,7 @@ void vmx_io_assist(struct vcpu *v)
     3.4      p = &vio->vp_ioreq;
     3.5  
     3.6      if (p->state == STATE_IORESP_READY) {
     3.7 -        p->state = STATE_INVALID;
     3.8 +        p->state = STATE_IOREQ_NONE;
     3.9      }
    3.10      else {
    3.11          /* Can't block here, for the same reason as other places to
    3.12 @@ -65,7 +65,7 @@ void vmx_send_assist_req(struct vcpu *v)
    3.13      ioreq_t *p;
    3.14  
    3.15      p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    3.16 -    if (unlikely(p->state != STATE_INVALID)) {
    3.17 +    if (unlikely(p->state != STATE_IOREQ_NONE)) {
    3.18          /* This indicates a bug in the device model.  Crash the
    3.19             domain. */
    3.20          printk("Device model set bad IO state %d.\n", p->state);
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Nov 09 16:50:44 2006 +0000
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Nov 09 17:23:58 2006 +0000
     4.3 @@ -60,10 +60,8 @@ struct hvm_function_table hvm_funcs;
     4.4  void hvm_stts(struct vcpu *v)
     4.5  {
     4.6      /* FPU state already dirty? Then no need to setup_fpu() lazily. */
     4.7 -    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
     4.8 -        return;
     4.9 -    
    4.10 -    hvm_funcs.stts(v);
    4.11 +    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
    4.12 +        hvm_funcs.stts(v);
    4.13  }
    4.14  
    4.15  void hvm_set_guest_time(struct vcpu *v, u64 gtime)
    4.16 @@ -79,34 +77,40 @@ void hvm_set_guest_time(struct vcpu *v, 
    4.17  void hvm_do_resume(struct vcpu *v)
    4.18  {
    4.19      ioreq_t *p;
    4.20 -    struct periodic_time *pt =
    4.21 -        &v->domain->arch.hvm_domain.pl_time.periodic_tm;
    4.22 +    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
    4.23  
    4.24      hvm_stts(v);
    4.25  
    4.26 -    /* pick up the elapsed PIT ticks and re-enable pit_timer */
    4.27 -    if ( pt->enabled && v->vcpu_id == pt->bind_vcpu && pt->first_injected ) {
    4.28 -        if ( v->arch.hvm_vcpu.guest_time ) {
    4.29 +    /* Pick up the elapsed PIT ticks and re-enable pit_timer. */
    4.30 +    if ( pt->enabled && (v->vcpu_id == pt->bind_vcpu) && pt->first_injected )
    4.31 +    {
    4.32 +        if ( v->arch.hvm_vcpu.guest_time )
    4.33 +        {
    4.34              hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    4.35              v->arch.hvm_vcpu.guest_time = 0;
    4.36          }
    4.37          pickup_deactive_ticks(pt);
    4.38      }
    4.39  
    4.40 +    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    4.41      p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    4.42 -    wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
    4.43 -                              p->state != STATE_IOREQ_READY &&
    4.44 -                              p->state != STATE_IOREQ_INPROCESS);
    4.45 -    switch ( p->state )
    4.46 +    while ( p->state != STATE_IOREQ_NONE )
    4.47      {
    4.48 -    case STATE_IORESP_READY:
    4.49 -        hvm_io_assist(v);
    4.50 -        break;
    4.51 -    case STATE_INVALID:
    4.52 -        break;
    4.53 -    default:
    4.54 -        printk("Weird HVM iorequest state %d.\n", p->state);
    4.55 -        domain_crash(v->domain);
    4.56 +        switch ( p->state )
    4.57 +        {
    4.58 +        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
    4.59 +            hvm_io_assist(v);
    4.60 +            break;
    4.61 +        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
    4.62 +        case STATE_IOREQ_INPROCESS:
    4.63 +            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
    4.64 +                                      (p->state != STATE_IOREQ_READY) &&
    4.65 +                                      (p->state != STATE_IOREQ_INPROCESS));
    4.66 +            break;
    4.67 +        default:
    4.68 +            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
    4.69 +            domain_crash_synchronous();
    4.70 +        }
    4.71      }
    4.72  }
    4.73  
     5.1 --- a/xen/arch/x86/hvm/io.c	Thu Nov 09 16:50:44 2006 +0000
     5.2 +++ b/xen/arch/x86/hvm/io.c	Thu Nov 09 17:23:58 2006 +0000
     5.3 @@ -736,27 +736,25 @@ void hvm_io_assist(struct vcpu *v)
     5.4  
     5.5      io_opp = &v->arch.hvm_vcpu.io_op;
     5.6      regs   = &io_opp->io_context;
     5.7 -
     5.8 -    vio = get_vio(v->domain, v->vcpu_id);
     5.9 +    vio    = get_vio(v->domain, v->vcpu_id);
    5.10  
    5.11 -    if ( vio == 0 ) {
    5.12 -        printk("bad shared page: %lx\n", (unsigned long)vio);
    5.13 +    p = &vio->vp_ioreq;
    5.14 +    if ( p->state != STATE_IORESP_READY )
    5.15 +    {
    5.16 +        gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
    5.17          domain_crash_synchronous();
    5.18      }
    5.19  
    5.20 -    p = &vio->vp_ioreq;
    5.21 +    p->state = STATE_IOREQ_NONE;
    5.22  
    5.23 -    if ( p->state == STATE_IORESP_READY ) {
    5.24 -        p->state = STATE_INVALID;
    5.25 -        if ( p->type == IOREQ_TYPE_PIO )
    5.26 -            hvm_pio_assist(regs, p, io_opp);
    5.27 -        else
    5.28 -            hvm_mmio_assist(regs, p, io_opp);
    5.29 +    if ( p->type == IOREQ_TYPE_PIO )
    5.30 +        hvm_pio_assist(regs, p, io_opp);
    5.31 +    else
    5.32 +        hvm_mmio_assist(regs, p, io_opp);
    5.33  
    5.34 -        /* Copy register changes back into current guest state. */
    5.35 -        hvm_load_cpu_guest_regs(v, regs);
    5.36 -        memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    5.37 -    }
    5.38 +    /* Copy register changes back into current guest state. */
    5.39 +    hvm_load_cpu_guest_regs(v, regs);
    5.40 +    memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    5.41  }
    5.42  
    5.43  /*
     6.1 --- a/xen/arch/x86/hvm/platform.c	Thu Nov 09 16:50:44 2006 +0000
     6.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Nov 09 17:23:58 2006 +0000
     6.3 @@ -727,7 +727,7 @@ static void hvm_send_assist_req(struct v
     6.4      ioreq_t *p;
     6.5  
     6.6      p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
     6.7 -    if ( unlikely(p->state != STATE_INVALID) ) {
     6.8 +    if ( unlikely(p->state != STATE_IOREQ_NONE) ) {
     6.9          /* This indicates a bug in the device model.  Crash the
    6.10             domain. */
    6.11          printk("Device model set bad IO state %d.\n", p->state);
    6.12 @@ -760,7 +760,7 @@ void send_pio_req(unsigned long port, un
    6.13      }
    6.14  
    6.15      p = &vio->vp_ioreq;
    6.16 -    if ( p->state != STATE_INVALID )
    6.17 +    if ( p->state != STATE_IOREQ_NONE )
    6.18          printk("WARNING: send pio with something already pending (%d)?\n",
    6.19                 p->state);
    6.20  
    6.21 @@ -815,7 +815,7 @@ static void send_mmio_req(unsigned char 
    6.22  
    6.23      p = &vio->vp_ioreq;
    6.24  
    6.25 -    if ( p->state != STATE_INVALID )
    6.26 +    if ( p->state != STATE_IOREQ_NONE )
    6.27          printk("WARNING: send mmio with something already pending (%d)?\n",
    6.28                 p->state);
    6.29      p->dir = dir;
     7.1 --- a/xen/include/public/hvm/ioreq.h	Thu Nov 09 16:50:44 2006 +0000
     7.2 +++ b/xen/include/public/hvm/ioreq.h	Thu Nov 09 17:23:58 2006 +0000
     7.3 @@ -27,7 +27,7 @@
     7.4  #define IOREQ_READ      1
     7.5  #define IOREQ_WRITE     0
     7.6  
     7.7 -#define STATE_INVALID           0
     7.8 +#define STATE_IOREQ_NONE        0
     7.9  #define STATE_IOREQ_READY       1
    7.10  #define STATE_IOREQ_INPROCESS   2
    7.11  #define STATE_IORESP_READY      3