direct-io.hg

changeset 10974:415614d3a1ee

[hvm/qemu] Flip the device model over to using the new Xen event channels
support.

Signed-off-by: Steven Smith <ssmith@xensource.com>
author sos22@douglas.cl.cam.ac.uk
date Tue Aug 08 11:19:29 2006 +0100 (2006-08-08)
parents 857e7b864bb0
children 88a42cf59f46
files tools/ioemu/target-i386-dm/helper2.c tools/libxc/xc_hvm_build.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vcpu.h xen/include/public/hvm/ioreq.h xen/include/xen/event.h
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Tue Aug 08 11:17:52 2006 +0100
     1.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Tue Aug 08 11:19:29 2006 +0100
     1.3 @@ -82,6 +82,10 @@ int xce_handle = -1;
     1.4  /* which vcpu we are serving */
     1.5  int send_vcpu = 0;
     1.6  
     1.7 +// The evtchn ports used to poll for ioreq notifications, one per vcpu.
     1.8 +#define NR_CPUS 32
     1.9 +evtchn_port_t ioreq_local_port[NR_CPUS];
    1.10 +
    1.11  CPUX86State *cpu_x86_init(void)
    1.12  {
    1.13      CPUX86State *env;
    1.14 @@ -113,7 +117,7 @@ CPUX86State *cpu_x86_init(void)
    1.15                  fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
    1.16                  return NULL;
    1.17              }
    1.18 -            shared_page->vcpu_iodata[i].dm_eport = rc;
    1.19 +            ioreq_local_port[i] = rc;
    1.20          }
    1.21      }
    1.22  
    1.23 @@ -184,8 +188,7 @@ void sp_info()
    1.24  
    1.25      for (i = 0; i < vcpus; i++) {
    1.26          req = &(shared_page->vcpu_iodata[i].vp_ioreq);
    1.27 -        term_printf("vcpu %d: event port %d\n", i,
    1.28 -                    shared_page->vcpu_iodata[i].vp_eport);
    1.29 +        term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
    1.30          term_printf("  req state: %x, pvalid: %x, addr: %"PRIx64", "
    1.31                      "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
    1.32                      req->state, req->pdata_valid, req->addr,
    1.33 @@ -204,6 +207,7 @@ static ioreq_t *__cpu_get_ioreq(int vcpu
    1.34  
    1.35      if (req->state == STATE_IOREQ_READY) {
    1.36          req->state = STATE_IOREQ_INPROCESS;
    1.37 +        rmb();
    1.38          return req;
    1.39      }
    1.40  
    1.41 @@ -226,7 +230,7 @@ static ioreq_t *cpu_get_ioreq(void)
    1.42      port = xc_evtchn_pending(xce_handle);
    1.43      if (port != -1) {
    1.44          for ( i = 0; i < vcpus; i++ )
    1.45 -            if ( shared_page->vcpu_iodata[i].dm_eport == port )
    1.46 +            if ( ioreq_local_port[i] == port )
    1.47                  break;
    1.48  
    1.49          if ( i == vcpus ) {
    1.50 @@ -447,8 +451,10 @@ void cpu_handle_ioreq(void *opaque)
    1.51          }
    1.52  
    1.53          /* No state change if state = STATE_IORESP_HOOK */
    1.54 -        if (req->state == STATE_IOREQ_INPROCESS)
    1.55 +        if (req->state == STATE_IOREQ_INPROCESS) {
    1.56 +            mb();
    1.57              req->state = STATE_IORESP_READY;
    1.58 +        }
    1.59          env->send_event = 1;
    1.60      }
    1.61  }
    1.62 @@ -479,8 +485,7 @@ int main_loop(void)
    1.63  
    1.64          if (env->send_event) {
    1.65              env->send_event = 0;
    1.66 -            xc_evtchn_notify(xce_handle,
    1.67 -                             shared_page->vcpu_iodata[send_vcpu].dm_eport);
    1.68 +            xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    1.69          }
    1.70      }
    1.71      destroy_hvm_domain();
     2.1 --- a/tools/libxc/xc_hvm_build.c	Tue Aug 08 11:17:52 2006 +0100
     2.2 +++ b/tools/libxc/xc_hvm_build.c	Tue Aug 08 11:19:29 2006 +0100
     2.3 @@ -294,26 +294,13 @@ static int setup_guest(int xc_handle,
     2.4          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
     2.5      munmap(shared_info, PAGE_SIZE);
     2.6  
     2.7 -    /* Populate the event channel port in the shared page */
     2.8 +    /* Paranoia */
     2.9      shared_page_frame = page_array[(v_end >> PAGE_SHIFT) - 1];
    2.10      if ( (sp = (shared_iopage_t *) xc_map_foreign_range(
    2.11                xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
    2.12                shared_page_frame)) == 0 )
    2.13          goto error_out;
    2.14      memset(sp, 0, PAGE_SIZE);
    2.15 -
    2.16 -    /* FIXME: how about if we overflow the page here? */
    2.17 -    for ( i = 0; i < vcpus; i++ ) {
    2.18 -        unsigned int vp_eport;
    2.19 -
    2.20 -        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
    2.21 -        if ( vp_eport < 0 ) {
    2.22 -            PERROR("Couldn't get unbound port from VMX guest.\n");
    2.23 -            goto error_out;
    2.24 -        }
    2.25 -        sp->vcpu_iodata[i].vp_eport = vp_eport;
    2.26 -    }
    2.27 -
    2.28      munmap(sp, PAGE_SIZE);
    2.29  
    2.30      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Aug 08 11:17:52 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Tue Aug 08 11:19:29 2006 +0100
     3.3 @@ -29,6 +29,7 @@
     3.4  #include <xen/domain_page.h>
     3.5  #include <xen/hypercall.h>
     3.6  #include <xen/guest_access.h>
     3.7 +#include <xen/event.h>
     3.8  #include <asm/current.h>
     3.9  #include <asm/io.h>
    3.10  #include <asm/shadow.h>
    3.11 @@ -160,6 +161,29 @@ void hvm_map_io_shared_page(struct vcpu 
    3.12      d->arch.hvm_domain.shared_page_va = (unsigned long)p;
    3.13  }
    3.14  
    3.15 +void hvm_create_event_channels(struct vcpu *v)
    3.16 +{
    3.17 +    vcpu_iodata_t *p;
    3.18 +    struct vcpu *o;
    3.19 +
    3.20 +    if ( v->vcpu_id == 0 ) {
    3.21 +        /* Ugly: create event channels for every vcpu when vcpu 0
    3.22 +           starts, so that they're available for ioemu to bind to. */
    3.23 +        for_each_vcpu(v->domain, o) {
    3.24 +            p = get_vio(v->domain, o->vcpu_id);
    3.25 +            o->arch.hvm_vcpu.xen_port = p->vp_eport =
    3.26 +                alloc_unbound_xen_event_channel(o, 0);
    3.27 +            DPRINTK("Allocated port %d for hvm.\n", o->arch.hvm_vcpu.xen_port);
    3.28 +        }
    3.29 +    }
    3.30 +}
    3.31 +
    3.32 +void hvm_release_assist_channel(struct vcpu *v)
    3.33 +{
    3.34 +    free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
    3.35 +}
    3.36 +
    3.37 +
    3.38  void hvm_setup_platform(struct domain* d)
    3.39  {
    3.40      struct hvm_domain *platform;
     4.1 --- a/xen/arch/x86/hvm/intercept.c	Tue Aug 08 11:17:52 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/intercept.c	Tue Aug 08 11:19:29 2006 +0100
     4.3 @@ -211,7 +211,7 @@ void hlt_timer_fn(void *data)
     4.4  {
     4.5      struct vcpu *v = data;
     4.6  
     4.7 -    evtchn_set_pending(v, iopacket_port(v));
     4.8 +    hvm_prod_vcpu(v);
     4.9  }
    4.10  
    4.11  static __inline__ void missed_ticks(struct periodic_time *pt)
     5.1 --- a/xen/arch/x86/hvm/io.c	Tue Aug 08 11:17:52 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/io.c	Tue Aug 08 11:19:29 2006 +0100
     5.3 @@ -687,84 +687,17 @@ void hvm_io_assist(struct vcpu *v)
     5.4  
     5.5      p = &vio->vp_ioreq;
     5.6  
     5.7 -    /* clear IO wait HVM flag */
     5.8 -    if ( test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) ) {
     5.9 -        if ( p->state == STATE_IORESP_READY ) {
    5.10 -            p->state = STATE_INVALID;
    5.11 -            clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
    5.12 -
    5.13 -            if ( p->type == IOREQ_TYPE_PIO )
    5.14 -                hvm_pio_assist(regs, p, io_opp);
    5.15 -            else
    5.16 -                hvm_mmio_assist(regs, p, io_opp);
    5.17 -
    5.18 -            /* Copy register changes back into current guest state. */
    5.19 -            hvm_load_cpu_guest_regs(v, regs);
    5.20 -            memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    5.21 -        }
    5.22 -        /* else an interrupt send event raced us */
    5.23 -    }
    5.24 -}
    5.25 -
    5.26 -/*
    5.27 - * On exit from hvm_wait_io, we're guaranteed not to be waiting on
    5.28 - * I/O response from the device model.
    5.29 - */
    5.30 -void hvm_wait_io(void)
    5.31 -{
    5.32 -    struct vcpu *v = current;
    5.33 -    struct domain *d = v->domain;
    5.34 -    int port = iopacket_port(v);
    5.35 -
    5.36 -    for ( ; ; )
    5.37 -    {
    5.38 -        /* Clear master flag, selector flag, event flag each in turn. */
    5.39 -        v->vcpu_info->evtchn_upcall_pending = 0;
    5.40 -        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    5.41 -        smp_mb__after_clear_bit();
    5.42 -        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
    5.43 -            hvm_io_assist(v);
    5.44 +    if ( p->state == STATE_IORESP_READY ) {
    5.45 +        p->state = STATE_INVALID;
    5.46 +        if ( p->type == IOREQ_TYPE_PIO )
    5.47 +            hvm_pio_assist(regs, p, io_opp);
    5.48 +        else
    5.49 +            hvm_mmio_assist(regs, p, io_opp);
    5.50  
    5.51 -        /* Need to wait for I/O responses? */
    5.52 -        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
    5.53 -            break;
    5.54 -
    5.55 -        do_sched_op_compat(SCHEDOP_block, 0);
    5.56 +        /* Copy register changes back into current guest state. */
    5.57 +        hvm_load_cpu_guest_regs(v, regs);
    5.58 +        memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    5.59      }
    5.60 -
    5.61 -    /*
    5.62 -     * Re-set the selector and master flags in case any other notifications
    5.63 -     * are pending.
    5.64 -     */
    5.65 -    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
    5.66 -        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    5.67 -    if ( v->vcpu_info->evtchn_pending_sel )
    5.68 -        v->vcpu_info->evtchn_upcall_pending = 1;
    5.69 -}
    5.70 -
    5.71 -void hvm_safe_block(void)
    5.72 -{
    5.73 -    struct vcpu *v = current;
    5.74 -    struct domain *d = v->domain;
    5.75 -    int port = iopacket_port(v);
    5.76 -
    5.77 -    for ( ; ; )
    5.78 -    {
    5.79 -        /* Clear master flag & selector flag so we will wake from block. */
    5.80 -        v->vcpu_info->evtchn_upcall_pending = 0;
    5.81 -        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    5.82 -        smp_mb__after_clear_bit();
    5.83 -
    5.84 -        /* Event pending already? */
    5.85 -        if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
    5.86 -            break;
    5.87 -
    5.88 -        do_sched_op_compat(SCHEDOP_block, 0);
    5.89 -    }
    5.90 -
    5.91 -    /* Reflect pending event in selector and master flags. */
    5.92 -    set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    5.93 -    v->vcpu_info->evtchn_upcall_pending = 1;
    5.94  }
    5.95  
    5.96  /*
     6.1 --- a/xen/arch/x86/hvm/platform.c	Tue Aug 08 11:17:52 2006 +0100
     6.2 +++ b/xen/arch/x86/hvm/platform.c	Tue Aug 08 11:19:29 2006 +0100
     6.3 @@ -669,6 +669,30 @@ int inst_copy_from_guest(unsigned char *
     6.4      return inst_len;
     6.5  }
     6.6  
     6.7 +static void hvm_send_assist_req(struct vcpu *v)
     6.8 +{
     6.9 +    ioreq_t *p;
    6.10 +
    6.11 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    6.12 +    if ( unlikely(p->state != STATE_INVALID) ) {
    6.13 +        /* This indicates a bug in the device model.  Crash the
    6.14 +           domain. */
    6.15 +        printf("Device model set bad IO state %d.\n", p->state);
    6.16 +        domain_crash(v->domain);
    6.17 +        return;
    6.18 +    }
    6.19 +    wmb();
    6.20 +    p->state = STATE_IOREQ_READY;
    6.21 +    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
    6.22 +}
    6.23 +
    6.24 +
     6.25 +/* Wake up a vcpu which is waiting for interrupts to come in */
    6.26 +void hvm_prod_vcpu(struct vcpu *v)
    6.27 +{
    6.28 +    vcpu_unblock(v);
    6.29 +}
    6.30 +
    6.31  void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
    6.32                    unsigned long count, int size, long value, int dir, int pvalid)
    6.33  {
    6.34 @@ -682,13 +706,10 @@ void send_pio_req(struct cpu_user_regs *
    6.35          domain_crash_synchronous();
    6.36      }
    6.37  
    6.38 -    if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
    6.39 -        printf("HVM I/O has not yet completed\n");
    6.40 -        domain_crash_synchronous();
    6.41 -    }
    6.42 -    set_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
    6.43 -
    6.44      p = &vio->vp_ioreq;
    6.45 +    if ( p->state != STATE_INVALID )
    6.46 +        printf("WARNING: send pio with something already pending (%d)?\n",
    6.47 +               p->state);
    6.48      p->dir = dir;
    6.49      p->pdata_valid = pvalid;
    6.50  
    6.51 @@ -714,10 +735,7 @@ void send_pio_req(struct cpu_user_regs *
    6.52          return;
    6.53      }
    6.54  
    6.55 -    p->state = STATE_IOREQ_READY;
    6.56 -
    6.57 -    evtchn_send(iopacket_port(v));
    6.58 -    hvm_wait_io();
    6.59 +    hvm_send_assist_req(v);
    6.60  }
    6.61  
    6.62  void send_mmio_req(
    6.63 @@ -739,12 +757,9 @@ void send_mmio_req(
    6.64  
    6.65      p = &vio->vp_ioreq;
    6.66  
    6.67 -    if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
    6.68 -        printf("HVM I/O has not yet completed\n");
    6.69 -        domain_crash_synchronous();
    6.70 -    }
    6.71 -
    6.72 -    set_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
    6.73 +    if ( p->state != STATE_INVALID )
    6.74 +        printf("WARNING: send mmio with something already pending (%d)?\n",
    6.75 +               p->state);
    6.76      p->dir = dir;
    6.77      p->pdata_valid = pvalid;
    6.78  
    6.79 @@ -770,10 +785,7 @@ void send_mmio_req(
    6.80          return;
    6.81      }
    6.82  
    6.83 -    p->state = STATE_IOREQ_READY;
    6.84 -
    6.85 -    evtchn_send(iopacket_port(v));
    6.86 -    hvm_wait_io();
    6.87 +    hvm_send_assist_req(v);
    6.88  }
    6.89  
    6.90  static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
     7.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Aug 08 11:17:52 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Aug 08 11:19:29 2006 +0100
     7.3 @@ -25,6 +25,7 @@
     7.4  #include <xen/sched.h>
     7.5  #include <xen/irq.h>
     7.6  #include <xen/softirq.h>
     7.7 +#include <xen/hypercall.h>
     7.8  #include <asm/current.h>
     7.9  #include <asm/io.h>
    7.10  #include <asm/shadow.h>
    7.11 @@ -2121,7 +2122,7 @@ static inline void svm_vmexit_do_hlt(str
    7.12          next_wakeup = next_pit;
    7.13      if ( next_wakeup != - 1 )
    7.14          set_timer(&current->arch.hvm_svm.hlt_timer, next_wakeup);
    7.15 -    hvm_safe_block();
    7.16 +    do_sched_op_compat(SCHEDOP_block, 0);
    7.17  }
    7.18  
    7.19  
     8.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Aug 08 11:17:52 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Tue Aug 08 11:19:29 2006 +0100
     8.3 @@ -370,18 +370,6 @@ void svm_do_launch(struct vcpu *v)
     8.4      if (v->vcpu_id == 0)
     8.5          hvm_setup_platform(v->domain);
     8.6  
     8.7 -    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
     8.8 -    {
     8.9 -        printk("HVM domain bind port %d to vcpu %d failed!\n",
    8.10 -               iopacket_port(v), v->vcpu_id);
    8.11 -        domain_crash_synchronous();
    8.12 -    }
    8.13 -
    8.14 -    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
    8.15 -
    8.16 -    clear_bit(iopacket_port(v),
    8.17 -              &v->domain->shared_info->evtchn_mask[0]);
    8.18 -
    8.19      if (hvm_apic_support(v->domain))
    8.20          vlapic_init(v);
    8.21      init_timer(&v->arch.hvm_svm.hlt_timer,
    8.22 @@ -439,10 +427,12 @@ void set_hsa_to_guest( struct arch_svm_s
    8.23  /* 
    8.24   * Resume the guest.
    8.25   */
    8.26 +/* XXX svm_do_resume and vmx_do_resume are remarkably similar; could
    8.27 +   they be unified? */
    8.28  void svm_do_resume(struct vcpu *v) 
    8.29  {
    8.30 -    struct domain *d = v->domain;
    8.31 -    struct periodic_time *pt = &d->arch.hvm_domain.pl_time.periodic_tm;
    8.32 +    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
    8.33 +    ioreq_t *p;
    8.34  
    8.35      svm_stts(v);
    8.36  
    8.37 @@ -455,12 +445,16 @@ void svm_do_resume(struct vcpu *v)
    8.38          pickup_deactive_ticks(pt);
    8.39      }
    8.40  
    8.41 -    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
    8.42 -         test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
    8.43 -        hvm_wait_io();
    8.44 -
    8.45 -    /* We can't resume the guest if we're waiting on I/O */
    8.46 -    ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
    8.47 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
     8.48 +    wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
    8.49 +                              p->state != STATE_IOREQ_READY &&
    8.50 +                              p->state != STATE_IOREQ_INPROCESS);
    8.51 +    if ( p->state == STATE_IORESP_READY )
    8.52 +        hvm_io_assist(v);
    8.53 +    if ( p->state != STATE_INVALID ) {
    8.54 +        printf("Weird HVM iorequest state %d.\n", p->state);
    8.55 +        domain_crash(v->domain);
    8.56 +    }
    8.57  }
    8.58  
    8.59  void svm_launch_fail(unsigned long eflags)
     9.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Tue Aug 08 11:17:52 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Tue Aug 08 11:19:29 2006 +0100
     9.3 @@ -132,6 +132,9 @@ ENTRY(svm_asm_do_launch)
     9.4  ENTRY(svm_asm_do_resume)
     9.5  svm_test_all_events:
     9.6          GET_CURRENT(%ebx)
     9.7 +        pushl %ebx
     9.8 +        call svm_do_resume
     9.9 +        addl $4, %esp
    9.10  /*test_all_events:*/
    9.11          xorl %ecx,%ecx
    9.12          notl %ecx
    10.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Tue Aug 08 11:17:52 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Tue Aug 08 11:19:29 2006 +0100
    10.3 @@ -147,6 +147,8 @@ ENTRY(svm_asm_do_launch)
    10.4  ENTRY(svm_asm_do_resume)
    10.5  svm_test_all_events:
    10.6  	GET_CURRENT(%rbx)
    10.7 +        movq %rbx, %rdi
    10.8 +        call svm_do_resume
    10.9  /*test_all_events:*/
   10.10          cli                             # tests must not race interrupts
   10.11  /*test_softirqs:*/
    11.1 --- a/xen/arch/x86/hvm/vlapic.c	Tue Aug 08 11:17:52 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/vlapic.c	Tue Aug 08 11:19:29 2006 +0100
    11.3 @@ -232,7 +232,7 @@ static int vlapic_accept_irq(struct vcpu
    11.4                "level trig mode for vector %d\n", vector);
    11.5              set_bit(vector, vlapic->regs + APIC_TMR);
    11.6          }
    11.7 -        evtchn_set_pending(v, iopacket_port(v));
    11.8 +        hvm_prod_vcpu(v);
    11.9  
   11.10          result = 1;
   11.11          break;
    12.1 --- a/xen/arch/x86/hvm/vmx/io.c	Tue Aug 08 11:17:52 2006 +0100
    12.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Tue Aug 08 11:19:29 2006 +0100
    12.3 @@ -221,7 +221,7 @@ asmlinkage void vmx_intr_assist(void)
    12.4  
    12.5  void vmx_do_resume(struct vcpu *v)
    12.6  {
    12.7 -    struct domain *d = v->domain;
    12.8 +    ioreq_t *p;
    12.9      struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
   12.10  
   12.11      vmx_stts();
   12.12 @@ -235,12 +235,16 @@ void vmx_do_resume(struct vcpu *v)
   12.13          pickup_deactive_ticks(pt);
   12.14      }
   12.15  
   12.16 -    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
   12.17 -         test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
   12.18 -        hvm_wait_io();
   12.19 -
   12.20 -    /* We can't resume the guest if we're waiting on I/O */
   12.21 -    ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
   12.22 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    12.23 +    wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
   12.24 +                              p->state != STATE_IOREQ_READY &&
   12.25 +                              p->state != STATE_IOREQ_INPROCESS);
   12.26 +    if ( p->state == STATE_IORESP_READY )
   12.27 +        hvm_io_assist(v);
   12.28 +    if ( p->state != STATE_INVALID ) {
   12.29 +        printf("Weird HVM iorequest state %d.\n", p->state);
   12.30 +        domain_crash(v->domain);
   12.31 +    }
   12.32  }
   12.33  
   12.34  /*
    13.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Tue Aug 08 11:17:52 2006 +0100
    13.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Aug 08 11:19:29 2006 +0100
    13.3 @@ -245,18 +245,6 @@ static void vmx_do_launch(struct vcpu *v
    13.4      if (v->vcpu_id == 0)
    13.5          hvm_setup_platform(v->domain);
    13.6  
    13.7 -    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    13.8 -    {
    13.9 -        printk("VMX domain bind port %d to vcpu %d failed!\n",
   13.10 -               iopacket_port(v), v->vcpu_id);
   13.11 -        domain_crash_synchronous();
   13.12 -    }
   13.13 -
   13.14 -    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
   13.15 -
   13.16 -    clear_bit(iopacket_port(v),
   13.17 -              &v->domain->shared_info->evtchn_mask[0]);
   13.18 -
   13.19      __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
   13.20  
   13.21      error |= __vmwrite(GUEST_CR0, cr0);
    14.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 11:17:52 2006 +0100
    14.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 11:19:29 2006 +0100
    14.3 @@ -25,6 +25,7 @@
    14.4  #include <xen/irq.h>
    14.5  #include <xen/softirq.h>
    14.6  #include <xen/domain_page.h>
    14.7 +#include <xen/hypercall.h>
    14.8  #include <asm/current.h>
    14.9  #include <asm/io.h>
   14.10  #include <asm/shadow.h>
   14.11 @@ -141,6 +142,7 @@ static void vmx_relinquish_guest_resourc
   14.12              free_domheap_page(VLAPIC(v)->regs_page);
   14.13              xfree(VLAPIC(v));
   14.14          }
   14.15 +        hvm_release_assist_channel(v);
   14.16      }
   14.17  
   14.18      kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
   14.19 @@ -2014,7 +2016,7 @@ void vmx_vmexit_do_hlt(void)
   14.20          next_wakeup = next_pit;
   14.21      if ( next_wakeup != - 1 ) 
   14.22          set_timer(&current->arch.hvm_vmx.hlt_timer, next_wakeup);
   14.23 -    hvm_safe_block();
   14.24 +    do_sched_op_compat(SCHEDOP_block, 0);
   14.25  }
   14.26  
   14.27  static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
    15.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Tue Aug 08 11:17:52 2006 +0100
    15.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Tue Aug 08 11:19:29 2006 +0100
    15.3 @@ -94,6 +94,9 @@ vmx_process_softirqs:
    15.4          ALIGN
    15.5  ENTRY(vmx_asm_do_vmentry)
    15.6          GET_CURRENT(%ebx)
    15.7 +        pushl %ebx
    15.8 +        call vmx_do_resume
    15.9 +        addl $4, %esp
   15.10          cli                             # tests must not race interrupts
   15.11  
   15.12          movl VCPU_processor(%ebx),%eax
    16.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Tue Aug 08 11:17:52 2006 +0100
    16.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Tue Aug 08 11:19:29 2006 +0100
    16.3 @@ -105,6 +105,8 @@ vmx_process_softirqs:
    16.4          ALIGN
    16.5  ENTRY(vmx_asm_do_vmentry)
    16.6          GET_CURRENT(%rbx)
    16.7 +        movq %rbx, %rdi
    16.8 +        call vmx_do_resume
    16.9          cli                             # tests must not race interrupts
   16.10  
   16.11          movl  VCPU_processor(%rbx),%eax
    17.1 --- a/xen/include/asm-x86/hvm/hvm.h	Tue Aug 08 11:17:52 2006 +0100
    17.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Tue Aug 08 11:19:29 2006 +0100
    17.3 @@ -77,6 +77,7 @@ hvm_disable(void)
    17.4          hvm_funcs.disable();
    17.5  }
    17.6  
    17.7 +void hvm_create_event_channels(struct vcpu *v);
    17.8  void hvm_map_io_shared_page(struct vcpu *v);
    17.9  
   17.10  static inline int
   17.11 @@ -85,8 +86,10 @@ hvm_initialize_guest_resources(struct vc
   17.12      int ret = 1;
   17.13      if ( hvm_funcs.initialize_guest_resources )
   17.14          ret = hvm_funcs.initialize_guest_resources(v);
   17.15 -    if ( ret == 1 )
   17.16 +    if ( ret == 1 ) {
   17.17          hvm_map_io_shared_page(v);
   17.18 +        hvm_create_event_channels(v);
   17.19 +    }
   17.20      return ret;
   17.21  }
   17.22  
    18.1 --- a/xen/include/asm-x86/hvm/io.h	Tue Aug 08 11:17:52 2006 +0100
    18.2 +++ b/xen/include/asm-x86/hvm/io.h	Tue Aug 08 11:19:29 2006 +0100
    18.3 @@ -150,13 +150,12 @@ static inline int irq_masked(unsigned lo
    18.4  #endif
    18.5  
    18.6  extern void handle_mmio(unsigned long, unsigned long);
    18.7 -extern void hvm_wait_io(void);
    18.8 -extern void hvm_safe_block(void);
    18.9  extern void hvm_io_assist(struct vcpu *v);
   18.10  extern void pic_irq_request(void *data, int level);
   18.11  extern void hvm_pic_assist(struct vcpu *v);
   18.12  extern int cpu_get_interrupt(struct vcpu *v, int *type);
   18.13  extern int cpu_has_pending_irq(struct vcpu *v);
   18.14 +extern void hvm_release_assist_channel(struct vcpu *v);
   18.15  
   18.16  // XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame.
   18.17  #define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT)))
    19.1 --- a/xen/include/asm-x86/hvm/support.h	Tue Aug 08 11:17:52 2006 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/support.h	Tue Aug 08 11:19:29 2006 +0100
    19.3 @@ -44,11 +44,6 @@ static inline vcpu_iodata_t *get_vio(str
    19.4      return &get_sp(d)->vcpu_iodata[cpu];
    19.5  }
    19.6  
    19.7 -static inline int iopacket_port(struct vcpu *v)
    19.8 -{
    19.9 -    return get_vio(v->domain, v->vcpu_id)->vp_eport;
   19.10 -}
   19.11 -
   19.12  /* XXX these are really VMX specific */
   19.13  #define TYPE_MOV_TO_DR          (0 << 4)
   19.14  #define TYPE_MOV_FROM_DR        (1 << 4)
   19.15 @@ -150,4 +145,6 @@ extern void hlt_timer_fn(void *data);
   19.16  
   19.17  void hvm_do_hypercall(struct cpu_user_regs *pregs);
   19.18  
   19.19 +void hvm_prod_vcpu(struct vcpu *v);
   19.20 +
   19.21  #endif /* __ASM_X86_HVM_SUPPORT_H__ */
    20.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Tue Aug 08 11:17:52 2006 +0100
    20.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Tue Aug 08 11:19:29 2006 +0100
    20.3 @@ -38,6 +38,8 @@ struct hvm_vcpu {
    20.4      /* For AP startup */
    20.5      unsigned long       init_sipi_sipi_state;
    20.6  
    20.7 +    int                 xen_port;
    20.8 +
    20.9      /* Flags */
   20.10      int                 flag_dr_dirty;
   20.11  
    21.1 --- a/xen/include/public/hvm/ioreq.h	Tue Aug 08 11:17:52 2006 +0100
    21.2 +++ b/xen/include/public/hvm/ioreq.h	Tue Aug 08 11:19:29 2006 +0100
    21.3 @@ -69,7 +69,6 @@ struct vcpu_iodata {
    21.4      struct ioreq         vp_ioreq;
    21.5      /* Event channel port */
    21.6      unsigned int    vp_eport;   /* VMX vcpu uses this to notify DM */
    21.7 -    unsigned int    dm_eport;   /* DM uses this to notify VMX vcpu */
    21.8  };
    21.9  typedef struct vcpu_iodata vcpu_iodata_t;
   21.10  
    22.1 --- a/xen/include/xen/event.h	Tue Aug 08 11:17:52 2006 +0100
    22.2 +++ b/xen/include/xen/event.h	Tue Aug 08 11:19:29 2006 +0100
    22.3 @@ -12,6 +12,7 @@
    22.4  #include <xen/config.h>
    22.5  #include <xen/sched.h>
    22.6  #include <xen/smp.h>
    22.7 +#include <xen/softirq.h>
    22.8  #include <asm/bitops.h>
    22.9  #include <asm/event.h>
   22.10