ia64/xen-unstable

changeset 8970:b5bb9920bf48

Per-vcpu IO evtchn patch for HVM domains.
This is the first of a series of patches to support SMP VMX guests.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Feb 23 11:22:25 2006 +0100 (2006-02-23)
parents 175ad739d8bc
children cb14f4db7a1e
files tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c tools/libxc/xc_hvm_build.c tools/libxc/xenguest.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/image.py xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/common/event_channel.c xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/support.h xen/include/public/hvm/ioreq.h xen/include/xen/event.h
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Wed Feb 22 21:52:30 2006 +0100
     1.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Thu Feb 23 11:22:25 2006 +0100
     1.3 @@ -125,9 +125,8 @@ target_ulong cpu_get_phys_page_debug(CPU
     1.4  //the evtchn fd for polling
     1.5  int evtchn_fd = -1;
     1.6  
     1.7 -//the evtchn port for polling the notification,
     1.8 -//should be inputed as bochs's parameter
     1.9 -evtchn_port_t ioreq_remote_port, ioreq_local_port;
    1.10 +//which vcpu we are serving
    1.11 +int send_vcpu = 0;
    1.12  
    1.13  //some functions to handle the io req packet
    1.14  void sp_info()
    1.15 @@ -135,52 +134,62 @@ void sp_info()
    1.16      ioreq_t *req;
    1.17      int i;
    1.18  
    1.19 -    term_printf("event port: %d\n", shared_page->sp_global.eport);
    1.20      for ( i = 0; i < vcpus; i++ ) {
    1.21          req = &(shared_page->vcpu_iodata[i].vp_ioreq);
    1.22 -        term_printf("vcpu %d:\n", i);
    1.23 +        term_printf("vcpu %d: event port %d\n",
    1.24 +                    i, shared_page->vcpu_iodata[i].vp_eport);
    1.25          term_printf("  req state: %x, pvalid: %x, addr: %llx, "
    1.26                      "data: %llx, count: %llx, size: %llx\n",
    1.27                      req->state, req->pdata_valid, req->addr,
    1.28                      req->u.data, req->count, req->size);
    1.29 +        term_printf("  IO totally occurred on this vcpu: %llx\n",
    1.30 +                    req->io_count);
    1.31      }
    1.32  }
    1.33  
    1.34  //get the ioreq packets from share mem
    1.35 -ioreq_t* __cpu_get_ioreq(void)
    1.36 +static ioreq_t* __cpu_get_ioreq(int vcpu)
    1.37  {
    1.38      ioreq_t *req;
    1.39  
    1.40 -    req = &(shared_page->vcpu_iodata[0].vp_ioreq);
    1.41 -    if (req->state == STATE_IOREQ_READY) {
    1.42 -        req->state = STATE_IOREQ_INPROCESS;
    1.43 -    } else {
    1.44 -        fprintf(logfile, "False I/O request ... in-service already: "
    1.45 -                         "%x, pvalid: %x, port: %llx, "
    1.46 -                         "data: %llx, count: %llx, size: %llx\n",
    1.47 -                         req->state, req->pdata_valid, req->addr,
    1.48 -                         req->u.data, req->count, req->size);
    1.49 -        req = NULL;
    1.50 -    }
    1.51 +    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
    1.52 +
    1.53 +    if ( req->state == STATE_IOREQ_READY )
    1.54 +        return req;
    1.55  
    1.56 -    return req;
    1.57 +    fprintf(logfile, "False I/O request ... in-service already: "
    1.58 +                     "%x, pvalid: %x, port: %llx, "
    1.59 +                     "data: %llx, count: %llx, size: %llx\n",
    1.60 +                     req->state, req->pdata_valid, req->addr,
    1.61 +                     req->u.data, req->count, req->size);
    1.62 +    return NULL;
    1.63  }
    1.64  
    1.65  //use poll to get the port notification
    1.66  //ioreq_vec--out,the
    1.67  //retval--the number of ioreq packet
    1.68 -ioreq_t* cpu_get_ioreq(void)
    1.69 +static ioreq_t* cpu_get_ioreq(void)
    1.70  {
    1.71 -    int rc;
    1.72 +    int i, rc;
    1.73      evtchn_port_t port;
    1.74  
    1.75      rc = read(evtchn_fd, &port, sizeof(port));
    1.76 -    if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
    1.77 +    if ( rc == sizeof(port) ) {
    1.78 +        for ( i = 0; i < vcpus; i++ )
    1.79 +            if ( shared_page->vcpu_iodata[i].dm_eport == port )
    1.80 +                break;
    1.81 +
    1.82 +        if ( i == vcpus ) {
    1.83 +            fprintf(logfile, "Fatal error while trying to get io event!\n");
    1.84 +            exit(1);
    1.85 +        }
    1.86 +
    1.87          // unmask the wanted port again
    1.88 -        write(evtchn_fd, &ioreq_local_port, sizeof(port));
    1.89 +        write(evtchn_fd, &port, sizeof(port));
    1.90  
    1.91          //get the io packet from shared memory
    1.92 -        return __cpu_get_ioreq();
    1.93 +        send_vcpu = i;
    1.94 +        return __cpu_get_ioreq(i);
    1.95      }
    1.96  
    1.97      //read error or read nothing
    1.98 @@ -361,6 +370,8 @@ void cpu_handle_ioreq(CPUState *env)
    1.99      ioreq_t *req = cpu_get_ioreq();
   1.100  
   1.101      if (req) {
   1.102 +        req->state = STATE_IOREQ_INPROCESS;
   1.103 +
   1.104          if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
   1.105              if (req->size != 4)
   1.106                  req->u.data &= (1UL << (8 * req->size))-1;
   1.107 @@ -465,7 +476,7 @@ int main_loop(void)
   1.108              struct ioctl_evtchn_notify notify;
   1.109  
   1.110              env->send_event = 0;
   1.111 -            notify.port = ioreq_local_port;
   1.112 +            notify.port = shared_page->vcpu_iodata[send_vcpu].dm_eport;
   1.113              (void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
   1.114          }
   1.115      }
   1.116 @@ -488,7 +499,7 @@ CPUState * cpu_init()
   1.117  {
   1.118      CPUX86State *env;
   1.119      struct ioctl_evtchn_bind_interdomain bind;
   1.120 -    int rc;
   1.121 +    int i, rc;
   1.122  
   1.123      cpu_exec_init();
   1.124      qemu_register_reset(qemu_hvm_reset, NULL);
   1.125 @@ -509,14 +520,17 @@ CPUState * cpu_init()
   1.126          return NULL;
   1.127      }
   1.128  
   1.129 +    /* FIXME: how about if we overflow the page here? */
   1.130      bind.remote_domain = domid;
   1.131 -    bind.remote_port   = ioreq_remote_port;
   1.132 -    rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
   1.133 -    if (rc == -1) {
   1.134 -        fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
   1.135 -        return NULL;
   1.136 +    for ( i = 0; i < vcpus; i++ ) {
   1.137 +        bind.remote_port = shared_page->vcpu_iodata[i].vp_eport;
   1.138 +        rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
   1.139 +        if ( rc == -1 ) {
   1.140 +            fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
   1.141 +            return NULL;
   1.142 +        }
   1.143 +        shared_page->vcpu_iodata[i].dm_eport = rc;
   1.144      }
   1.145 -    ioreq_local_port = rc;
   1.146  
   1.147      return env;
   1.148  }
     2.1 --- a/tools/ioemu/vl.c	Wed Feb 22 21:52:30 2006 +0100
     2.2 +++ b/tools/ioemu/vl.c	Thu Feb 23 11:22:25 2006 +0100
     2.3 @@ -2337,7 +2337,6 @@ enum {
     2.4  
     2.5      QEMU_OPTION_S,
     2.6      QEMU_OPTION_s,
     2.7 -    QEMU_OPTION_p,
     2.8      QEMU_OPTION_d,
     2.9      QEMU_OPTION_l,
    2.10      QEMU_OPTION_hdachs,
    2.11 @@ -2414,7 +2413,6 @@ const QEMUOption qemu_options[] = {
    2.12  
    2.13      { "S", 0, QEMU_OPTION_S },
    2.14      { "s", 0, QEMU_OPTION_s },
    2.15 -    { "p", HAS_ARG, QEMU_OPTION_p },
    2.16      { "d", HAS_ARG, QEMU_OPTION_d },
    2.17      { "l", HAS_ARG, QEMU_OPTION_l },
    2.18      { "hdachs", HAS_ARG, QEMU_OPTION_hdachs },
    2.19 @@ -2938,13 +2936,6 @@ int main(int argc, char **argv)
    2.20                      fprintf(logfile, "domid: %d\n", domid);
    2.21                  }
    2.22                  break;
    2.23 -            case QEMU_OPTION_p:
    2.24 -                {
    2.25 -                    extern evtchn_port_t ioreq_remote_port;
    2.26 -                    ioreq_remote_port = atoi(optarg);
    2.27 -                    fprintf(logfile, "eport: %d\n", ioreq_remote_port);
    2.28 -                }
    2.29 -                break;
    2.30              case QEMU_OPTION_l:
    2.31                  {
    2.32                      int mask;
     3.1 --- a/tools/libxc/xc_hvm_build.c	Wed Feb 22 21:52:30 2006 +0100
     3.2 +++ b/tools/libxc/xc_hvm_build.c	Thu Feb 23 11:22:25 2006 +0100
     3.3 @@ -175,7 +175,6 @@ static int setup_guest(int xc_handle,
     3.4                         unsigned long nr_pages,
     3.5                         vcpu_guest_context_t *ctxt,
     3.6                         unsigned long shared_info_frame,
     3.7 -                       unsigned int control_evtchn,
     3.8                         unsigned int vcpus,
     3.9                         unsigned int pae,
    3.10                         unsigned int acpi,
    3.11 @@ -284,7 +283,19 @@ static int setup_guest(int xc_handle,
    3.12           shared_page_frame)) == 0 )
    3.13          goto error_out;
    3.14      memset(sp, 0, PAGE_SIZE);
    3.15 -    sp->sp_global.eport = control_evtchn;
    3.16 +
    3.17 +    /* FIXME: how about if we overflow the page here? */
    3.18 +    for ( i = 0; i < vcpus; i++ ) {
    3.19 +        unsigned int vp_eport;
    3.20 +
    3.21 +        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
    3.22 +        if ( vp_eport < 0 ) {
    3.23 +            fprintf(stderr, "Couldn't get unbound port from VMX guest.\n");
    3.24 +            goto error_out;
    3.25 +        }
    3.26 +        sp->vcpu_iodata[i].vp_eport = vp_eport;
    3.27 +    }
    3.28 +
    3.29      munmap(sp, PAGE_SIZE);
    3.30  
    3.31      *store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
    3.32 @@ -331,7 +342,6 @@ int xc_hvm_build(int xc_handle,
    3.33                   uint32_t domid,
    3.34                   int memsize,
    3.35                   const char *image_name,
    3.36 -                 unsigned int control_evtchn,
    3.37                   unsigned int vcpus,
    3.38                   unsigned int pae,
    3.39                   unsigned int acpi,
    3.40 @@ -388,7 +398,7 @@ int xc_hvm_build(int xc_handle,
    3.41  
    3.42      ctxt->flags = VGCF_HVM_GUEST;
    3.43      if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
    3.44 -                     ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
    3.45 +                     ctxt, op.u.getdomaininfo.shared_info_frame,
    3.46                       vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
    3.47      {
    3.48          ERROR("Error constructing guest OS");
     4.1 --- a/tools/libxc/xenguest.h	Wed Feb 22 21:52:30 2006 +0100
     4.2 +++ b/tools/libxc/xenguest.h	Thu Feb 23 11:22:25 2006 +0100
     4.3 @@ -57,7 +57,6 @@ int xc_hvm_build(int xc_handle,
     4.4                   uint32_t domid,
     4.5                   int memsize,
     4.6                   const char *image_name,
     4.7 -                 unsigned int control_evtchn,
     4.8                   unsigned int vcpus,
     4.9                   unsigned int pae,
    4.10                   unsigned int acpi,
     5.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Wed Feb 22 21:52:30 2006 +0100
     5.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Thu Feb 23 11:22:25 2006 +0100
     5.3 @@ -363,7 +363,7 @@ static PyObject *pyxc_hvm_build(XcObject
     5.4  {
     5.5      uint32_t dom;
     5.6      char *image;
     5.7 -    int control_evtchn, store_evtchn;
     5.8 +    int store_evtchn;
     5.9      int memsize;
    5.10      int vcpus = 1;
    5.11      int pae  = 0;
    5.12 @@ -371,15 +371,15 @@ static PyObject *pyxc_hvm_build(XcObject
    5.13      int apic = 0;
    5.14      unsigned long store_mfn = 0;
    5.15  
    5.16 -    static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
    5.17 +    static char *kwd_list[] = { "dom", "store_evtchn",
    5.18  				"memsize", "image", "vcpus", "pae", "acpi", "apic",
    5.19  				NULL };
    5.20 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisiiii", kwd_list,
    5.21 -                                      &dom, &control_evtchn, &store_evtchn,
    5.22 -				      &memsize, &image, &vcpus, &pae, &acpi, &apic) )
    5.23 +    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiisiiii", kwd_list,
    5.24 +                                      &dom, &store_evtchn, &memsize,
    5.25 +                                      &image, &vcpus, &pae, &acpi, &apic) )
    5.26          return NULL;
    5.27  
    5.28 -    if ( xc_hvm_build(self->xc_handle, dom, memsize, image, control_evtchn,
    5.29 +    if ( xc_hvm_build(self->xc_handle, dom, memsize, image,
    5.30  		      vcpus, pae, acpi, apic, store_evtchn, &store_mfn) != 0 )
    5.31          return PyErr_SetFromErrno(xc_error);
    5.32  
     6.1 --- a/tools/python/xen/xend/image.py	Wed Feb 22 21:52:30 2006 +0100
     6.2 +++ b/tools/python/xen/xend/image.py	Thu Feb 23 11:22:25 2006 +0100
     6.3 @@ -205,7 +205,6 @@ class HVMImageHandler(ImageHandler):
     6.4                          ("image/device-model", self.device_model),
     6.5                          ("image/display", self.display))
     6.6  
     6.7 -        self.device_channel = None
     6.8          self.pid = 0
     6.9  
    6.10          self.dmargs += self.configVNC(imageConfig)
    6.11 @@ -216,16 +215,10 @@ class HVMImageHandler(ImageHandler):
    6.12          self.apic = int(sxp.child_value(imageConfig, 'apic', 0))
    6.13  
    6.14      def buildDomain(self):
    6.15 -        # Create an event channel
    6.16 -        self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
    6.17 -                                                      remote_dom=0)
    6.18 -        log.info("HVM device model port: %d", self.device_channel)
    6.19 -
    6.20          store_evtchn = self.vm.getStorePort()
    6.21  
    6.22          log.debug("dom            = %d", self.vm.getDomid())
    6.23          log.debug("image          = %s", self.kernel)
    6.24 -        log.debug("control_evtchn = %d", self.device_channel)
    6.25          log.debug("store_evtchn   = %d", store_evtchn)
    6.26          log.debug("memsize        = %d", self.vm.getMemoryTarget() / 1024)
    6.27          log.debug("vcpus          = %d", self.vm.getVCpuCount())
    6.28 @@ -237,7 +230,6 @@ class HVMImageHandler(ImageHandler):
    6.29  
    6.30          return xc.hvm_build(dom            = self.vm.getDomid(),
    6.31                              image          = self.kernel,
    6.32 -                            control_evtchn = self.device_channel,
    6.33                              store_evtchn   = store_evtchn,
    6.34                              memsize        = self.vm.getMemoryTarget() / 1024,
    6.35                              vcpus          = self.vm.getVCpuCount(),
    6.36 @@ -345,7 +337,6 @@ class HVMImageHandler(ImageHandler):
    6.37          if len(vnc):
    6.38              args = args + vnc
    6.39          args = args + ([ "-d",  "%d" % self.vm.getDomid(),
    6.40 -                  "-p", "%d" % self.device_channel,
    6.41                    "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
    6.42          args = args + self.dmargs
    6.43          env = dict(os.environ)
     7.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Feb 22 21:52:30 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Feb 23 11:22:25 2006 +0100
     7.3 @@ -124,11 +124,6 @@ static void hvm_map_io_shared_page(struc
     7.4          domain_crash_synchronous();
     7.5      }
     7.6      d->arch.hvm_domain.shared_page_va = (unsigned long)p;
     7.7 -
     7.8 -    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
     7.9 -
    7.10 -    clear_bit(iopacket_port(d),
    7.11 -              &d->shared_info->evtchn_mask[0]);
    7.12  }
    7.13  
    7.14  static int validate_hvm_info(struct hvm_info_table *t)
     8.1 --- a/xen/arch/x86/hvm/intercept.c	Wed Feb 22 21:52:30 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/intercept.c	Thu Feb 23 11:22:25 2006 +0100
     8.3 @@ -332,8 +332,8 @@ int intercept_pit_io(ioreq_t *p)
     8.4  void hlt_timer_fn(void *data)
     8.5  {
     8.6      struct vcpu *v = data;
     8.7 -    
     8.8 -    evtchn_set_pending(v, iopacket_port(v->domain));
     8.9 +
    8.10 +    evtchn_set_pending(v, iopacket_port(v));
    8.11  }
    8.12  
    8.13  static __inline__ void missed_ticks(struct hvm_virpit*vpit)
     9.1 --- a/xen/arch/x86/hvm/io.c	Wed Feb 22 21:52:30 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/io.c	Thu Feb 23 11:22:25 2006 +0100
     9.3 @@ -697,8 +697,8 @@ void hvm_io_assist(struct vcpu *v)
     9.4  void hvm_wait_io(void)
     9.5  {
     9.6      struct vcpu *v = current;
     9.7 -    struct domain *d = v->domain;    
     9.8 -    int port = iopacket_port(d);
     9.9 +    struct domain *d = v->domain;
    9.10 +    int port = iopacket_port(v);
    9.11  
    9.12      for ( ; ; )
    9.13      {
    9.14 @@ -729,8 +729,8 @@ void hvm_wait_io(void)
    9.15  void hvm_safe_block(void)
    9.16  {
    9.17      struct vcpu *v = current;
    9.18 -    struct domain *d = v->domain;    
    9.19 -    int port = iopacket_port(d);
    9.20 +    struct domain *d = v->domain;
    9.21 +    int port = iopacket_port(v);
    9.22  
    9.23      for ( ; ; )
    9.24      {
    10.1 --- a/xen/arch/x86/hvm/platform.c	Wed Feb 22 21:52:30 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Feb 23 11:22:25 2006 +0100
    10.3 @@ -42,8 +42,6 @@
    10.4  #define DECODE_success  1
    10.5  #define DECODE_failure  0
    10.6  
    10.7 -extern long evtchn_send(int lport);
    10.8 -
    10.9  #if defined (__x86_64__)
   10.10  static inline long __get_reg_value(unsigned long reg, int size)
   10.11  {
   10.12 @@ -648,6 +646,8 @@ void send_pio_req(struct cpu_user_regs *
   10.13      p->count = count;
   10.14      p->df = regs->eflags & EF_DF ? 1 : 0;
   10.15  
   10.16 +    p->io_count++;
   10.17 +
   10.18      if (pvalid) {
   10.19          if (hvm_paging_enabled(current))
   10.20              p->u.pdata = (void *) gva_to_gpa(value);
   10.21 @@ -664,18 +664,18 @@ void send_pio_req(struct cpu_user_regs *
   10.22  
   10.23      p->state = STATE_IOREQ_READY;
   10.24  
   10.25 -    evtchn_send(iopacket_port(v->domain));
   10.26 +    evtchn_send(iopacket_port(v));
   10.27      hvm_wait_io();
   10.28  }
   10.29  
   10.30 -void send_mmio_req(unsigned char type, unsigned long gpa,
   10.31 -                   unsigned long count, int size, long value, int dir, int pvalid)
   10.32 +void send_mmio_req(
   10.33 +    unsigned char type, unsigned long gpa,
   10.34 +    unsigned long count, int size, long value, int dir, int pvalid)
   10.35  {
   10.36      struct vcpu *v = current;
   10.37      vcpu_iodata_t *vio;
   10.38      ioreq_t *p;
   10.39      struct cpu_user_regs *regs;
   10.40 -    extern long evtchn_send(int lport);
   10.41  
   10.42      regs = current->arch.hvm_vcpu.mmio_op.inst_decoder_regs;
   10.43  
   10.44 @@ -702,6 +702,8 @@ void send_mmio_req(unsigned char type, u
   10.45      p->count = count;
   10.46      p->df = regs->eflags & EF_DF ? 1 : 0;
   10.47  
   10.48 +    p->io_count++;
   10.49 +
   10.50      if (pvalid) {
   10.51          if (hvm_paging_enabled(v))
   10.52              p->u.pdata = (void *) gva_to_gpa(value);
   10.53 @@ -718,7 +720,7 @@ void send_mmio_req(unsigned char type, u
   10.54  
   10.55      p->state = STATE_IOREQ_READY;
   10.56  
   10.57 -    evtchn_send(iopacket_port(v->domain));
   10.58 +    evtchn_send(iopacket_port(v));
   10.59      hvm_wait_io();
   10.60  }
   10.61  
    11.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 22 21:52:30 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Feb 23 11:22:25 2006 +0100
    11.3 @@ -64,7 +64,6 @@ static unsigned long trace_values[NR_CPU
    11.4  /* 
    11.5   * External functions, etc. We should move these to some suitable header file(s) */
    11.6  
    11.7 -extern long evtchn_send(int lport);
    11.8  extern void do_nmi(struct cpu_user_regs *, unsigned long);
    11.9  extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
   11.10                                  int inst_len);
    12.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 22 21:52:30 2006 +0100
    12.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Feb 23 11:22:25 2006 +0100
    12.3 @@ -421,6 +421,18 @@ void svm_do_launch(struct vcpu *v)
    12.4      if (v->vcpu_id == 0)
    12.5          hvm_setup_platform(v->domain);
    12.6  
    12.7 +    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    12.8 +    {
    12.9 +        printk("HVM domain bind port %d to vcpu %d failed!\n",
   12.10 +               iopacket_port(v), v->vcpu_id);
   12.11 +        domain_crash_synchronous();
   12.12 +    }
   12.13 +
   12.14 +    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
   12.15 +
   12.16 +    clear_bit(iopacket_port(v),
   12.17 +              &v->domain->shared_info->evtchn_mask[0]);
   12.18 +
   12.19      if (hvm_apic_support(v->domain))
   12.20          vlapic_init(v);
   12.21      init_timer(&v->arch.hvm_svm.hlt_timer,
   12.22 @@ -490,7 +502,7 @@ void svm_do_resume(struct vcpu *v)
   12.23  
   12.24      svm_stts(v);
   12.25  
   12.26 -    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
   12.27 +    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
   12.28           test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
   12.29          hvm_wait_io();
   12.30  
    13.1 --- a/xen/arch/x86/hvm/vlapic.c	Wed Feb 22 21:52:30 2006 +0100
    13.2 +++ b/xen/arch/x86/hvm/vlapic.c	Thu Feb 23 11:22:25 2006 +0100
    13.3 @@ -210,7 +210,7 @@ static int vlapic_accept_irq(struct vcpu
    13.4                  set_bit(vector, &vlapic->tmr[0]);
    13.5              }
    13.6          }
    13.7 -        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
    13.8 +        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
    13.9          result = 1;
   13.10          break;
   13.11  
   13.12 @@ -834,7 +834,7 @@ void vlapic_timer_fn(void *data)
   13.13          }
   13.14          else
   13.15              vlapic->intr_pending_count[vlapic_lvt_vector(vlapic, VLAPIC_LVT_TIMER)]++;
   13.16 -        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
   13.17 +        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
   13.18      }
   13.19  
   13.20      vlapic->timer_current_update = NOW();
    14.1 --- a/xen/arch/x86/hvm/vmx/io.c	Wed Feb 22 21:52:30 2006 +0100
    14.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Thu Feb 23 11:22:25 2006 +0100
    14.3 @@ -178,7 +178,7 @@ void vmx_do_resume(struct vcpu *v)
    14.4  
    14.5      vmx_stts();
    14.6  
    14.7 -    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
    14.8 +    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
    14.9           test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
   14.10          hvm_wait_io();
   14.11  
    15.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 22 21:52:30 2006 +0100
    15.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Feb 23 11:22:25 2006 +0100
    15.3 @@ -200,6 +200,18 @@ static void vmx_do_launch(struct vcpu *v
    15.4      if (v->vcpu_id == 0)
    15.5          hvm_setup_platform(v->domain);
    15.6  
    15.7 +    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    15.8 +    {
    15.9 +        printk("VMX domain bind port %d to vcpu %d failed!\n",
   15.10 +               iopacket_port(v), v->vcpu_id);
   15.11 +        domain_crash_synchronous();
   15.12 +    }
   15.13 +
   15.14 +    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
   15.15 +
   15.16 +    clear_bit(iopacket_port(v),
   15.17 +              &v->domain->shared_info->evtchn_mask[0]);
   15.18 +
   15.19      __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
   15.20  
   15.21      error |= __vmwrite(GUEST_CR0, cr0);
    16.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 22 21:52:30 2006 +0100
    16.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Feb 23 11:22:25 2006 +0100
    16.3 @@ -448,7 +448,6 @@ unsigned long vmx_get_ctrl_reg(struct vc
    16.4      return 0;                   /* dummy */
    16.5  }
    16.6  
    16.7 -extern long evtchn_send(int lport);
    16.8  void do_nmi(struct cpu_user_regs *);
    16.9  
   16.10  static int check_vmx_controls(ctrls, msr)
    17.1 --- a/xen/common/event_channel.c	Wed Feb 22 21:52:30 2006 +0100
    17.2 +++ b/xen/common/event_channel.c	Thu Feb 23 11:22:25 2006 +0100
    17.3 @@ -399,7 +399,7 @@ static long evtchn_close(evtchn_close_t 
    17.4  }
    17.5  
    17.6  
    17.7 -long evtchn_send(int lport)
    17.8 +long evtchn_send(unsigned int lport)
    17.9  {
   17.10      struct evtchn *lchn, *rchn;
   17.11      struct domain *ld = current->domain, *rd;
   17.12 @@ -508,15 +508,13 @@ static long evtchn_status(evtchn_status_
   17.13      return rc;
   17.14  }
   17.15  
   17.16 -static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind) 
   17.17 +long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
   17.18  {
   17.19 -    struct domain *d    = current->domain;
   17.20 -    int            port = bind->port;
   17.21 -    int            vcpu = bind->vcpu;
   17.22 +    struct domain *d = current->domain;
   17.23      struct evtchn *chn;
   17.24      long           rc = 0;
   17.25  
   17.26 -    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
   17.27 +    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
   17.28          return -ENOENT;
   17.29  
   17.30      spin_lock(&d->evtchn_lock);
   17.31 @@ -533,7 +531,7 @@ static long evtchn_bind_vcpu(evtchn_bind
   17.32      case ECS_UNBOUND:
   17.33      case ECS_INTERDOMAIN:
   17.34      case ECS_PIRQ:
   17.35 -        chn->notify_vcpu_id = vcpu;
   17.36 +        chn->notify_vcpu_id = vcpu_id;
   17.37          break;
   17.38      default:
   17.39          rc = -EINVAL;
   17.40 @@ -638,7 +636,7 @@ long do_event_channel_op(struct evtchn_o
   17.41          break;
   17.42  
   17.43      case EVTCHNOP_bind_vcpu:
   17.44 -        rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
   17.45 +        rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
   17.46          break;
   17.47  
   17.48      case EVTCHNOP_unmask:
    18.1 --- a/xen/include/asm-x86/hvm/io.h	Wed Feb 22 21:52:30 2006 +0100
    18.2 +++ b/xen/include/asm-x86/hvm/io.h	Thu Feb 23 11:22:25 2006 +0100
    18.3 @@ -23,6 +23,7 @@
    18.4  #include <asm/hvm/vpic.h>
    18.5  #include <asm/hvm/vioapic.h>
    18.6  #include <public/hvm/ioreq.h>
    18.7 +#include <public/event_channel.h>
    18.8  
    18.9  #define MAX_OPERAND_NUM 2
   18.10  
    19.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Feb 22 21:52:30 2006 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/support.h	Thu Feb 23 11:22:25 2006 +0100
    19.3 @@ -40,9 +40,9 @@ static inline vcpu_iodata_t *get_vio(str
    19.4      return &get_sp(d)->vcpu_iodata[cpu];
    19.5  }
    19.6  
    19.7 -static inline int iopacket_port(struct domain *d)
    19.8 +static inline int iopacket_port(struct vcpu *v)
    19.9  {
   19.10 -    return get_sp(d)->sp_global.eport;
   19.11 +    return get_vio(v->domain, v->vcpu_id)->vp_eport;
   19.12  }
   19.13  
   19.14  /* XXX these are really VMX specific */
    20.1 --- a/xen/include/public/hvm/ioreq.h	Wed Feb 22 21:52:30 2006 +0100
    20.2 +++ b/xen/include/public/hvm/ioreq.h	Thu Feb 23 11:22:25 2006 +0100
    20.3 @@ -53,6 +53,7 @@ typedef struct {
    20.4      uint8_t dir:1;          /*  1=read, 0=write             */
    20.5      uint8_t df:1;
    20.6      uint8_t type;           /* I/O type                     */
    20.7 +    uint64_t io_count;      /* How many IO done on a vcpu   */
    20.8  } ioreq_t;
    20.9  
   20.10  #define MAX_VECTOR      256
   20.11 @@ -65,11 +66,13 @@ typedef struct {
   20.12      uint16_t    pic_irr;
   20.13      uint16_t    pic_last_irr;
   20.14      uint16_t    pic_clear_irr;
   20.15 -    int         eport; /* Event channel port */
   20.16  } global_iodata_t;
   20.17  
   20.18  typedef struct {
   20.19 -    ioreq_t     vp_ioreq;
   20.20 +    ioreq_t         vp_ioreq;
   20.21 +    /* Event channel port */
   20.22 +    unsigned long   vp_eport;   /* VMX vcpu uses this to notify DM */
   20.23 +    unsigned long   dm_eport;   /* DM uses this to notify VMX vcpu */
   20.24  } vcpu_iodata_t;
   20.25  
   20.26  typedef struct {
    21.1 --- a/xen/include/xen/event.h	Wed Feb 22 21:52:30 2006 +0100
    21.2 +++ b/xen/include/xen/event.h	Thu Feb 23 11:22:25 2006 +0100
    21.3 @@ -63,4 +63,10 @@ extern void send_guest_pirq(struct domai
    21.4      (!!(v)->vcpu_info->evtchn_upcall_pending &  \
    21.5        !(v)->vcpu_info->evtchn_upcall_mask)
    21.6  
    21.7 +/* Send a notification from a local event-channel port. */
    21.8 +extern long evtchn_send(unsigned int lport);
    21.9 +
   21.10 +/* Bind a local event-channel port to the specified VCPU. */
   21.11 +extern long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
   21.12 +
   21.13  #endif /* __XEN_EVENT_H__ */