ia64/xen-unstable

changeset 16284:7eb68d995aa7

ia64: Fix after stdvga performance changes to bufioreq struct.
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author Keir Fraser <keir@xensource.com>
date Tue Oct 30 16:25:58 2007 +0000 (2007-10-30)
parents 9379c83e14b5
children a07288a84785 eaa8014ef779
files xen/arch/ia64/vmx/mmio.c
line diff
--- a/xen/arch/ia64/vmx/mmio.c	Tue Oct 30 16:15:17 2007 +0000
+++ b/xen/arch/ia64/vmx/mmio.c	Tue Oct 30 16:25:58 2007 +0000
@@ -55,54 +55,69 @@ static struct hvm_buffered_io_range
 static int hvm_buffered_io_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
-    spinlock_t  *buffered_io_lock;
-    buffered_iopage_t *buffered_iopage =
+    buffered_iopage_t *pg =
         (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
-    unsigned long tmp_write_pointer = 0;
+    buf_ioreq_t bp;
     int i;
 
+    /* Ensure buffered_iopage fits in a page */
+    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
+
     /* ignore READ ioreq_t! */
-    if ( p->dir == IOREQ_READ )
+    if (p->dir == IOREQ_READ)
         return 0;
 
-    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
-        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
-             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
-                                     hvm_buffered_io_ranges[i]->length )
+    for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
+        if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
+            p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
+                                    hvm_buffered_io_ranges[i]->length)
             break;
     }
 
-    if ( i == HVM_BUFFERED_IO_RANGE_NR )
+    if (i == HVM_BUFFERED_IO_RANGE_NR)
         return 0;
 
-    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
-    spin_lock(buffered_io_lock);
+    bp.type = p->type;
+    bp.dir = p->dir;
+    switch (p->size) {
+    case 1:
+        bp.size = 0;
+        break;
+    case 2:
+        bp.size = 1;
+        break;
+    default:
+        /* Could use quad word semantics, but it only appears
+         * to be useful for timeoffset data. */
+        return 0;
+    }
+    bp.data = (uint16_t)p->data;
+    bp.addr = (uint32_t)p->addr;
 
-    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
-         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+    spin_lock(&v->domain->arch.hvm_domain.buffered_io_lock);
+
+    if (pg->write_pointer - pg->read_pointer == IOREQ_BUFFER_SLOT_NUM) {
         /* The queue is full:
          * send the iopacket through the normal path.
          * NOTE: the unsigned arithmetic above handles
          * write_pointer overflow correctly.
          */
-        spin_unlock(buffered_io_lock);
+        spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
         return 0;
     }
 
-    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
-
-    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
+           &bp, sizeof(bp));
 
-    /*make the ioreq_t visible before write_pointer*/
+    /* Make the ioreq_t visible before write_pointer */
     wmb();
-    buffered_iopage->write_pointer++;
+    pg->write_pointer++;
 
-    spin_unlock(buffered_io_lock);
+    spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
 
     return 1;
 }
 
-
 static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
 {
     struct vcpu *v = current;
@@ -110,32 +125,36 @@ static void low_mmio_access(VCPU *vcpu,
     ioreq_t *p;
 
     vio = get_vio(v->domain, v->vcpu_id);
-    if (vio == 0) {
-        panic_domain(NULL,"bad shared page: %lx", (unsigned long)vio);
-    }
+    if (!vio)
+        panic_domain(NULL, "bad shared page");
+
     p = &vio->vp_ioreq;
+
     p->addr = pa;
     p->size = s;
     p->count = 1;
-    p->dir = dir;
-    if (dir==IOREQ_WRITE)     // write;
+    if (dir == IOREQ_WRITE)
         p->data = *val;
-    else if (dir == IOREQ_READ)
-        p->data = 0;          // clear all bits
+    else
+        p->data = 0;
     p->data_is_ptr = 0;
+    p->dir = dir;
+    p->df = 0;
     p->type = 1;
-    p->df = 0;
 
     p->io_count++;
+
     if (hvm_buffered_io_intercept(p)) {
         p->state = STATE_IORESP_READY;
         vmx_io_assist(v);
-        return;
-    } else
-        vmx_send_assist_req(v);
-    if (dir == IOREQ_READ) { // read
+        if (dir != IOREQ_READ)
+            return;
+    }
+
+    vmx_send_assist_req(v);
+    if (dir == IOREQ_READ)
         *val = p->data;
-    }
+
     return;
 }
 
@@ -227,16 +246,18 @@ static void legacy_io_access(VCPU *vcpu,
     ioreq_t *p;
 
     vio = get_vio(v->domain, v->vcpu_id);
-    if (vio == 0) {
-        panic_domain(NULL,"bad shared page\n");
-    }
+    if (!vio)
+        panic_domain(NULL, "bad shared page\n");
+
     p = &vio->vp_ioreq;
-    p->addr = TO_LEGACY_IO(pa&0x3ffffffUL);
+    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
     p->size = s;
     p->count = 1;
     p->dir = dir;
-    if (dir == IOREQ_WRITE)     // write;
+    if (dir == IOREQ_WRITE)
         p->data = *val;
+    else
+        p->data = 0;
     p->data_is_ptr = 0;
     p->type = 0;
     p->df = 0;
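
For context, the change above replaces full ioreq_t slots in the buffered-I/O ring with packed buf_ioreq_t records: the access size is encoded (0 for a 1-byte access, 1 for a 2-byte access) and the payload and address are truncated to 16 and 32 bits. The sketch below shows the matching consumer side of this single-producer ring discipline: snapshot write_pointer, decode and handle each published slot, then advance read_pointer. It is a minimal illustration only; the struct layouts, the IOREQ_BUFFER_SLOT_NUM value, and handle_buffered_write() are simplified stand-ins, not the real Xen/ioemu definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed slot count; the real value comes from the public ioreq header. */
#define IOREQ_BUFFER_SLOT_NUM 80

/* Simplified stand-ins for the shared-page layouts (the real buf_ioreq_t
 * is a packed bitfield struct in Xen's public headers). */
typedef struct {
    uint8_t  type;
    uint8_t  dir;
    uint8_t  size;      /* encoded: 0 -> 1 byte, 1 -> 2 bytes */
    uint16_t data;      /* truncated payload, as in bp.data above */
    uint32_t addr;      /* truncated address, as in bp.addr above */
} buf_ioreq_t;

typedef struct {
    volatile uint32_t read_pointer;
    volatile uint32_t write_pointer;
    buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
} buffered_iopage_t;

/* Hypothetical sink for decoded writes. */
static void handle_buffered_write(uint32_t addr, uint16_t data, int bytes)
{
    printf("buffered write: addr=%#x data=%#x size=%d\n",
           (unsigned)addr, (unsigned)data, bytes);
}

/* Consumer side of the ring: drain every slot the producer has published.
 * The unsigned subtraction used for the occupancy test works across
 * write_pointer wraparound, matching the producer's full-queue check. */
static void drain_buffered_io(buffered_iopage_t *pg)
{
    uint32_t wp = pg->write_pointer;          /* snapshot once */

    __sync_synchronize();  /* read barrier; pairs with the producer's wmb() */

    while (pg->read_pointer != wp) {
        buf_ioreq_t *bp =
            &pg->buf_ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        int bytes = (bp->size == 0) ? 1 : 2;  /* decode the packed size */

        handle_buffered_write(bp->addr, bp->data, bytes);
        pg->read_pointer++;                   /* free the slot */
    }
}

int main(void)
{
    static buffered_iopage_t pg;

    /* Producer side, mirroring the patch: fall back to the normal path
     * when the ring is full, otherwise publish one packed record. */
    if (pg.write_pointer - pg.read_pointer != IOREQ_BUFFER_SLOT_NUM) {
        buf_ioreq_t bp = { .type = 1, .dir = 1,     /* write */
                           .size = 1,               /* 2-byte access */
                           .data = 0xbeef, .addr = 0xa0000 };
        pg.buf_ioreq[pg.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
        __sync_synchronize();                       /* wmb() equivalent */
        pg.write_pointer++;
    }

    drain_buffered_io(&pg);
    return 0;
}

Two points worth noting: the spinlock taken in the patch only serializes vCPUs on the producer side, so ordering between producer and consumer relies solely on the wmb()-before-increment discipline mirrored here; and only writes can be buffered (a read must return data synchronously), which is why the second hunk makes low_mmio_access() fall through to vmx_send_assist_req() for IOREQ_READ even after a successful intercept.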
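
Separately, the BUILD_BUG_ON() added at the top of hvm_buffered_io_intercept() is a compile-time assertion: with the new slot layout, the whole buffered_iopage_t must still fit in the single page shared with the device model. Xen's actual macro may be defined differently; the classic negative-array-size trick sketches the idea:

/* A sketch of a compile-time assertion in the BUILD_BUG_ON style: the
 * array type is ill-formed when cond is non-zero, so the build fails
 * instead of the structure silently outgrowing PAGE_SIZE at runtime. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))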