ia64/xen-unstable

changeset 17194:d5bcf03596cc

[IA64] Create a vlsapic - viosapic interface

This patch removes duplicated code and creates a vlsapic function to
inject any interrupt. This slightly simplifies vlsapic.

It also removes useless irq_save/restore around atomic updates.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Fri Mar 07 12:44:26 2008 -0700 (2008-03-07)
parents 54c7e3798464
children 6225df3ff209
files xen/arch/ia64/vmx/viosapic.c xen/arch/ia64/vmx/vlsapic.c xen/include/asm-ia64/vlsapic.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/viosapic.c	Fri Mar 07 12:26:27 2008 -0700
     1.2 +++ b/xen/arch/ia64/vmx/viosapic.c	Fri Mar 07 12:44:26 2008 -0700
     1.3 @@ -49,36 +49,14 @@ static void viosapic_deliver(struct vios
     1.4      uint16_t dest = viosapic->redirtbl[irq].dest_id;
     1.5      uint8_t delivery_mode = viosapic->redirtbl[irq].delivery_mode;
     1.6      uint8_t vector = viosapic->redirtbl[irq].vector;
     1.7 -    struct vcpu *v;
     1.8  
     1.9      ASSERT(spin_is_locked(&viosapic->lock));
    1.10 -    switch ( delivery_mode )
    1.11 -    {
    1.12 -    case SAPIC_FIXED:
    1.13 -    {
    1.14 -        v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
    1.15 -        vlsapic_set_irq(v, vector);
    1.16 -        vcpu_kick(v);
    1.17 -        break;
    1.18 -    }
    1.19 -    case SAPIC_LOWEST_PRIORITY:
    1.20 -    {
    1.21 -        v = vlsapic_lid_to_vcpu(viosapic_domain(viosapic), dest);
    1.22 -        if (viosapic->lowest_vcpu)
    1.23 -            v = viosapic->lowest_vcpu;
    1.24 -        vlsapic_set_irq(v, vector);
    1.25 -        vcpu_kick(v);
    1.26 -        break;
    1.27 -    }
    1.28 -    case SAPIC_PMI:
    1.29 -    case SAPIC_NMI:
    1.30 -    case SAPIC_INIT:
    1.31 -    case SAPIC_EXTINT:
    1.32 -    default:
    1.33 -        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
    1.34 -                 delivery_mode);
    1.35 -        break;
    1.36 -    }
    1.37 +
    1.38 +    if (vlsapic_deliver_int(viosapic_domain (viosapic),
    1.39 +                            dest, delivery_mode, vector) < 0)
    1.40 +        gdprintk(XENLOG_WARNING,
    1.41 +                 "viosapic: can't deliver int %u to %u (dm=%u)\n",
    1.42 +                 vector, dest, delivery_mode);
    1.43  }
    1.44  
    1.45  
     2.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Fri Mar 07 12:26:27 2008 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Fri Mar 07 12:44:26 2008 -0700
     2.3 @@ -107,7 +107,6 @@ static void update_vhpi(VCPU *vcpu, int 
     2.4   */
     2.5  static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
     2.6  {
     2.7 -    uint64_t spsr;
     2.8      int ret;
     2.9  
    2.10      if (vector & ~0xff) {
    2.11 @@ -115,9 +114,7 @@ static int vmx_vcpu_unpend_interrupt(VCP
    2.12          return -1;
    2.13      }
    2.14  
    2.15 -    local_irq_save(spsr);
    2.16      ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));
    2.17 -    local_irq_restore(spsr);
    2.18  
    2.19      if (ret) {
    2.20          vcpu->arch.irq_new_pending = 1;
    2.21 @@ -422,16 +419,13 @@ static int irq_masked(VCPU *vcpu, int h_
    2.22   */
    2.23  int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector)
    2.24  {
    2.25 -    uint64_t    spsr;
    2.26      int ret;
    2.27  
    2.28      if (vector & ~0xff) {
    2.29          gdprintk(XENLOG_INFO, "vmx_vcpu_pend_interrupt: bad vector\n");
    2.30          return -1;
    2.31      }
    2.32 -    local_irq_save(spsr);
    2.33      ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
    2.34 -    local_irq_restore(spsr);
    2.35  
    2.36      if (!ret) {
    2.37          vcpu->arch.irq_new_pending = 1;
    2.38 @@ -605,17 +599,16 @@ void vmx_vexirq(VCPU *vcpu)
    2.39      generate_exirq (vcpu);
    2.40  }
    2.41  
    2.42 -struct vcpu * vlsapic_lid_to_vcpu(struct domain *d, uint16_t dest)
    2.43 +struct vcpu *lid_to_vcpu(struct domain *d, uint16_t dest)
    2.44  {
    2.45 -    struct vcpu * v;
    2.46 -    for_each_vcpu ( d, v ) {
    2.47 -        if ( (v->arch.privregs->lid >> 16) == dest )
    2.48 -            return v;
    2.49 -    }
    2.50 +    int id = dest >> 8;
    2.51 +
    2.52 +    /* Fast look: assume EID=0 ID=vcpu_id.  */
    2.53 +    if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS)
    2.54 +        return d->vcpu[id];
    2.55      return NULL;
    2.56  }
    2.57  
    2.58 -
    2.59  /*
    2.60   * To inject INIT to guest, we must set the PAL_INIT entry 
    2.61   * and set psr to switch to physical mode
    2.62 @@ -641,14 +634,25 @@ static void vmx_inject_guest_pal_init(VC
    2.63   *  offset: address offset to IPI space.
    2.64   *  value:  deliver value.
    2.65   */
    2.66 -static void vlsapic_deliver_ipi(VCPU *vcpu, uint64_t dm, uint64_t vector)
    2.67 +static int vcpu_deliver_int(VCPU *vcpu, uint64_t dm, uint64_t vector)
    2.68  {
    2.69 -    IPI_DPRINTK("deliver_ipi %lx %lx\n", dm, vector);
    2.70 +    int running = vcpu->is_running;
    2.71 +
    2.72 +    IPI_DPRINTK("deliver_int %lx %lx\n", dm, vector);
    2.73  
    2.74      switch (dm) {
    2.75      case SAPIC_FIXED:     // INT
    2.76          vmx_vcpu_pend_interrupt(vcpu, vector);
    2.77          break;
    2.78 +    case SAPIC_LOWEST_PRIORITY:
    2.79 +    {
    2.80 +        struct vcpu *lowest = vcpu_viosapic(vcpu)->lowest_vcpu;
    2.81 +
    2.82 +        if (lowest == NULL)
    2.83 +            lowest = vcpu;
    2.84 +        vmx_vcpu_pend_interrupt(lowest, vector);
    2.85 +        break;
    2.86 +    }
    2.87      case SAPIC_PMI:
    2.88          // TODO -- inject guest PMI
    2.89          panic_domain(NULL, "Inject guest PMI!\n");
    2.90 @@ -663,9 +667,30 @@ static void vlsapic_deliver_ipi(VCPU *vc
    2.91          vmx_vcpu_pend_interrupt(vcpu, 0);
    2.92          break;
    2.93      default:
    2.94 -        panic_domain(NULL, "Deliver reserved IPI!\n");
    2.95 -        break;
    2.96 +        return -EINVAL;
    2.97      }
    2.98 +
    2.99 +    /* Kick vcpu.  */
   2.100 +    vcpu_unblock(vcpu);
   2.101 +    if (running)
   2.102 +        smp_send_event_check_cpu(vcpu->processor);
   2.103 +
   2.104 +    return 0;
   2.105 +}
   2.106 +
   2.107 +int vlsapic_deliver_int(struct domain *d,
   2.108 +			uint16_t dest, uint64_t dm, uint64_t vector)
   2.109 +{
   2.110 +    VCPU *vcpu;
   2.111 +
   2.112 +    vcpu = lid_to_vcpu(d, dest);
   2.113 +    if (vcpu == NULL)
   2.114 +        return -ESRCH;
   2.115 +
   2.116 +    if (!vcpu->is_initialised || test_bit(_VPF_down, &vcpu->pause_flags))
   2.117 +        return -ENOEXEC;
   2.118 +
   2.119 +    return vcpu_deliver_int (vcpu, dm, vector);
   2.120  }
   2.121  
   2.122  /*
   2.123 @@ -673,27 +698,10 @@ static void vlsapic_deliver_ipi(VCPU *vc
   2.124   */
   2.125  void deliver_pal_init(VCPU *vcpu)
   2.126  {
   2.127 -    vlsapic_deliver_ipi(vcpu, SAPIC_INIT, 0);
   2.128 +    vcpu_deliver_int(vcpu, SAPIC_INIT, 0);
   2.129  }
   2.130  
   2.131  /*
   2.132 - * TODO: Use hash table for the lookup.
   2.133 - */
   2.134 -static inline VCPU *lid_to_vcpu(struct domain *d, uint8_t id, uint8_t eid)
   2.135 -{
   2.136 -    VCPU  *v;
   2.137 -    LID   lid; 
   2.138 -
   2.139 -    for_each_vcpu(d, v) {
   2.140 -        lid.val = VCPU_LID(v);
   2.141 -        if (lid.id == id && lid.eid == eid)
   2.142 -            return v;
   2.143 -    }
   2.144 -    return NULL;
   2.145 -}
   2.146 -
   2.147 -
   2.148 -/*
   2.149   * execute write IPI op.
   2.150   */
   2.151  static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
   2.152 @@ -701,7 +709,8 @@ static void vlsapic_write_ipi(VCPU *vcpu
   2.153      VCPU   *targ;
   2.154      struct domain *d = vcpu->domain; 
   2.155  
   2.156 -    targ = lid_to_vcpu(vcpu->domain, ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
   2.157 +    targ = lid_to_vcpu(vcpu->domain,
   2.158 +                       (((ipi_a_t)addr).id << 8) | ((ipi_a_t)addr).eid);
   2.159      if (targ == NULL)
   2.160          panic_domain(NULL, "Unknown IPI cpu\n");
   2.161  
   2.162 @@ -727,12 +736,10 @@ static void vlsapic_write_ipi(VCPU *vcpu
   2.163              printk("arch_boot_vcpu: huh, already awake!");
   2.164          }
   2.165      } else {
   2.166 -        int running = targ->is_running;
   2.167 -        vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm, 
   2.168 -                            ((ipi_d_t)value).vector);
   2.169 -        vcpu_unblock(targ);
   2.170 -        if (running)
   2.171 -            smp_send_event_check_cpu(targ->processor);
   2.172 +        if (((ipi_d_t)value).dm == SAPIC_LOWEST_PRIORITY ||
   2.173 +            vcpu_deliver_int(targ, ((ipi_d_t)value).dm, 
   2.174 +                             ((ipi_d_t)value).vector) < 0)
   2.175 +            panic_domain(NULL, "Deliver reserved interrupt!\n");
   2.176      }
   2.177      return;
   2.178  }
     3.1 --- a/xen/include/asm-ia64/vlsapic.h	Fri Mar 07 12:26:27 2008 -0700
     3.2 +++ b/xen/include/asm-ia64/vlsapic.h	Fri Mar 07 12:44:26 2008 -0700
     3.3 @@ -70,9 +70,9 @@ extern void vtm_set_itm(struct vcpu *vcp
     3.4  extern void vtm_set_itv(struct vcpu *vcpu, uint64_t val);
     3.5  extern void vmx_vexirq(struct vcpu  *vcpu);
     3.6  extern void vhpi_detection(struct vcpu *vcpu);
     3.7 -extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
     3.8 -extern struct vcpu * vlsapic_lid_to_vcpu(struct domain *d, uint16_t dest);
     3.9 +extern int vlsapic_deliver_int(struct domain *d,
    3.10 +			       uint16_t dest, uint64_t dm, uint64_t vector);
    3.11 +
    3.12  extern uint64_t vlsapic_read(struct vcpu *v, uint64_t addr, uint64_t s);
    3.13  extern void vlsapic_write(struct vcpu *v, uint64_t addr, uint64_t s, uint64_t val);
    3.14 -#define vlsapic_set_irq vmx_vcpu_pend_interrupt
    3.15  #endif