vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
/* Ignore */
}
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
+ * of the enabled bit, so there is only one function for both here.
+ *
+ * Returns a bitmask covering len * 8 interrupts starting at the INTID
+ * encoded in the access offset: bit i is set iff IRQ (intid + i) is
+ * currently enabled in the distributor's virtual state.
+ */
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ uint32_t value = 0;
+ unsigned int i;
+
+ /* Loop over all IRQs affected by this read */
+ for ( i = 0; i < len * 8; i++ )
+ {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+ /* Racy snapshot read; no irq_lock needed for a single bool. */
+ if ( irq->enabled )
+ value |= (1U << i);
+
+ /* Drop the reference taken by vgic_get_irq(). */
+ vgic_put_irq(vcpu->domain, irq);
+ }
+
+ return value;
+}
+
+/*
+ * Handle writes to GICD_ISENABLER: write-1-to-set semantics, so every
+ * set bit in val enables the corresponding interrupt and clear bits
+ * are ignored. Newly enabled hardware-mapped IRQs are also synced to
+ * the physical distributor via vgic_sync_hardware_irq().
+ */
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ unsigned int i;
+
+ for_each_set_bit( i, &val, len * 8 )
+ {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+ unsigned long flags;
+ irq_desc_t *desc;
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if ( irq->enabled ) /* skip already enabled IRQs */
+ {
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->domain, irq);
+ continue;
+ }
+
+ irq->enabled = true;
+ if ( irq->hw )
+ {
+ /*
+ * The irq cannot be a PPI, we only support delivery
+ * of SPIs to guests.
+ */
+ ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
+
+ desc = irq_to_desc(irq->hwintid);
+ }
+ else
+ desc = NULL;
+
+ /*
+ * May queue the now-enabled IRQ for injection; drops
+ * irq->irq_lock (no explicit unlock follows here).
+ */
+ vgic_queue_irq_unlock(vcpu->domain, irq, flags);
+
+ /* Propagate the new enabled state to the physical IRQ. */
+ if ( desc )
+ vgic_sync_hardware_irq(vcpu->domain, desc, irq);
+
+ vgic_put_irq(vcpu->domain, irq);
+ }
+}
+
+/*
+ * Handle writes to GICD_ICENABLER: write-1-to-clear semantics, so every
+ * set bit in val disables the corresponding interrupt and clear bits
+ * are ignored. Newly disabled hardware-mapped IRQs are also synced to
+ * the physical distributor via vgic_sync_hardware_irq().
+ */
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ unsigned int i;
+
+ for_each_set_bit( i, &val, len * 8 )
+ {
+ struct vgic_irq *irq;
+ unsigned long flags;
+ irq_desc_t *desc;
+
+ irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+ spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if ( !irq->enabled ) /* skip already disabled IRQs */
+ {
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->domain, irq);
+ continue;
+ }
+
+ irq->enabled = false;
+
+ if ( irq->hw )
+ {
+ /*
+ * The irq cannot be a PPI, we only support delivery
+ * of SPIs to guests.
+ */
+ ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
+
+ desc = irq_to_desc(irq->hwintid);
+ }
+ else
+ desc = NULL;
+
+ /*
+ * Drop irq_lock before the hardware sync:
+ * vgic_sync_hardware_irq() takes desc->lock first, then
+ * irq_lock, so calling it with irq_lock held would invert
+ * the lock order.
+ */
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ /* Propagate the new disabled state to the physical IRQ. */
+ if ( desc )
+ vgic_sync_hardware_irq(vcpu->domain, desc, irq);
+
+ vgic_put_irq(vcpu->domain, irq);
+ }
+}
+
static int match_region(const void *key, const void *elt)
{
const unsigned int offset = (unsigned long)key;
void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
unsigned int len, unsigned long val);
+/* Handlers for the GICD_ISENABLER/GICD_ICENABLER register group. */
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len);
+
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val);
+
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
#endif
}
}
+/*
+ * Map the virtual IRQ's level/edge config flag to the IRQ type value
+ * expected by gic_set_irq_type().
+ */
+static unsigned int translate_irq_type(bool is_level)
+{
+ return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING;
+}
+
+/*
+ * Propagate the state of a hardware-mapped virtual IRQ (enabled flag,
+ * trigger type, affinity) to the physical interrupt descriptor.
+ *
+ * Lock ordering: desc->lock is taken before irq->irq_lock — callers
+ * must hold neither on entry.
+ */
+void vgic_sync_hardware_irq(struct domain *d,
+ irq_desc_t *desc, struct vgic_irq *irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
+
+ /*
+ * We forbid tinkering with the hardware IRQ association during
+ * a domain's lifetime.
+ */
+ ASSERT(irq->hw && desc->irq == irq->hwintid);
+
+ if ( irq->enabled )
+ {
+ /*
+ * We might end up from various callers, so check that the
+ * interrupt is disabled before trying to change the config.
+ */
+ if ( irq_type_set_by_domain(d) &&
+ test_bit(_IRQ_DISABLED, &desc->status) )
+ gic_set_irq_type(desc, translate_irq_type(irq->config));
+
+ /* Route the physical IRQ to the pCPU running the target vCPU. */
+ if ( irq->target_vcpu )
+ irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
+ desc->handler->enable(desc);
+ }
+ else
+ desc->handler->disable(desc);
+
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
/*
* Local variables:
* mode: C