struct its_collection *collection;
u32 lpi;
u32 event_id;
+ bool enabled; /* per-LPI enable bit, from the LPI configuration table */
+ unsigned long *pending; /* per-VCPU pending bitmap, indexed by vcpu_id */
};
+#define for_each_lpi(dev, itte, kvm) \
+ list_for_each_entry(dev, &(kvm)->arch.vgic.its.device_list, dev_list) \
+ list_for_each_entry(itte, &(dev)->itt, itte_list)
+
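Note that for_each_lpi() expands to two nested list_for_each_entry() loops, so a plain break in the body only leaves the inner loop; find_itte_by_lpi() below bails out with return instead, which is the safe pattern. A minimal usage sketch, assuming its->lock is held by the caller; the helper name is illustrative only, not part of the patch:

static int vits_count_pending_lpis(struct kvm *kvm, int vcpu_id)
{
	struct its_device *device;
	struct its_itte *itte;
	int count = 0;

	/* Walks every ITTE of every device mapped on the ITS. */
	for_each_lpi(device, itte, kvm) {
		if (itte->enabled && test_bit(vcpu_id, itte->pending))
			count++;
	}
	return count;
}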
+static struct its_itte *find_itte_by_lpi(struct kvm *kvm, int lpi)
+{
+ struct its_device *device;
+ struct its_itte *itte;
+
+ for_each_lpi(device, itte, kvm) {
+ if (itte->lpi == lpi)
+ return itte;
+ }
+ return NULL;
+}
+
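A caller such as an MSI injection path could use this lookup to mark an LPI pending for the VCPU its collection targets. The sketch below is an assumption about such a caller (locking and VCPU kicking omitted), not code from this patch:

static int vits_set_lpi_pending(struct kvm *kvm, int lpi)
{
	struct its_itte *itte;

	itte = find_itte_by_lpi(kvm, lpi);
	if (!itte || !itte->collection)
		return -EINVAL;

	/*
	 * collection->target_addr holds the vcpu_id, as vits_queue_lpis()
	 * below relies on.
	 */
	__set_bit(itte->collection->target_addr, itte->pending);
	return 0;
}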
#define BASER_BASE_ADDRESS(x) ((x) & 0xfffffffff000ULL)
/* The distributor lock is held by the VGIC MMIO handler. */
return false;
}
+/*
+ * Find all enabled and pending LPIs and queue them into the list
+ * registers.
+ * The distributor lock is held by the caller.
+ */
+bool vits_queue_lpis(struct kvm_vcpu *vcpu)
+{
+ struct vgic_its *its = &vcpu->kvm->arch.vgic.its;
+ struct its_device *device;
+ struct its_itte *itte;
+ bool ret = true;
+
+ if (!vgic_has_its(vcpu->kvm))
+ return true;
+ if (!its->enabled || !vcpu->kvm->arch.vgic.lpis_enabled)
+ return true;
+
+ spin_lock(&its->lock);
+ for_each_lpi(device, itte, vcpu->kvm) {
+ if (!itte->enabled || !test_bit(vcpu->vcpu_id, itte->pending))
+ continue;
+
+ if (!itte->collection)
+ continue;
+
+ if (itte->collection->target_addr != vcpu->vcpu_id)
+ continue;
+
+ /*
+ * Clear the pending bit only after the LPI has actually been
+ * queued, so its pending state is not lost when all list
+ * registers are in use.
+ */
+ if (vgic_queue_irq(vcpu, 0, itte->lpi))
+ __clear_bit(vcpu->vcpu_id, itte->pending);
+ else
+ ret = false;
+ }
+
+ spin_unlock(&its->lock);
+ return ret;
+}
+
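vits_queue_lpis() uses itte->pending as a per-VCPU bitmap indexed by vcpu_id, so whoever creates an ITTE must size the bitmap for the VM's possible VCPUs. A possible allocation, with the helper name and call site being hypothetical:

static int vits_alloc_pending(struct its_itte *itte)
{
	/* One bit per possible VCPU, matching the vcpu_id indexing above. */
	itte->pending = kcalloc(BITS_TO_LONGS(KVM_MAX_VCPUS),
				sizeof(unsigned long), GFP_KERNEL);
	return itte->pending ? 0 : -ENOMEM;
}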
+/*
+ * Called with the distributor lock held by the caller; takes its->lock
+ * underneath it, the same lock ordering as vits_queue_lpis() above.
+ */
+void vits_unqueue_lpi(struct kvm_vcpu *vcpu, int lpi)
+{
+ struct vgic_its *its = &vcpu->kvm->arch.vgic.its;
+ struct its_itte *itte;
+
+ spin_lock(&its->lock);
+
+ /* Find the right ITTE and put the pending state back into it */
+ itte = find_itte_by_lpi(vcpu->kvm, lpi);
+ if (itte)
+ __set_bit(vcpu->vcpu_id, itte->pending);
+
+ spin_unlock(&its->lock);
+}
+
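The pending bitmap also matters beyond LR flushing: an ITS MOVI command retargets an event to another collection, and any pending state has to follow the move. A hedged sketch of that transfer; the helper name and locking context are assumptions, not part of this patch:

static void vits_move_pending(struct its_itte *itte, int from_vcpu,
			      int to_vcpu)
{
	/* Move a pending LPI from one VCPU's bit to another's. */
	if (__test_and_clear_bit(from_vcpu, itte->pending))
		__set_bit(to_vcpu, itte->pending);
}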
static int vits_handle_command(struct kvm_vcpu *vcpu, u64 *its_cmd)
{
return -ENODEV;
return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}
+static bool vgic_queue_lpis(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->kvm->arch.vgic.vm_ops.queue_lpis)
+ return vcpu->kvm->arch.vgic.vm_ops.queue_lpis(vcpu);
+
+ return true;
+}
+
+static void vgic_unqueue_lpi(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vcpu->kvm->arch.vgic.vm_ops.unqueue_lpi)
+ vcpu->kvm->arch.vgic.vm_ops.unqueue_lpi(vcpu, irq);
+}
+
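These wrappers dispatch through vm_ops so a GICv2 guest, which has no ITS, simply falls through to the no-op defaults. The GICv3 model would then hook up the ITS implementations; a sketch of that wiring, where the init function and the vgic_vm_ops type name are inferred from the vm_ops member used above:

static void vgic_v3_init_lpi_ops(struct kvm *kvm)
{
	struct vgic_vm_ops *ops = &kvm->arch.vgic.vm_ops;

	ops->queue_lpis = vits_queue_lpis;
	ops->unqueue_lpi = vits_unqueue_lpi;
}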
int kvm_vgic_map_resources(struct kvm *kvm)
{
return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
vlr = vgic_get_lr(vcpu, lr);
+ /* We don't care about LPIs (INTIDs 8192 and above) here */
+ if (vlr.irq >= 8192)
+ continue;
+
if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
vlr.state = 0;
vgic_set_lr(vcpu, lr, vlr);
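The value 8192 that recurs in these checks is the first LPI INTID defined by the GICv3 architecture (SGIs are INTIDs 0-15, PPIs 16-31, SPIs 32-1019, LPIs 8192 and up). A named helper would make the comparisons self-documenting; the names below are suggestions, not part of the patch:

#define VGIC_FIRST_LPI	8192

static inline bool vgic_irq_is_lpi(int irq)
{
	return irq >= VGIC_FIRST_LPI;
}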
static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
int lr_nr, int sgi_source_id)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_lr vlr;
vlr.state = 0;
vlr.irq = irq;
vlr.source = sgi_source_id;
- if (vgic_irq_is_active(vcpu, irq)) {
- vlr.state |= LR_STATE_ACTIVE;
- kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
- vgic_irq_clear_active(vcpu, irq);
- vgic_update_state(vcpu->kvm);
- } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
- vlr.state |= LR_STATE_PENDING;
- kvm_debug("Set pending: 0x%x\n", vlr.state);
- }
-
- if (!vgic_irq_is_edge(vcpu, irq))
- vlr.state |= LR_EOI_INT;
+ /* We care only about state for SGIs/PPIs/SPIs, not for LPIs */
+ if (irq < dist->nr_irqs) {
+ if (vgic_irq_is_active(vcpu, irq)) {
+ vlr.state |= LR_STATE_ACTIVE;
+ kvm_debug("Set active, clear distributor: 0x%x\n",
+ vlr.state);
+ vgic_irq_clear_active(vcpu, irq);
+ vgic_update_state(vcpu->kvm);
+ } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+ vlr.state |= LR_STATE_PENDING;
+ kvm_debug("Set pending: 0x%x\n", vlr.state);
+ }
+ if (!vgic_irq_is_edge(vcpu, irq))
+ vlr.state |= LR_EOI_INT;
+ } else {
+ /* If this is an LPI, it can only be pending */
+ if (irq >= 8192)
+ vlr.state |= LR_STATE_PENDING;
+ }
vgic_set_lr(vcpu, lr_nr, vlr);
vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
*/
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
u64 elrsr = vgic_get_elrsr(vcpu);
unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int lr;
/* Sanitize the input... */
BUG_ON(sgi_source_id & ~7);
BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
- BUG_ON(irq >= dist->nr_irqs);
kvm_debug("Queue IRQ%d\n", irq);
overflow = 1;
}
-
-
+ /*
+ * LPIs are not mapped in our bitmaps, so we leave the iteration
+ * to the ITS emulation code.
+ */
+ if (!vgic_queue_lpis(vcpu))
+ overflow = 1;
epilog:
if (overflow) {
for_each_clear_bit(lr_nr, elrsr_ptr, vgic_cpu->nr_lr) {
vlr = vgic_get_lr(vcpu, lr_nr);
+ /* LPIs are handled separately */
+ if (vlr.irq >= 8192) {
+ /* We only need to preserve LPIs that are still pending */
+ if (vlr.state & LR_STATE_PENDING) {
+ vgic_unqueue_lpi(vcpu, vlr.irq);
+ pending = true;
+ }
+ continue;
+ }
+
BUG_ON(!(vlr.state & LR_STATE_MASK));
pending = true;
}
vgic_update_state(vcpu->kvm);
- /* vgic_update_state would not cover only-active IRQs */
+ /* vgic_update_state would not cover only-active IRQs or LPIs */
if (pending)
set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
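For reference, the for_each_clear_bit() walks over elrsr_ptr above iterate over the occupied list registers: ELRSR sets a bit for every empty LR, so its clear bits mark the in-use ones. A standalone illustration of the pattern, mirroring the u64_to_bitmask() conversion used in vgic_queue_irq() above:

static void walk_used_lrs(struct kvm_vcpu *vcpu)
{
	u64 elrsr = vgic_get_elrsr(vcpu);
	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
	int lr;

	/* A clear ELRSR bit means the corresponding LR holds an IRQ. */
	for_each_clear_bit(lr, elrsr_ptr, vcpu->arch.vgic_cpu.nr_lr)
		pr_debug("LR%d is in use\n", lr);
}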