return ret;
}
+/*
+ * INVALL updates the per-LPI configuration status for every LPI mapped to
+ * a particular redistributor.
+ * We iterate over all mapped LPIs in our radix tree and update those.
+ *
+ * Returns 0 on success (including the nothing-to-do case), -1 if the
+ * collection ID does not resolve to a VCPU, or the last error from
+ * update_lpi_property() if updating any individual LPI failed.
+ */
+static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t collid = its_cmd_get_collection(cmdptr);
+    struct vcpu *vcpu;
+    struct pending_irq *pirqs[16];
+    uint64_t vlpi = 0;          /* 64-bit to catch overflows */
+    unsigned int nr_lpis, i;
+    unsigned long flags;
+    int ret = 0;
+
+    /*
+     * As this implementation walks over all mapped LPIs, it might take
+     * too long for a real guest, so we might want to revisit this
+     * implementation for DomUs.
+     * However this command is very rare, also we don't expect many
+     * LPIs to be actually mapped, so it's fine for Dom0 to use.
+     */
+    ASSERT(is_hardware_domain(its->d));
+
+    /*
+     * If no redistributor has its LPIs enabled yet, we can't access the
+     * property table, so there is no point in executing this command.
+     * The control flow dependency here and a barrier instruction on the
+     * write side make sure we can access these without taking a lock.
+     */
+    if ( !its->d->arch.vgic.rdists_enabled )
+        return 0;
+
+    spin_lock(&its->its_lock);
+    vcpu = get_vcpu_from_collection(its, collid);
+    spin_unlock(&its->its_lock);
+
+    /*
+     * The guest controls the collection ID, so an unmapped collection must
+     * not lead to a NULL pointer dereference below. Bail out like the other
+     * command handlers do on a failed lookup.
+     */
+    if ( !vcpu )
+        return -1;
+
+    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
+    read_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
+
+    do
+    {
+        int err;
+
+        nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree,
+                                         (void **)pirqs, vlpi,
+                                         ARRAY_SIZE(pirqs));
+
+        for ( i = 0; i < nr_lpis; i++ )
+        {
+            /*
+             * Always advance the lookup cursor, even over other VCPUs'
+             * LPIs, so the next gang lookup continues behind this gang
+             * instead of rescanning entries we have already visited.
+             */
+            vlpi = pirqs[i]->irq;
+
+            /* We only care about LPIs on our VCPU. */
+            if ( pirqs[i]->lpi_vcpu_id != vcpu->vcpu_id )
+                continue;
+
+            /* If that fails for a single LPI, carry on to handle the rest. */
+            err = update_lpi_property(its->d, pirqs[i]);
+            if ( !err )
+                update_lpi_vgic_status(vcpu, pirqs[i]);
+            else
+                ret = err;
+        }
+    /*
+     * Loop over the next gang of pending_irqs until we reached the end of
+     * a (fully populated) tree or the lookup function returns fewer LPIs
+     * than it has been asked for.
+     */
+    } while ( (++vlpi < its->d->arch.vgic.nr_lpis) &&
+              (nr_lpis == ARRAY_SIZE(pirqs)) );
+
+    read_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
+    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+    return ret;
+}
+
/* Must be called with the ITS lock held. */
static int its_discard_event(struct virt_its *its,
uint32_t vdevid, uint32_t vevid)
case GITS_CMD_INV:
ret = its_handle_inv(its, command);
break;
+ case GITS_CMD_INVALL:
+ ret = its_handle_invall(its, command);
+ break;
case GITS_CMD_MAPC:
ret = its_handle_mapc(its, command);
break;