#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>
unsigned long *pending;
};
+static struct its_device *find_its_device(struct kvm *kvm, u32 device_id)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ struct its_device *device;
+
+ list_for_each_entry(device, &its->device_list, dev_list)
+ if (device_id == device->device_id)
+ return device;
+
+ return NULL;
+}
+
+static struct its_itte *find_itte(struct kvm *kvm, u32 device_id, u32 event_id)
+{
+ struct its_device *device;
+ struct its_itte *itte;
+
+ device = find_its_device(kvm, device_id);
+ if (device == NULL)
+ return NULL;
+
+ list_for_each_entry(itte, &device->itt, itte_list)
+ if (itte->event_id == event_id)
+ return itte;
+
+ return NULL;
+}
+
#define for_each_lpi(dev, itte, kvm) \
list_for_each_entry(dev, &(kvm)->arch.vgic.its.device_list, dev_list) \
list_for_each_entry(itte, &(dev)->itt, itte_list)
return NULL;
}
+static struct its_collection *find_collection(struct kvm *kvm, int coll_id)
+{
+ struct its_collection *collection;
+
+ list_for_each_entry(collection, &kvm->arch.vgic.its.collection_list,
+ coll_list) {
+ if (coll_id == collection->collection_id)
+ return collection;
+ }
+
+ return NULL;
+}
+
#define LPI_PROP_ENABLE_BIT(p) ((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p) ((p) & 0xfc)
spin_unlock(&its->lock);
}
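+/*
+ * Each ITS command occupies four 64-bit doublewords in the command queue;
+ * this helper extracts a bit field from one of those doublewords.
+ */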
+static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
+{
+ return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
+}
+
+#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
+#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
+#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
+#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
+#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
+#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32)
+#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1)
+
+/* The DISCARD command frees an Interrupt Translation Table Entry (ITTE). */
+static int vits_cmd_handle_discard(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ u32 device_id;
+ u32 event_id;
+ struct its_itte *itte;
+ int ret = 0;
+
+ device_id = its_cmd_get_deviceid(its_cmd);
+ event_id = its_cmd_get_id(its_cmd);
+
+ spin_lock(&its->lock);
+ itte = find_itte(kvm, device_id, event_id);
+ if (!itte || !itte->collection) {
+ ret = E_ITS_DISCARD_UNMAPPED_INTERRUPT;
+ goto out_unlock;
+ }
+
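+ /* Clear any pending state this LPI had on its target redistributor. */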
+ __clear_bit(itte->collection->target_addr, itte->pending);
+
+ list_del(&itte->itte_list);
+ kfree(itte->pending);
+ kfree(itte);
+out_unlock:
+ spin_unlock(&its->lock);
+ return ret;
+}
+
+/* The MOVI command moves an ITTE to a different collection. */
+static int vits_cmd_handle_movi(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct its_itte *itte;
+ struct its_collection *collection;
+ int ret = 0;
+
+ spin_lock(&its->lock);
+ itte = find_itte(kvm, device_id, event_id);
+ if (!itte) {
+ ret = E_ITS_MOVI_UNMAPPED_INTERRUPT;
+ goto out_unlock;
+ }
+ if (!itte->collection) {
+ ret = E_ITS_MOVI_UNMAPPED_COLLECTION;
+ goto out_unlock;
+ }
+
+ collection = find_collection(kvm, coll_id);
+ if (!collection) {
+ ret = E_ITS_MOVI_UNMAPPED_COLLECTION;
+ goto out_unlock;
+ }
+
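+ /* Transfer any pending state over to the new target redistributor. */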
+ if (test_and_clear_bit(itte->collection->target_addr, itte->pending))
+ __set_bit(collection->target_addr, itte->pending);
+
+ itte->collection = collection;
+out_unlock:
+ spin_unlock(&its->lock);
+ return ret;
+}
+
+static void vits_init_collection(struct kvm *kvm,
+ struct its_collection *collection,
+ u32 coll_id)
+{
+ collection->collection_id = coll_id;
+
+ list_add_tail(&collection->coll_list,
+ &kvm->arch.vgic.its.collection_list);
+}
+
+/* The MAPTI and MAPI commands map LPIs to ITTEs. */
+static int vits_cmd_handle_mapi(struct kvm *kvm, u64 *its_cmd, u8 cmd)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct its_itte *itte, *new_itte;
+ struct its_device *device;
+ struct its_collection *collection, *new_coll;
+ int lpi_nr;
+ int ret = 0;
+
+ /* Preallocate possibly needed memory here outside of the lock */
+ new_coll = kzalloc(sizeof(struct its_collection), GFP_KERNEL);
+ new_itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
+ if (new_itte)
+ new_itte->pending = kcalloc(BITS_TO_LONGS(dist->nr_cpus),
+ sizeof(long), GFP_KERNEL);
+
+ spin_lock(&dist->its.lock);
+
+ device = find_its_device(kvm, device_id);
+ if (!device) {
+ ret = E_ITS_MAPTI_UNMAPPED_DEVICE;
+ goto out_unlock;
+ }
+
+ collection = find_collection(kvm, coll_id);
+ if (!collection && !new_coll) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (cmd == GITS_CMD_MAPTI)
+ lpi_nr = its_cmd_get_physical_id(its_cmd);
+ else
+ lpi_nr = event_id;
+ if (lpi_nr < GIC_LPI_OFFSET ||
+ lpi_nr >= nr_idbits_propbase(dist->propbaser)) {
+ ret = E_ITS_MAPTI_PHYSICALID_OOR;
+ goto out_unlock;
+ }
+
+ itte = find_itte(kvm, device_id, event_id);
+ if (!itte) {
+ if (!new_itte || !new_itte->pending) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ itte = new_itte;
+
+ itte->event_id = event_id;
+ list_add_tail(&itte->itte_list, &device->itt);
+ } else {
+ if (new_itte)
+ kfree(new_itte->pending);
+ kfree(new_itte);
+ }
+
+ if (!collection) {
+ collection = new_coll;
+ vits_init_collection(kvm, collection, coll_id);
+ } else {
+ kfree(new_coll);
+ }
+
+ itte->collection = collection;
+ itte->lpi = lpi_nr;
+
+out_unlock:
+ spin_unlock(&dist->its.lock);
+ if (ret) {
+ kfree(new_coll);
+ if (new_itte)
+ kfree(new_itte->pending);
+ kfree(new_itte);
+ }
+ return ret;
+}
+
+static void vits_unmap_device(struct kvm *kvm, struct its_device *device)
+{
+ struct its_itte *itte, *temp;
+
+ /*
+ * The spec says that unmapping a device with valid ITTEs still
+ * attached is UNPREDICTABLE. We remove all ITTEs here, since we
+ * cannot leave their memory unreferenced.
+ */
+ list_for_each_entry_safe(itte, temp, &device->itt, itte_list) {
+ list_del(&itte->itte_list);
+ kfree(itte->pending);
+ kfree(itte);
+ }
+
+ list_del(&device->dev_list);
+ kfree(device);
+}
+
+/* The MAPD command maps device IDs to Interrupt Translation Tables (ITTs). */
+static int vits_cmd_handle_mapd(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ bool valid = its_cmd_get_validbit(its_cmd);
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ struct its_device *device, *new_device = NULL;
+
+ /* We preallocate memory outside of the lock here */
+ if (valid) {
+ new_device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
+ if (!new_device)
+ return -ENOMEM;
+ }
+
+ spin_lock(&its->lock);
+
+ device = find_its_device(kvm, device_id);
+ if (device)
+ vits_unmap_device(kvm, device);
+
+ /*
+ * The spec does not say whether unmapping a not-mapped device
+ * is an error, so we are done in any case.
+ */
+ if (!valid)
+ goto out_unlock;
+
+ device = new_device;
+
+ device->device_id = device_id;
+ INIT_LIST_HEAD(&device->itt);
+
+ list_add_tail(&device->dev_list,
+ &kvm->arch.vgic.its.device_list);
+
+out_unlock:
+ spin_unlock(&its->lock);
+ return 0;
+}
+
+/* The MAPC command maps collection IDs to redistributors. */
+static int vits_cmd_handle_mapc(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ u16 coll_id;
+ u32 target_addr;
+ struct its_collection *collection, *new_coll = NULL;
+ bool valid;
+
+ valid = its_cmd_get_validbit(its_cmd);
+ coll_id = its_cmd_get_collection(its_cmd);
+ target_addr = its_cmd_get_target_addr(its_cmd);
+
+ if (target_addr >= atomic_read(&kvm->online_vcpus))
+ return E_ITS_MAPC_PROCNUM_OOR;
+
+ /* We preallocate memory outside of the lock here */
+ if (valid) {
+ new_coll = kmalloc(sizeof(struct its_collection), GFP_KERNEL);
+ if (!new_coll)
+ return -ENOMEM;
+ }
+
+ spin_lock(&its->lock);
+ collection = find_collection(kvm, coll_id);
+
+ if (!valid) {
+ struct its_device *device;
+ struct its_itte *itte;
+ /*
+ * Clearing the mapping for that collection ID removes the
+ * entry from the list. If there wasn't any before, we can
+ * go home early.
+ */
+ if (!collection)
+ goto out_unlock;
+
+ for_each_lpi(device, itte, kvm)
+ if (itte->collection &&
+ itte->collection->collection_id == coll_id)
+ itte->collection = NULL;
+
+ list_del(&collection->coll_list);
+ kfree(collection);
+ } else {
+ if (!collection) {
+ collection = new_coll;
+ vits_init_collection(kvm, collection, coll_id);
+ } else {
+ kfree(new_coll);
+ }
+
+ collection->target_addr = target_addr;
+ }
+
+out_unlock:
+ spin_unlock(&its->lock);
+ return 0;
+}
+
+/* The CLEAR command removes the pending state for a particular LPI. */
+static int vits_cmd_handle_clear(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ u32 device_id;
+ u32 event_id;
+ struct its_itte *itte;
+ int ret = 0;
+
+ device_id = its_cmd_get_deviceid(its_cmd);
+ event_id = its_cmd_get_id(its_cmd);
+
+ spin_lock(&its->lock);
+
+ itte = find_itte(kvm, device_id, event_id);
+ if (!itte) {
+ ret = E_ITS_CLEAR_UNMAPPED_INTERRUPT;
+ goto out_unlock;
+ }
+
+ if (itte->collection)
+ __clear_bit(itte->collection->target_addr, itte->pending);
+
+out_unlock:
+ spin_unlock(&its->lock);
+ return ret;
+}
+
+/* The INV command syncs the pending bit from the memory tables. */
+static int vits_cmd_handle_inv(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u32 device_id;
+ u32 event_id;
+ struct its_itte *itte, *new_itte;
+ gpa_t propbase;
+ int ret;
+ u8 prop;
+
+ device_id = its_cmd_get_deviceid(its_cmd);
+ event_id = its_cmd_get_id(its_cmd);
+
+ spin_lock(&dist->its.lock);
+ itte = find_itte(kvm, device_id, event_id);
+ spin_unlock(&dist->its.lock);
+ if (!itte)
+ return E_ITS_INV_UNMAPPED_INTERRUPT;
+
+ /*
+ * We cannot read from guest memory inside the spinlock, so we
+ * need to re-read our tables to learn whether the LPI number we are
+ * using is still valid.
+ */
+ do {
+ propbase = BASER_BASE_ADDRESS(dist->propbaser);
+ ret = kvm_read_guest(kvm, propbase + itte->lpi - GIC_LPI_OFFSET,
+ &prop, 1);
+ if (ret)
+ return ret;
+
+ spin_lock(&dist->its.lock);
+ new_itte = find_itte(kvm, device_id, event_id);
+ if (!new_itte) {
+ spin_unlock(&dist->its.lock);
+ return E_ITS_INV_UNMAPPED_INTERRUPT;
+ }
+ if (new_itte->lpi != itte->lpi) {
+ /* The mapping changed under us, retry with the new LPI. */
+ itte = new_itte;
+ spin_unlock(&dist->its.lock);
+ continue;
+ }
+ update_lpi_config(kvm, itte, prop);
+ spin_unlock(&dist->its.lock);
+ break;
+ } while (1);
+ return 0;
+}
+
+/* The INVALL command requests flushing of all IRQ data in this collection. */
+static int vits_cmd_handle_invall(struct kvm *kvm, u64 *its_cmd)
+{
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct its_collection *collection;
+ struct kvm_vcpu *vcpu;
+
+ collection = find_collection(kvm, coll_id);
+ if (!collection)
+ return E_ITS_INVALL_UNMAPPED_COLLECTION;
+
+ vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+
+ its_update_lpis_configuration(kvm);
+ its_sync_lpi_pending_table(vcpu);
+
+ return 0;
+}
+
+/* The MOVALL command moves all IRQs from one redistributor to another. */
+static int vits_cmd_handle_movall(struct kvm *kvm, u64 *its_cmd)
+{
+ struct vgic_its *its = &kvm->arch.vgic.its;
+ u32 target1_addr = its_cmd_get_target_addr(its_cmd);
+ u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
+ struct its_collection *collection;
+ struct its_device *device;
+ struct its_itte *itte;
+
+ if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
+ target2_addr >= atomic_read(&kvm->online_vcpus))
+ return E_ITS_MOVALL_PROCNUM_OOR;
+
+ if (target1_addr == target2_addr)
+ return 0;
+
+ spin_lock(&its->lock);
+ for_each_lpi(device, itte, kvm) {
+ /* remap all collections mapped to target address 1 */
+ collection = itte->collection;
+ if (collection && collection->target_addr == target1_addr)
+ collection->target_addr = target2_addr;
+
+ /* move pending state if LPI is affected */
+ if (test_and_clear_bit(target1_addr, itte->pending))
+ __set_bit(target2_addr, itte->pending);
+ }
+
+ spin_unlock(&its->lock);
+ return 0;
+}
+
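+/*
+ * Decode one ITS command read from the command queue and dispatch it to
+ * the matching handler. Returns 0 on success, an ITS error encoding or a
+ * negative errno.
+ */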
static int vits_handle_command(struct kvm_vcpu *vcpu, u64 *its_cmd)
{
- return -ENODEV;
+ u8 cmd = its_cmd_get_command(its_cmd);
+ int ret = -ENODEV;
+
+ switch (cmd) {
+ case GITS_CMD_MAPD:
+ ret = vits_cmd_handle_mapd(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_MAPC:
+ ret = vits_cmd_handle_mapc(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_MAPI:
+ case GITS_CMD_MAPTI:
+ ret = vits_cmd_handle_mapi(vcpu->kvm, its_cmd, cmd);
+ break;
+ case GITS_CMD_MOVI:
+ ret = vits_cmd_handle_movi(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_DISCARD:
+ ret = vits_cmd_handle_discard(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_CLEAR:
+ ret = vits_cmd_handle_clear(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_MOVALL:
+ ret = vits_cmd_handle_movall(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_INV:
+ ret = vits_cmd_handle_inv(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_INVALL:
+ ret = vits_cmd_handle_invall(vcpu->kvm, its_cmd);
+ break;
+ case GITS_CMD_SYNC:
+ /* we ignore this command: we are in sync all of the time */
+ ret = 0;
+ break;
+ }
+
+ return ret;
}
static bool handle_mmio_gits_cbaser(struct kvm_vcpu *vcpu,
list_for_each_safe(cur, temp, &dev->itt) {
itte = (container_of(cur, struct its_itte, itte_list));
list_del(cur);
+ kfree(itte->pending);
kfree(itte);
}
list_del(dev_cur);