--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
obj-y += domctl.o
obj-y += sysctl.o
obj-y += domain_build.o
-obj-y += gic.o
+obj-y += gic.o gic-v2.o
obj-y += io.o
obj-y += irq.o
obj-y += kernel.o
--- /dev/null
+++ b/xen/arch/arm/gic-v2.c
+/*
+ * xen/arch/arm/gic-v2.c
+ *
+ * ARM Generic Interrupt Controller v2 support
+ *
+ * Tim Deegan <tim@xen.org>
+ * Copyright (c) 2011 Citrix Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/irq.h>
+#include <xen/sched.h>
+#include <xen/errno.h>
+#include <xen/softirq.h>
+#include <xen/list.h>
+#include <xen/device_tree.h>
+#include <asm/p2m.h>
+#include <asm/domain.h>
+#include <asm/platform.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+
+/*
+ * LR register definitions are GICv2 specific.
+ * They have been moved here from the header file.
+ */
+#define GICH_V2_LR_VIRTUAL_MASK 0x3ff
+#define GICH_V2_LR_VIRTUAL_SHIFT 0
+#define GICH_V2_LR_PHYSICAL_MASK 0x3ff
+#define GICH_V2_LR_PHYSICAL_SHIFT 10
+#define GICH_V2_LR_STATE_MASK 0x3
+#define GICH_V2_LR_STATE_SHIFT 28
+#define GICH_V2_LR_PRIORITY_SHIFT 23
+#define GICH_V2_LR_PRIORITY_MASK 0x1f
+#define GICH_V2_LR_HW_SHIFT 31
+#define GICH_V2_LR_HW_MASK 0x1
+#define GICH_V2_LR_GRP_SHIFT 30
+#define GICH_V2_LR_GRP_MASK 0x1
+#define GICH_V2_LR_MAINTENANCE_IRQ (1<<19)
+#define GICH_V2_LR_GRP1 (1<<30)
+#define GICH_V2_LR_HW (1<<31)
+#define GICH_V2_LR_CPUID_SHIFT 9
+#define GICH_V2_VTR_NRLRGS 0x3f
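+
+/*
+ * Resulting GICv2 LR bit layout, per the masks and shifts above:
+ *   [31] HW, [30] Grp1, [29:28] state, [27:23] priority,
+ *   [19:10] physical IRQ (when HW=1; bit 19 instead requests a
+ *   maintenance interrupt on EOI when HW=0), [9:0] virtual IRQ.
+ */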
+
+#define GICH_V2_VMCR_PRIORITY_MASK 0x1f
+#define GICH_V2_VMCR_PRIORITY_SHIFT 27
+
+#define GICD (gicv2.map_dbase)
+#define GICC (gicv2.map_cbase)
+#define GICH (gicv2.map_hbase)
+
+/* Global state */
+static struct {
+ paddr_t dbase; /* Address of distributor registers */
+ void __iomem * map_dbase; /* IO mapped Address of distributor registers */
+ paddr_t cbase; /* Address of CPU interface registers */
+ void __iomem * map_cbase; /* IO mapped Address of CPU interface registers*/
+ paddr_t hbase; /* Address of virtual interface registers */
+ void __iomem * map_hbase; /* IO Address of virtual interface registers */
+ paddr_t vbase; /* Address of virtual cpu interface registers */
+ spinlock_t lock;
+} gicv2;
+
+static struct gic_info gicv2_info;
+
+/* The GIC mapping of CPU interfaces does not necessarily match the
+ * logical CPU numbering. Let's use the mapping as returned by the GIC
+ * itself.
+ */
+static DEFINE_PER_CPU(u8, gic_cpu_id);
+
+/* Maximum number of CPU interfaces per GIC */
+#define NR_GIC_CPU_IF 8
+
+static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask)
+{
+ unsigned int cpu;
+ unsigned int mask = 0;
+ cpumask_t possible_mask;
+
+ cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
+ for_each_cpu( cpu, &possible_mask )
+ {
+ ASSERT(cpu < NR_GIC_CPU_IF);
+ mask |= per_cpu(gic_cpu_id, cpu);
+ }
+
+ return mask;
+}
+
+static void gicv2_save_state(struct vcpu *v)
+{
+ int i;
+
+ /* No need for spinlocks here because interrupts are disabled around
+ * this call and it only accesses struct vcpu fields that cannot be
+ * accessed simultaneously by another pCPU.
+ */
+ for ( i = 0; i < gicv2_info.nr_lrs; i++ )
+ v->arch.gic_lr[i] = readl_relaxed(GICH + GICH_LR + i * 4);
+
+ v->arch.gic_apr = readl_relaxed(GICH + GICH_APR);
+ v->arch.gic_vmcr = readl_relaxed(GICH + GICH_VMCR);
+ /* Disable until next VCPU scheduled */
+ writel_relaxed(0, GICH + GICH_HCR);
+}
+
+static void gicv2_restore_state(const struct vcpu *v)
+{
+ int i;
+
+ for ( i = 0; i < gicv2_info.nr_lrs; i++ )
+ writel_relaxed(v->arch.gic_lr[i], GICH + GICH_LR + i * 4);
+
+ writel_relaxed(v->arch.gic_apr, GICH + GICH_APR);
+ writel_relaxed(v->arch.gic_vmcr, GICH + GICH_VMCR);
+ writel_relaxed(GICH_HCR_EN, GICH + GICH_HCR);
+}
+
+static void gicv2_dump_state(const struct vcpu *v)
+{
+ int i;
+
+ if ( v == current )
+ {
+ for ( i = 0; i < gicv2_info.nr_lrs; i++ )
+ printk(" HW_LR[%d]=%x\n", i,
+ readl_relaxed(GICH + GICH_LR + i * 4));
+ }
+ else
+ {
+ for ( i = 0; i < gicv2_info.nr_lrs; i++ )
+ printk(" VCPU_LR[%d]=%x\n", i, v->arch.gic_lr[i]);
+ }
+}
+
+static void gicv2_eoi_irq(struct irq_desc *irqd)
+{
+ int irq = irqd->irq;
+ /* Lower the priority */
+ writel_relaxed(irq, GICC + GICC_EOIR);
+}
+
+static void gicv2_dir_irq(struct irq_desc *irqd)
+{
+ /* Deactivate */
+ writel_relaxed(irqd->irq, GICC + GICC_DIR);
+}
+
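+/*
+ * Reading GICC_IAR acknowledges the highest-priority pending interrupt
+ * and returns its ID in bits [9:0] (the GICC_IA_IRQ mask).
+ */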
+static unsigned int gicv2_read_irq(void)
+{
+ return (readl_relaxed(GICC + GICC_IAR) & GICC_IA_IRQ);
+}
+
+/*
+ * Needs to be called with a valid cpu_mask, i.e. each CPU in the mask
+ * has already called gic_cpu_init.
+ */
+static void gicv2_set_irq_properties(struct irq_desc *desc,
+ const cpumask_t *cpu_mask,
+ unsigned int priority)
+{
+ uint32_t cfg, edgebit;
+ unsigned int mask = gicv2_cpu_mask(cpu_mask);
+ unsigned int irq = desc->irq;
+ unsigned int type = desc->arch.type;
+
+ ASSERT(type != DT_IRQ_TYPE_INVALID);
+ ASSERT(spin_is_locked(&desc->lock));
+
+ spin_lock(&gicv2.lock);
+ /* Set edge / level */
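+ /*
+ * GICD_ICFGR holds two configuration bits per interrupt (16 interrupts
+ * per 32-bit register); bit 1 of each field selects edge (1) versus
+ * level (0) triggering, hence the edgebit computed below.
+ */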
+ cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
+ edgebit = 2u << (2 * (irq % 16));
+ if ( type & DT_IRQ_TYPE_LEVEL_MASK )
+ cfg &= ~edgebit;
+ else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
+ cfg |= edgebit;
+ writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
+
+ /* Set target CPU mask (RAZ/WI on uniprocessor) */
+ writeb_relaxed(mask, GICD + GICD_ITARGETSR + irq);
+ /* Set priority */
+ writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
+
+ spin_unlock(&gicv2.lock);
+}
+
+static void __init gicv2_dist_init(void)
+{
+ uint32_t type;
+ uint32_t cpumask;
+ uint32_t gic_cpus;
+ int i;
+
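+ /*
+ * ITARGETSR0 is read-only and returns this CPU's target byte; replicate
+ * it into all four bytes so that a single 32-bit write below routes
+ * four interrupts at a time.
+ */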
+ cpumask = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+
+ /* Disable the distributor */
+ writel_relaxed(0, GICD + GICD_CTLR);
+
+ type = readl_relaxed(GICD + GICD_TYPER);
+ gicv2_info.nr_lines = 32 * ((type & GICD_TYPE_LINES) + 1);
+ gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
+ printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n",
+ gicv2_info.nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s",
+ (type & GICD_TYPE_SEC) ? ", secure" : "",
+ readl_relaxed(GICD + GICD_IIDR));
+
+ /* Default all global IRQs to level, active low */
+ for ( i = 32; i < gicv2_info.nr_lines; i += 16 )
+ writel_relaxed(0x0, GICD + GICD_ICFGR + (i / 16) * 4);
+
+ /* Route all global IRQs to this CPU */
+ for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
+ writel_relaxed(cpumask, GICD + GICD_ITARGETSR + (i / 4) * 4);
+
+ /* Default priority for global interrupts */
+ for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
+ writel_relaxed (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+ GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
+
+ /* Disable all global interrupts */
+ for ( i = 32; i < gicv2_info.nr_lines; i += 32 )
+ writel_relaxed(~0x0, GICD + GICD_ICENABLER + (i / 32) * 4);
+
+ /* Turn on the distributor */
+ writel_relaxed(GICD_CTL_ENABLE, GICD + GICD_CTLR);
+}
+
+static void __cpuinit gicv2_cpu_init(void)
+{
+ int i;
+
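+ /* Each byte of the banked ITARGETSR0 reads back as the one-hot mask of
+ * this CPU's GIC interface (e.g. 0x01 on interface 0). */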
+ this_cpu(gic_cpu_id) = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
+
+ /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
+ * even though they are controlled with GICD registers, they must
+ * be set up here with the other per-cpu state. */
+ writel_relaxed(0xffff0000, GICD + GICD_ICENABLER); /* Disable all PPI */
+ writel_relaxed(0x0000ffff, GICD + GICD_ISENABLER); /* Enable all SGI */
+
+ /* Set SGI priorities */
+ for ( i = 0; i < 16; i += 4 )
+ writel_relaxed(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
+ GIC_PRI_IPI << 8 | GIC_PRI_IPI,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
+
+ /* Set PPI priorities */
+ for ( i = 16; i < 32; i += 4 )
+ writel_relaxed(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+ GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
+
+ /* Local settings: interface controller */
+ /* Don't mask by priority */
+ writel_relaxed(0xff, GICC + GICC_PMR);
+ /* Finest granularity of priority */
+ writel_relaxed(0x0, GICC + GICC_BPR);
+ /* Turn on delivery */
+ writel_relaxed(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC + GICC_CTLR);
+}
+
+static void gicv2_cpu_disable(void)
+{
+ writel_relaxed(0x0, GICC + GICC_CTLR);
+}
+
+static void __cpuinit gicv2_hyp_init(void)
+{
+ uint32_t vtr;
+ uint8_t nr_lrs;
+
+ vtr = readl_relaxed(GICH + GICH_VTR);
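+ /* GICH_VTR.ListRegs (bits [5:0]) encodes the number of LRs minus one. */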
+ nr_lrs = (vtr & GICH_V2_VTR_NRLRGS) + 1;
+ gicv2_info.nr_lrs = nr_lrs;
+
+ writel_relaxed(GICH_MISR_EOI, GICH + GICH_MISR);
+}
+
+static void __cpuinit gicv2_hyp_disable(void)
+{
+ writel_relaxed(0, GICH + GICH_HCR);
+}
+
+static int gicv2_secondary_cpu_init(void)
+{
+ spin_lock(&gicv2.lock);
+
+ gicv2_cpu_init();
+ gicv2_hyp_init();
+
+ spin_unlock(&gicv2.lock);
+
+ return 0;
+}
+
+static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
+ const cpumask_t *cpu_mask)
+{
+ unsigned int mask = 0;
+ cpumask_t online_mask;
+
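+ /*
+ * GICD_SGIR layout: TargetListFilter in bits [25:24], CPUTargetList in
+ * bits [23:16] and the SGI ID in bits [3:0].
+ */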
+ switch ( irqmode )
+ {
+ case SGI_TARGET_OTHERS:
+ writel_relaxed(GICD_SGI_TARGET_OTHERS | sgi, GICD + GICD_SGIR);
+ break;
+ case SGI_TARGET_SELF:
+ writel_relaxed(GICD_SGI_TARGET_SELF | sgi, GICD + GICD_SGIR);
+ break;
+ case SGI_TARGET_LIST:
+ cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
+ mask = gicv2_cpu_mask(&online_mask);
+ writel_relaxed(GICD_SGI_TARGET_LIST |
+ (mask << GICD_SGI_TARGET_SHIFT) | sgi,
+ GICD + GICD_SGIR);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* Shut down the per-CPU GIC interface */
+static void gicv2_disable_interface(void)
+{
+ spin_lock(&gicv2.lock);
+ gicv2_cpu_disable();
+ gicv2_hyp_disable();
+ spin_unlock(&gicv2.lock);
+}
+
+static void gicv2_update_lr(int lr, const struct pending_irq *p,
+ unsigned int state)
+{
+ uint32_t lr_reg;
+
+ BUG_ON(lr >= gicv2_info.nr_lrs);
+ BUG_ON(lr < 0);
+
+ lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) |
+ ((GIC_PRI_TO_GUEST(p->priority) & GICH_V2_LR_PRIORITY_MASK)
+ << GICH_V2_LR_PRIORITY_SHIFT) |
+ ((p->irq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT));
+
+ if ( p->desc != NULL )
+ lr_reg |= GICH_V2_LR_HW | ((p->desc->irq & GICH_V2_LR_PHYSICAL_MASK )
+ << GICH_V2_LR_PHYSICAL_SHIFT);
+
+ writel_relaxed(lr_reg, GICH + GICH_LR + lr * 4);
+}
+
+static void gicv2_clear_lr(int lr)
+{
+ writel_relaxed(0, GICH + GICH_LR + lr * 4);
+}
+
+static int gicv_v2_init(struct domain *d)
+{
+ int ret;
+
+ /*
+ * Domain 0 gets the hardware address.
+ * Guests get the virtual platform layout.
+ */
+ if ( is_hardware_domain(d) )
+ {
+ d->arch.vgic.dbase = gicv2.dbase;
+ d->arch.vgic.cbase = gicv2.cbase;
+ }
+ else
+ {
+ d->arch.vgic.dbase = GUEST_GICD_BASE;
+ d->arch.vgic.cbase = GUEST_GICC_BASE;
+ }
+
+ d->arch.vgic.nr_lines = 0;
+
+ /*
+ * Map the gic virtual cpu interface in the gic cpu interface
+ * region of the guest.
+ *
+ * The second page is always mapped at +4K irrespective of the
+ * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
+ */
+ ret = map_mmio_regions(d, d->arch.vgic.cbase,
+ d->arch.vgic.cbase + PAGE_SIZE - 1,
+ gicv2.vbase);
+ if ( ret )
+ return ret;
+
+ if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
+ ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
+ d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
+ gicv2.vbase + PAGE_SIZE);
+ else
+ ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
+ d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
+ gicv2.vbase + 16*PAGE_SIZE);
+
+ return ret;
+}
+
+static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
+{
+ uint32_t lrv;
+
+ lrv = readl_relaxed(GICH + GICH_LR + lr * 4);
+ lr_reg->pirq = (lrv >> GICH_V2_LR_PHYSICAL_SHIFT) & GICH_V2_LR_PHYSICAL_MASK;
+ lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK;
+ lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK;
+ lr_reg->state = (lrv >> GICH_V2_LR_STATE_SHIFT) & GICH_V2_LR_STATE_MASK;
+ lr_reg->hw_status = (lrv >> GICH_V2_LR_HW_SHIFT) & GICH_V2_LR_HW_MASK;
+ lr_reg->grp = (lrv >> GICH_V2_LR_GRP_SHIFT) & GICH_V2_LR_GRP_MASK;
+}
+
+static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg)
+{
+ uint32_t lrv = 0;
+
+ lrv = ( ((lr_reg->pirq & GICH_V2_LR_PHYSICAL_MASK) << GICH_V2_LR_PHYSICAL_SHIFT) |
+ ((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT) |
+ ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK)
+ << GICH_V2_LR_PRIORITY_SHIFT) |
+ ((uint32_t)(lr_reg->state & GICH_V2_LR_STATE_MASK)
+ << GICH_V2_LR_STATE_SHIFT) |
+ ((uint32_t)(lr_reg->hw_status & GICH_V2_LR_HW_MASK)
+ << GICH_V2_LR_HW_SHIFT) |
+ ((uint32_t)(lr_reg->grp & GICH_V2_LR_GRP_MASK) << GICH_V2_LR_GRP_SHIFT) );
+
+ writel_relaxed(lrv, GICH + GICH_LR + lr * 4);
+}
+
+static void gicv2_hcr_status(uint32_t flag, bool_t status)
+{
+ uint32_t hcr = readl_relaxed(GICH + GICH_HCR);
+
+ if ( status )
+ hcr |= flag;
+ else
+ hcr &= (~flag);
+
+ writel_relaxed(hcr, GICH + GICH_HCR);
+}
+
+static unsigned int gicv2_read_vmcr_priority(void)
+{
+ return ((readl_relaxed(GICH + GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
+ & GICH_V2_VMCR_PRIORITY_MASK);
+}
+
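+/*
+ * GICv2 implements a single active-priorities register (GICH_APR), so
+ * apr_reg is ignored here; presumably the parameter exists for GICs
+ * with more than one APR.
+ */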
+static unsigned int gicv2_read_apr(int apr_reg)
+{
+ return readl_relaxed(GICH + GICH_APR);
+}
+
+static void gicv2_irq_enable(struct irq_desc *desc)
+{
+ unsigned long flags;
+ int irq = desc->irq;
+
+ ASSERT(spin_is_locked(&desc->lock));
+
+ spin_lock_irqsave(&gicv2.lock, flags);
+ desc->status &= ~IRQ_DISABLED;
+ dsb(sy);
+ /* Enable routing */
+ writel_relaxed((1u << (irq % 32)), GICD + GICD_ISENABLER + (irq / 32) * 4);
+ spin_unlock_irqrestore(&gicv2.lock, flags);
+}
+
+static void gicv2_irq_disable(struct irq_desc *desc)
+{
+ unsigned long flags;
+ int irq = desc->irq;
+
+ ASSERT(spin_is_locked(&desc->lock));
+
+ spin_lock_irqsave(&gicv2.lock, flags);
+ /* Disable routing */
+ writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + (irq / 32) * 4);
+ desc->status |= IRQ_DISABLED;
+ spin_unlock_irqrestore(&gicv2.lock, flags);
+}
+
+static unsigned int gicv2_irq_startup(struct irq_desc *desc)
+{
+ gicv2_irq_enable(desc);
+
+ return 0;
+}
+
+static void gicv2_irq_shutdown(struct irq_desc *desc)
+{
+ gicv2_irq_disable(desc);
+}
+
+static void gicv2_irq_ack(struct irq_desc *desc)
+{
+ /* No ACK -- reading IAR has done this for us */
+}
+
+static void gicv2_host_irq_end(struct irq_desc *desc)
+{
+ /* Lower the priority */
+ gicv2_eoi_irq(desc);
+ /* Deactivate */
+ gicv2_dir_irq(desc);
+}
+
+static void gicv2_guest_irq_end(struct irq_desc *desc)
+{
+ /* Lower the priority of the IRQ */
+ gicv2_eoi_irq(desc);
+ /* Deactivation happens in maintenance interrupt / via GICV */
+}
+
+static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+{
+ BUG();
+}
+
+/* XXX different for level vs edge */
+static hw_irq_controller gicv2_host_irq_type = {
+ .typename = "gic-v2",
+ .startup = gicv2_irq_startup,
+ .shutdown = gicv2_irq_shutdown,
+ .enable = gicv2_irq_enable,
+ .disable = gicv2_irq_disable,
+ .ack = gicv2_irq_ack,
+ .end = gicv2_host_irq_end,
+ .set_affinity = gicv2_irq_set_affinity,
+};
+
+static hw_irq_controller gicv2_guest_irq_type = {
+ .typename = "gic-v2",
+ .startup = gicv2_irq_startup,
+ .shutdown = gicv2_irq_shutdown,
+ .enable = gicv2_irq_enable,
+ .disable = gicv2_irq_disable,
+ .ack = gicv2_irq_ack,
+ .end = gicv2_guest_irq_end,
+ .set_affinity = gicv2_irq_set_affinity,
+};
+
+static const struct gic_hw_operations gicv2_ops = {
+ .info = &gicv2_info,
+ .secondary_init = gicv2_secondary_cpu_init,
+ .save_state = gicv2_save_state,
+ .restore_state = gicv2_restore_state,
+ .dump_state = gicv2_dump_state,
+ .gicv_setup = gicv_v2_init,
+ .gic_host_irq_type = &gicv2_host_irq_type,
+ .gic_guest_irq_type = &gicv2_guest_irq_type,
+ .eoi_irq = gicv2_eoi_irq,
+ .deactivate_irq = gicv2_dir_irq,
+ .read_irq = gicv2_read_irq,
+ .set_irq_properties = gicv2_set_irq_properties,
+ .send_SGI = gicv2_send_SGI,
+ .disable_interface = gicv2_disable_interface,
+ .update_lr = gicv2_update_lr,
+ .update_hcr_status = gicv2_hcr_status,
+ .clear_lr = gicv2_clear_lr,
+ .read_lr = gicv2_read_lr,
+ .write_lr = gicv2_write_lr,
+ .read_vmcr_priority = gicv2_read_vmcr_priority,
+ .read_apr = gicv2_read_apr,
+};
+
+/* Set up the GIC */
+void __init gicv2_init(void)
+{
+ static const struct dt_device_match gic_ids[] __initconst =
+ {
+ DT_MATCH_GIC,
+ { /* sentinel */ },
+ };
+ struct dt_device_node *node;
+ int res;
+
+ node = dt_find_interrupt_controller(gic_ids);
+ if ( !node )
+ panic("GICv2: Unable to find compatible GIC in the device tree");
+
+ dt_device_set_used_by(node, DOMID_XEN);
+
+ res = dt_device_get_address(node, 0, &gicv2.dbase, NULL);
+ if ( res || !gicv2.dbase || (gicv2.dbase & ~PAGE_MASK) )
+ panic("GICv2: Cannot find a valid address for the distributor");
+
+ res = dt_device_get_address(node, 1, &gicv2.cbase, NULL);
+ if ( res || !gicv2.cbase || (gicv2.cbase & ~PAGE_MASK) )
+ panic("GICv2: Cannot find a valid address for the CPU");
+
+ res = dt_device_get_address(node, 2, &gicv2.hbase, NULL);
+ if ( res || !gicv2.hbase || (gicv2.hbase & ~PAGE_MASK) )
+ panic("GICv2: Cannot find a valid address for the hypervisor");
+
+ res = dt_device_get_address(node, 3, &gicv2.vbase, NULL);
+ if ( res || !gicv2.vbase || (gicv2.vbase & ~PAGE_MASK) )
+ panic("GICv2: Cannot find a valid address for the virtual CPU");
+
+ res = platform_get_irq(node, 0);
+ if ( res < 0 )
+ panic("GICv2: Cannot find the maintenance IRQ");
+ gicv2_info.maintenance_irq = res;
+
+ /* Set the GIC as the primary interrupt controller */
+ dt_interrupt_controller = node;
+
+ /* TODO: Add checks on the distributor and CPU interface sizes */
+
+ printk("GICv2 initialization:\n"
+ " gic_dist_addr=%"PRIpaddr"\n"
+ " gic_cpu_addr=%"PRIpaddr"\n"
+ " gic_hyp_addr=%"PRIpaddr"\n"
+ " gic_vcpu_addr=%"PRIpaddr"\n"
+ " gic_maintenance_irq=%u\n",
+ gicv2.dbase, gicv2.cbase, gicv2.hbase, gicv2.vbase,
+ gicv2_info.maintenance_irq);
+
+ if ( (gicv2.dbase & ~PAGE_MASK) || (gicv2.cbase & ~PAGE_MASK) ||
+ (gicv2.hbase & ~PAGE_MASK) || (gicv2.vbase & ~PAGE_MASK) )
+ panic("GICv2 interfaces not page aligned");
+
+ gicv2.map_dbase = ioremap_nocache(gicv2.dbase, PAGE_SIZE);
+ if ( !gicv2.map_dbase )
+ panic("GICv2: Failed to ioremap for GIC distributor\n");
+
+ if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
+ gicv2.map_cbase = ioremap_nocache(gicv2.cbase, PAGE_SIZE * 0x10);
+ else
+ gicv2.map_cbase = ioremap_nocache(gicv2.cbase, PAGE_SIZE * 2);
+
+ if ( !gicv2.map_cbase )
+ panic("GICv2: Failed to ioremap for GIC CPU interface\n");
+
+ gicv2.map_hbase = ioremap_nocache(gicv2.hbase, PAGE_SIZE);
+ if ( !gicv2.map_hbase )
+ panic("GICv2: Failed to ioremap for GIC Virtual interface\n");
+
+ /* Global settings: interrupt distributor */
+ spin_lock_init(&gicv2.lock);
+ spin_lock(&gicv2.lock);
+
+ gicv2_dist_init();
+ gicv2_cpu_init();
+ gicv2_hyp_init();
+
+ spin_unlock(&gicv2.lock);
+
+ gicv2_info.hw_version = GIC_V2;
+ register_gic_ops(&gicv2_ops);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/errno.h>
-#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/list.h>
#include <xen/device_tree.h>
#include <asm/io.h>
#include <asm/gic.h>
-#define GICD (gic.map_dbase)
-#define GICC (gic.map_cbase)
-#define GICH (gic.map_hbase)
-
static void gic_restore_pending_irqs(struct vcpu *v);
-/* Global state */
-static struct {
- paddr_t dbase; /* Address of distributor registers */
- void __iomem * map_dbase; /* IO mapped Address of distributor registers */
- paddr_t cbase; /* Address of CPU interface registers */
- void __iomem * map_cbase; /* IO mapped Address of CPU interface registers*/
- paddr_t hbase; /* Address of virtual interface registers */
- void __iomem * map_hbase; /* IO Address of virtual interface registers */
- paddr_t vbase; /* Address of virtual cpu interface registers */
- unsigned int lines; /* Number of interrupts (SPIs + PPIs + SGIs) */
- unsigned int maintenance_irq; /* IRQ maintenance */
- unsigned int cpus;
- spinlock_t lock;
-} gic;
-
static DEFINE_PER_CPU(uint64_t, lr_mask);
-static uint8_t nr_lrs;
-#define lr_all_full() (this_cpu(lr_mask) == ((1 << nr_lrs) - 1))
-
-/* The GIC mapping of CPU interfaces does not necessarily match the
- * logical CPU numbering. Let's use mapping as returned by the GIC
- * itself
- */
-static DEFINE_PER_CPU(u8, gic_cpu_id);
-
-/* Maximum cpu interface per GIC */
-#define NR_GIC_CPU_IF 8
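+/* True when every LR implemented on this CPU holds an interrupt. */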
+#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_hw_ops->info->nr_lrs) - 1))
#undef GIC_DEBUG
static void gic_update_one_lr(struct vcpu *v, int i);
-static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
+static const struct gic_hw_operations *gic_hw_ops;
+
+void register_gic_ops(const struct gic_hw_operations *ops)
{
- unsigned int cpu;
- unsigned int mask = 0;
- cpumask_t possible_mask;
+ gic_hw_ops = ops;
+}
- cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
- for_each_cpu(cpu, &possible_mask)
- {
- ASSERT(cpu < NR_GIC_CPU_IF);
- mask |= per_cpu(gic_cpu_id, cpu);
- }
+static void clear_cpu_lr_mask(void)
+{
+ this_cpu(lr_mask) = 0ULL;
+}
- return mask;
+enum gic_version gic_hw_version(void)
+{
+ return gic_hw_ops->info->hw_version;
}
unsigned int gic_number_lines(void)
{
- return gic.lines;
+ return gic_hw_ops->info->nr_lines;
}
void gic_save_state(struct vcpu *v)
{
- int i;
ASSERT(!local_irq_is_enabled());
/* No need for spinlocks here because interrupts are disabled around
* this call and it only accesses struct vcpu fields that cannot be
* accessed simultaneously by another pCPU.
*/
- for ( i=0; i<nr_lrs; i++)
- v->arch.gic_lr[i] = readl_relaxed(GICH + GICH_LR + i * 4);
v->arch.lr_mask = this_cpu(lr_mask);
- v->arch.gic_apr = readl_relaxed(GICH + GICH_APR);
- v->arch.gic_vmcr = readl_relaxed(GICH + GICH_VMCR);
- /* Disable until next VCPU scheduled */
- writel_relaxed(0, GICH + GICH_HCR);
+ gic_hw_ops->save_state(v);
isb();
}
void gic_restore_state(struct vcpu *v)
{
- int i;
ASSERT(!local_irq_is_enabled());
if ( is_idle_vcpu(v) )
return;
this_cpu(lr_mask) = v->arch.lr_mask;
- for ( i=0; i<nr_lrs; i++)
- writel_relaxed(v->arch.gic_lr[i], GICH + GICH_LR + i * 4);
- writel_relaxed(v->arch.gic_apr, GICH + GICH_APR);
- writel_relaxed(v->arch.gic_vmcr, GICH + GICH_VMCR);
- writel_relaxed(GICH_HCR_EN, GICH + GICH_HCR);
+ gic_hw_ops->restore_state(v);
+
isb();
gic_restore_pending_irqs(v);
}
-static void gic_irq_enable(struct irq_desc *desc)
-{
- int irq = desc->irq;
- unsigned long flags;
-
- ASSERT(spin_is_locked(&desc->lock));
-
- spin_lock_irqsave(&gic.lock, flags);
- /* Enable routing */
- desc->status &= ~IRQ_DISABLED;
- dsb(sy);
- writel_relaxed((1u << (irq % 32)), GICD + GICD_ISENABLER + (irq / 32) * 4);
- spin_unlock_irqrestore(&gic.lock, flags);
-}
-
-static void gic_irq_disable(struct irq_desc *desc)
-{
- int irq = desc->irq;
- unsigned long flags;
-
- ASSERT(spin_is_locked(&desc->lock));
-
- spin_lock_irqsave(&gic.lock, flags);
- /* Disable routing */
- writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + (irq / 32) * 4);
- desc->status |= IRQ_DISABLED;
- spin_unlock_irqrestore(&gic.lock, flags);
-}
-
-static unsigned int gic_irq_startup(struct irq_desc *desc)
-{
- gic_irq_enable(desc);
-
- return 0;
-}
-
-static void gic_irq_shutdown(struct irq_desc *desc)
-{
- gic_irq_disable(desc);
-}
-
-static void gic_irq_ack(struct irq_desc *desc)
-{
- /* No ACK -- reading IAR has done this for us */
-}
-
-static void gic_host_irq_end(struct irq_desc *desc)
-{
- int irq = desc->irq;
- /* Lower the priority */
- writel_relaxed(irq, GICC + GICC_EOIR);
- /* Deactivate */
- writel_relaxed(irq, GICC + GICC_DIR);
-}
-
-static void gic_guest_irq_end(struct irq_desc *desc)
-{
- int irq = desc->irq;
- /* Lower the priority of the IRQ */
- writel_relaxed(irq, GICC + GICC_EOIR);
- /* Deactivation happens in maintenance interrupt / via GICV */
-}
-
-static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
-{
- BUG();
-}
-
-/* XXX different for level vs edge */
-static hw_irq_controller gic_host_irq_type = {
- .typename = "gic",
- .startup = gic_irq_startup,
- .shutdown = gic_irq_shutdown,
- .enable = gic_irq_enable,
- .disable = gic_irq_disable,
- .ack = gic_irq_ack,
- .end = gic_host_irq_end,
- .set_affinity = gic_irq_set_affinity,
-};
-static hw_irq_controller gic_guest_irq_type = {
- .typename = "gic",
- .startup = gic_irq_startup,
- .shutdown = gic_irq_shutdown,
- .enable = gic_irq_enable,
- .disable = gic_irq_disable,
- .ack = gic_irq_ack,
- .end = gic_guest_irq_end,
- .set_affinity = gic_irq_set_affinity,
-};
-
/*
- * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
+ * Needs to be called with a valid cpu_mask, i.e. each CPU in the mask has
* already called gic_cpu_init
* - desc.lock must be held
* - arch.type must be valid (i.e != DT_IRQ_TYPE_INVALID)
const cpumask_t *cpu_mask,
unsigned int priority)
{
- uint32_t cfg, edgebit;
- unsigned int mask;
- unsigned int irq = desc->irq;
- unsigned int type = desc->arch.type;
-
- ASSERT(type != DT_IRQ_TYPE_INVALID);
- ASSERT(spin_is_locked(&desc->lock));
-
- spin_lock(&gic.lock);
-
- mask = gic_cpu_mask(cpu_mask);
-
- /* Set edge / level */
- cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
- edgebit = 2u << (2 * (irq % 16));
- if ( type & DT_IRQ_TYPE_LEVEL_MASK )
- cfg &= ~edgebit;
- else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
- cfg |= edgebit;
- writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
-
- /* Set target CPU mask (RAZ/WI on uniprocessor) */
- writeb_relaxed(mask, GICD + GICD_ITARGETSR + irq);
- /* Set priority */
- writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
-
- spin_unlock(&gic.lock);
+ gic_hw_ops->set_irq_properties(desc, cpu_mask, priority);
}
/* Program the GIC to route an interrupt to the host (i.e. Xen)
unsigned int priority)
{
ASSERT(priority <= 0xff); /* Only 8 bits of priority */
- ASSERT(desc->irq < gic.lines);/* Can't route interrupts that don't exist */
+ ASSERT(desc->irq < gic_number_lines()); /* Can't route interrupts that don't exist */
ASSERT(desc->status & IRQ_DISABLED);
ASSERT(spin_is_locked(&desc->lock));
- desc->handler = &gic_host_irq_type;
+ desc->handler = gic_hw_ops->gic_host_irq_type;
gic_set_irq_properties(desc, cpu_mask, priority);
}
struct pending_irq *p;
ASSERT(spin_is_locked(&desc->lock));
- desc->handler = &gic_guest_irq_type;
+ desc->handler = gic_hw_ops->gic_guest_irq_type;
desc->status |= IRQ_GUEST;
gic_set_irq_properties(desc, cpumask_of(smp_processor_id()), GIC_PRI_IRQ);
p->desc = desc;
}
-static void __init gic_dist_init(void)
-{
- uint32_t type;
- uint32_t cpumask;
- int i;
-
- cpumask = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
- cpumask |= cpumask << 8;
- cpumask |= cpumask << 16;
-
- /* Disable the distributor */
- writel_relaxed(0, GICD + GICD_CTLR);
-
- type = readl_relaxed(GICD + GICD_TYPER);
- gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
- gic.cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
- printk("GIC: %d lines, %d cpu%s%s (IID %8.8x).\n",
- gic.lines, gic.cpus, (gic.cpus == 1) ? "" : "s",
- (type & GICD_TYPE_SEC) ? ", secure" : "",
- readl_relaxed(GICD + GICD_IIDR));
-
- /* Default all global IRQs to level, active low */
- for ( i = 32; i < gic.lines; i += 16 )
- writel_relaxed(0x0, GICD + GICD_ICFGR + (i / 16) * 4);
-
- /* Route all global IRQs to this CPU */
- for ( i = 32; i < gic.lines; i += 4 )
- writel_relaxed(cpumask, GICD + GICD_ITARGETSR + (i / 4) * 4);
-
- /* Default priority for global interrupts */
- for ( i = 32; i < gic.lines; i += 4 )
- writel_relaxed (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
- GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
- GICD + GICD_IPRIORITYR + (i / 4) * 4);
-
- /* Disable all global interrupts */
- for ( i = 32; i < gic.lines; i += 32 )
- writel_relaxed(~0x0, GICD + GICD_ICENABLER + (i / 32) * 4);
-
- /* Turn on the distributor */
- writel_relaxed(GICD_CTL_ENABLE, GICD + GICD_CTLR);
-}
-
-static void __cpuinit gic_cpu_init(void)
-{
- int i;
-
- this_cpu(gic_cpu_id) = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
-
- /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
- * even though they are controlled with GICD registers, they must
- * be set up here with the other per-cpu state. */
- writel_relaxed(0xffff0000, GICD + GICD_ICENABLER); /* Disable all PPI */
- writel_relaxed(0x0000ffff, GICD + GICD_ISENABLER); /* Enable all SGI */
-
- /* Set SGI priorities */
- for (i = 0; i < 16; i += 4)
- writel_relaxed(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
- GIC_PRI_IPI << 8 | GIC_PRI_IPI,
- GICD + GICD_IPRIORITYR + (i / 4) * 4);
- /* Set PPI priorities */
- for (i = 16; i < 32; i += 4)
- writel_relaxed(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
- GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
- GICD + GICD_IPRIORITYR + (i / 4) * 4);
-
- /* Local settings: interface controller */
- /* Don't mask by priority */
- writel_relaxed(0xff, GICC + GICC_PMR);
- /* Finest granularity of priority */
- writel_relaxed(0x0, GICC + GICC_BPR);
- /* Turn on delivery */
- writel_relaxed(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC + GICC_CTLR);
-}
-
-static void gic_cpu_disable(void)
-{
- writel_relaxed(0x0, GICC + GICC_CTLR);
-}
-
-static void __cpuinit gic_hyp_init(void)
-{
- uint32_t vtr;
-
- vtr = readl_relaxed(GICH + GICH_VTR);
- nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
-
- writel_relaxed(GICH_MISR_EOI, GICH + GICH_MISR);
- this_cpu(lr_mask) = 0ULL;
-}
-
-static void __cpuinit gic_hyp_disable(void)
-{
- writel_relaxed(0, GICH + GICH_HCR);
-}
-
int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
unsigned int *out_hwirq,
unsigned int *out_type)
/* Set up the GIC */
void __init gic_init(void)
{
- static const struct dt_device_match gic_ids[] __initconst =
- {
- DT_MATCH_GIC,
- { /* sentinel */ },
- };
- struct dt_device_node *node;
- int res;
-
- node = dt_find_interrupt_controller(gic_ids);
- if ( !node )
- panic("Unable to find compatible GIC in the device tree");
-
- dt_device_set_used_by(node, DOMID_XEN);
-
- res = dt_device_get_address(node, 0, &gic.dbase, NULL);
- if ( res || !gic.dbase || (gic.dbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the distributor");
-
- res = dt_device_get_address(node, 1, &gic.cbase, NULL);
- if ( res || !gic.cbase || (gic.cbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the CPU");
-
- res = dt_device_get_address(node, 2, &gic.hbase, NULL);
- if ( res || !gic.hbase || (gic.hbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the hypervisor");
-
- res = dt_device_get_address(node, 3, &gic.vbase, NULL);
- if ( res || !gic.vbase || (gic.vbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the virtual CPU");
-
- res = platform_get_irq(node, 0);
- if ( res < 0 )
- panic("GIC: Cannot find the maintenance IRQ");
- gic.maintenance_irq = res;
-
- /* Set the GIC as the primary interrupt controller */
- dt_interrupt_controller = node;
-
- /* TODO: Add check on distributor, cpu size */
-
- printk("GIC initialization:\n"
- " gic_dist_addr=%"PRIpaddr"\n"
- " gic_cpu_addr=%"PRIpaddr"\n"
- " gic_hyp_addr=%"PRIpaddr"\n"
- " gic_vcpu_addr=%"PRIpaddr"\n"
- " gic_maintenance_irq=%u\n",
- gic.dbase, gic.cbase, gic.hbase, gic.vbase,
- gic.maintenance_irq);
-
- if ( (gic.dbase & ~PAGE_MASK) || (gic.cbase & ~PAGE_MASK) ||
- (gic.hbase & ~PAGE_MASK) || (gic.vbase & ~PAGE_MASK) )
- panic("GIC interfaces not page aligned");
-
- gic.map_dbase = ioremap_nocache(gic.dbase, PAGE_SIZE);
- if ( !gic.map_dbase )
- panic("Failed to ioremap for GIC distributor\n");
-
- if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- gic.map_cbase = ioremap_nocache(gic.cbase, PAGE_SIZE * 0x10);
- else
- gic.map_cbase = ioremap_nocache(gic.cbase, PAGE_SIZE * 2);
-
- if ( !gic.map_cbase )
- panic("Failed to ioremap for GIC CPU interface\n");
-
- gic.map_hbase = ioremap_nocache(gic.hbase, PAGE_SIZE);
- if ( !gic.map_hbase )
- panic("Failed to ioremap for GIC Virtual interface\n");
-
- /* Global settings: interrupt distributor */
- spin_lock_init(&gic.lock);
- spin_lock(&gic.lock);
-
- gic_dist_init();
- gic_cpu_init();
- gic_hyp_init();
-
- spin_unlock(&gic.lock);
-}
-
-static void send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
- const cpumask_t *cpu_mask)
-{
- unsigned int mask = 0;
- cpumask_t online_mask;
-
- switch ( irqmode )
- {
- case SGI_TARGET_OTHERS:
- writel_relaxed(GICD_SGI_TARGET_OTHERS | sgi, GICD + GICD_SGIR);
- break;
- case SGI_TARGET_SELF:
- writel_relaxed(GICD_SGI_TARGET_SELF | sgi, GICD + GICD_SGIR);
- break;
- case SGI_TARGET_LIST:
- cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
- mask = gic_cpu_mask(&online_mask);
- writel_relaxed(GICD_SGI_TARGET_LIST |
- (mask << GICD_SGI_TARGET_SHIFT) | sgi,
- GICD + GICD_SGIR);
- break;
- default:
- BUG();
- }
+ gicv2_init();
+ /* Clear LR mask for cpu0 */
+ clear_cpu_lr_mask();
}
void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
ASSERT(sgi < 16); /* There are only 16 SGIs */
dsb(sy);
- send_SGI(sgi, SGI_TARGET_LIST, cpumask);
+ gic_hw_ops->send_SGI(sgi, SGI_TARGET_LIST, cpumask);
}
void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
{
- ASSERT(cpu < NR_GIC_CPU_IF); /* Targets bitmap only supports 8 CPUs */
send_SGI_mask(cpumask_of(cpu), sgi);
}
ASSERT(sgi < 16); /* There are only 16 SGIs */
dsb(sy);
- send_SGI(sgi, SGI_TARGET_SELF, NULL);
+ gic_hw_ops->send_SGI(sgi, SGI_TARGET_SELF, NULL);
}
void send_SGI_allbutself(enum gic_sgi sgi)
ASSERT(sgi < 16); /* There are only 16 SGIs */
dsb(sy);
- send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
+ gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
}
void smp_send_state_dump(unsigned int cpu)
/* Set up the per-CPU parts of the GIC for a secondary CPU */
void __cpuinit gic_init_secondary_cpu(void)
{
- spin_lock(&gic.lock);
- gic_cpu_init();
- gic_hyp_init();
- spin_unlock(&gic.lock);
+ gic_hw_ops->secondary_init();
+ /* Clear LR mask for secondary cpus */
+ clear_cpu_lr_mask();
}
/* Shut down the per-CPU GIC interface */
{
ASSERT(!local_irq_is_enabled());
- spin_lock(&gic.lock);
- gic_cpu_disable();
- gic_hyp_disable();
- spin_unlock(&gic.lock);
+ gic_hw_ops->disable_interface();
}
static inline void gic_set_lr(int lr, struct pending_irq *p,
- unsigned int state)
+ unsigned int state)
{
- uint32_t lr_val;
-
ASSERT(!local_irq_is_enabled());
- BUG_ON(lr >= nr_lrs);
- BUG_ON(lr < 0);
- BUG_ON(state & ~(GICH_LR_STATE_MASK<<GICH_LR_STATE_SHIFT));
-
- lr_val = state | (GIC_PRI_TO_GUEST(p->priority) << GICH_LR_PRIORITY_SHIFT) |
- ((p->irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
- if ( p->desc != NULL )
- lr_val |= GICH_LR_HW | (p->desc->irq << GICH_LR_PHYSICAL_SHIFT);
- writel_relaxed(lr_val, GICH + GICH_LR + lr * 4);
+ gic_hw_ops->update_lr(lr, p, state);
set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
unsigned int priority)
{
int i;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
ASSERT(spin_is_locked(&v->arch.vgic.lock));
static void gic_update_one_lr(struct vcpu *v, int i)
{
struct pending_irq *p;
- uint32_t lr;
int irq;
+ struct gic_lr lr_val;
ASSERT(spin_is_locked(&v->arch.vgic.lock));
ASSERT(!local_irq_is_enabled());
- lr = readl_relaxed(GICH + GICH_LR + i * 4);
- irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+ gic_hw_ops->read_lr(i, &lr_val);
+ irq = lr_val.virq;
p = irq_to_pending(v, irq);
- if ( lr & GICH_LR_ACTIVE )
+ if ( lr_val.state & GICH_LR_ACTIVE )
{
set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
{
if ( p->desc == NULL )
- writel_relaxed(lr | GICH_LR_PENDING, GICH + GICH_LR + i * 4);
+ {
+ lr_val.state |= GICH_LR_PENDING;
+ gic_hw_ops->write_lr(i, &lr_val);
+ }
else
gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",
irq, v->domain->domain_id, v->vcpu_id, i);
}
- } else if ( lr & GICH_LR_PENDING ) {
+ }
+ else if ( lr_val.state & GICH_LR_PENDING )
+ {
int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
#ifdef GIC_DEBUG
if ( q )
gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when it is already pending in LR%d\n",
irq, v->domain->domain_id, v->vcpu_id, i);
#endif
- } else {
- writel_relaxed(0, GICH + GICH_LR + i * 4);
+ }
+ else
+ {
+ gic_hw_ops->clear_lr(i);
clear_bit(i, &this_cpu(lr_mask));
if ( p->desc != NULL )
{
int i = 0;
unsigned long flags;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
/* The idle domain has no LRs to be cleared. Since gic_restore_state
* doesn't write any LR registers for the idle domain they could be
static void gic_restore_pending_irqs(struct vcpu *v)
{
- int lr = 0, lrs = nr_lrs;
+ int lr = 0;
struct pending_irq *p, *t, *p_r;
struct list_head *inflight_r;
unsigned long flags;
+ unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+ int lrs = nr_lrs;
spin_lock_irqsave(&v->arch.vgic.lock, flags);
struct vcpu *v = current;
struct pending_irq *p;
unsigned long flags;
- const unsigned long apr = readl_relaxed(GICH + GICH_APR);
+ const unsigned long apr = gic_hw_ops->read_apr(0);
int mask_priority;
int active_priority;
int rc = 0;
- mask_priority = (readl_relaxed(GICH + GICH_VMCR)
- >> GICH_VMCR_PRIORITY_SHIFT) & GICH_VMCR_PRIORITY_MASK;
+ mask_priority = gic_hw_ops->read_vmcr_priority();
active_priority = find_next_bit(&apr, 32, 0);
spin_lock_irqsave(&v->arch.vgic.lock, flags);
void gic_inject(void)
{
- uint32_t hcr;
ASSERT(!local_irq_is_enabled());
gic_restore_pending_irqs(current);
- hcr = readl_relaxed(GICH + GICH_HCR);
-
if ( !list_empty(¤t->arch.vgic.lr_pending) && lr_all_full() )
- writel_relaxed(hcr | GICH_HCR_UIE, GICH + GICH_HCR);
+ gic_hw_ops->update_hcr_status(GICH_HCR_UIE, 1);
else
- writel_relaxed(hcr & ~GICH_HCR_UIE, GICH + GICH_HCR);
+ gic_hw_ops->update_hcr_status(GICH_HCR_UIE, 0);
}
static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
- /* Lower the priority */
- writel_relaxed(sgi, GICC + GICC_EOIR);
+ struct irq_desc *desc = irq_to_desc(sgi);
+
+ /* Lower the priority */
+ gic_hw_ops->eoi_irq(desc);
switch (sgi)
{
}
/* Deactivate */
- writel_relaxed(sgi, GICC + GICC_DIR);
+ gic_hw_ops->deactivate_irq(desc);
}
/* Accept an interrupt from the GIC and dispatch its handler */
void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
{
- uint32_t intack;
unsigned int irq;
-
do {
- intack = readl_relaxed(GICC + GICC_IAR);
- irq = intack & GICC_IA_IRQ;
+ /* Reading IRQ will ACK it */
+ irq = gic_hw_ops->read_irq();
if ( likely(irq >= 16 && irq < 1021) )
{
int gicv_setup(struct domain *d)
{
- int ret;
-
- /*
- * The hardware domain gets the hardware address.
- * Guests get the virtual platform layout.
- */
- if ( is_hardware_domain(d) )
- {
- d->arch.vgic.dbase = gic.dbase;
- d->arch.vgic.cbase = gic.cbase;
- }
- else
- {
- d->arch.vgic.dbase = GUEST_GICD_BASE;
- d->arch.vgic.cbase = GUEST_GICC_BASE;
- }
-
- d->arch.vgic.nr_lines = 0;
-
- /*
- * Map the gic virtual cpu interface in the gic cpu interface
- * region of the guest.
- *
- * The second page is always mapped at +4K irrespective of the
- * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
- */
- ret = map_mmio_regions(d, d->arch.vgic.cbase,
- d->arch.vgic.cbase + PAGE_SIZE - 1,
- gic.vbase);
- if (ret)
- return ret;
-
- if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
- d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
- gic.vbase + PAGE_SIZE);
- else
- ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
- d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
- gic.vbase + 16*PAGE_SIZE);
-
- return ret;
-
+ return gic_hw_ops->gicv_setup(d);
}
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
void gic_dump_info(struct vcpu *v)
{
- int i;
struct pending_irq *p;
printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask);
- if ( v == current )
- {
- for ( i = 0; i < nr_lrs; i++ )
- printk(" HW_LR[%d]=%x\n", i, readl_relaxed(GICH + GICH_LR + i * 4));
- } else {
- for ( i = 0; i < nr_lrs; i++ )
- printk(" VCPU_LR[%d]=%x\n", i, v->arch.gic_lr[i]);
- }
+ gic_hw_ops->dump_state(v);
list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
{
{
printk("Pending irq=%d\n", p->irq);
}
-
}
void __cpuinit init_maintenance_interrupt(void)
{
- request_irq(gic.maintenance_irq, 0, maintenance_interrupt,
+ request_irq(gic_hw_ops->info->maintenance_irq, 0, maintenance_interrupt,
"irq-maintenance", NULL);
}
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
#define GICH_MISR_VGRP1E (1 << 6)
#define GICH_MISR_VGRP1D (1 << 7)
-#define GICH_LR_VIRTUAL_MASK 0x3ff
-#define GICH_LR_VIRTUAL_SHIFT 0
-#define GICH_LR_PHYSICAL_MASK 0x3ff
-#define GICH_LR_PHYSICAL_SHIFT 10
-#define GICH_LR_STATE_MASK 0x3
-#define GICH_LR_STATE_SHIFT 28
-#define GICH_LR_PRIORITY_SHIFT 23
-#define GICH_LR_MAINTENANCE_IRQ (1<<19)
-#define GICH_LR_PENDING (1<<28)
-#define GICH_LR_ACTIVE (1<<29)
-#define GICH_LR_GRP1 (1<<30)
-#define GICH_LR_HW (1<<31)
-#define GICH_LR_CPUID_SHIFT 9
-#define GICH_VTR_NRLRGS 0x3f
-
-#define GICH_VMCR_PRIORITY_MASK 0x1f
-#define GICH_VMCR_PRIORITY_SHIFT 27
-
/*
* The minimum GICC_BPR is required to be in the range 0-3. We set
* GICC_BPR to 0 but we must expect that it might be 3. This means we
#define GIC_PRI_TO_GUEST(pri) (pri >> 3) /* GICH_LR and GICH_VMCR only support
5 bits for guest irq priority */
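+/* Generic LR state values, matching the 2-bit LR state field:
+ * bit 0 = pending, bit 1 = active. */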
+#define GICH_LR_PENDING 1
+#define GICH_LR_ACTIVE 2
#ifndef __ASSEMBLY__
#include <xen/device_tree.h>
#define DT_MATCH_GIC DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), \
DT_MATCH_COMPATIBLE("arm,cortex-a7-gic")
+/*
+ * Decoded LR register contents.
+ * The LR register format differs between GIC hardware versions.
+ */
+struct gic_lr {
+ /* Physical IRQ */
+ uint32_t pirq;
+ /* Virtual IRQ */
+ uint32_t virq;
+ uint8_t priority; /* Interrupt priority */
+ uint8_t state; /* Pending/active state */
+ uint8_t hw_status; /* HW bit: vIRQ is backed by a physical IRQ */
+ uint8_t grp; /* Interrupt group */
+};
+
+enum gic_version {
+ GIC_V2,
+};
+
+extern enum gic_version gic_hw_version(void);
+extern void gicv2_init(void);
+
extern int domain_vgic_init(struct domain *d);
extern void domain_vgic_free(struct domain *d);
unsigned int *out_hwirq, unsigned int *out_type);
void gic_clear_lrs(struct vcpu *v);
+struct gic_info {
+ /* GIC version */
+ enum gic_version hw_version;
+ /* Number of GIC lines supported */
+ unsigned int nr_lines;
+ /* Number of LR registers */
+ uint8_t nr_lrs;
+ /* Maintenance irq number */
+ unsigned int maintenance_irq;
+};
+
+struct gic_hw_operations {
+ /* Hold GIC HW information */
+ const struct gic_info *info;
+ /* Save GIC registers */
+ void (*save_state)(struct vcpu *);
+ /* Restore GIC registers */
+ void (*restore_state)(const struct vcpu *);
+ /* Dump GIC LR register information */
+ void (*dump_state)(const struct vcpu *);
+ /* Map MMIO region of GIC */
+ int (*gicv_setup)(struct domain *);
+
+ /* hw_irq_controller to enable/disable/eoi host irq */
+ hw_irq_controller *gic_host_irq_type;
+
+ /* hw_irq_controller to enable/disable/eoi guest irq */
+ hw_irq_controller *gic_guest_irq_type;
+
+ /* End of Interrupt */
+ void (*eoi_irq)(struct irq_desc *irqd);
+ /* Deactivate/reduce priority of irq */
+ void (*deactivate_irq)(struct irq_desc *irqd);
+ /* Read IRQ id and Ack */
+ unsigned int (*read_irq)(void);
+ /* Set IRQ property */
+ void (*set_irq_properties)(struct irq_desc *desc,
+ const cpumask_t *cpu_mask,
+ unsigned int priority);
+ /* Send SGI */
+ void (*send_SGI)(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
+ const cpumask_t *online_mask);
+ /* Disable CPU physical and virtual interfaces */
+ void (*disable_interface)(void);
+ /* Update LR register with state and priority */
+ void (*update_lr)(int lr, const struct pending_irq *pending_irq,
+ unsigned int state);
+ /* Update HCR status register */
+ void (*update_hcr_status)(uint32_t flag, bool_t set);
+ /* Clear LR register */
+ void (*clear_lr)(int lr);
+ /* Read LR register and populate gic_lr structure */
+ void (*read_lr)(int lr, struct gic_lr *);
+ /* Write LR register from gic_lr structure */
+ void (*write_lr)(int lr, const struct gic_lr *);
+ /* Read VMCR priority */
+ unsigned int (*read_vmcr_priority)(void);
+ /* Read APRn register */
+ unsigned int (*read_apr)(int apr_reg);
+ /* Secondary CPU init */
+ int (*secondary_init)(void);
+};
+
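+/*
+ * A GIC driver fills in a gic_hw_operations structure (as gicv2_ops in
+ * gic-v2.c does) and registers it from its init function; the common
+ * gic.c code then dispatches all hardware accesses through these ops.
+ */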
+void register_gic_ops(const struct gic_hw_operations *ops);
+
#endif /* __ASSEMBLY__ */
#endif