* Placeholder for the event channel interrupt. The values will be
* replaced later.
*/
- set_interrupt_ppi(intr, ~0, 0xf, DT_IRQ_TYPE_INVALID);
+ set_interrupt_ppi(intr, ~0, 0xf, IRQ_TYPE_INVALID);
res = fdt_property_interrupts(fdt, &intr, 1);
if ( res )
return res;
irq = timer_get_irq(TIMER_PHYS_SECURE_PPI);
DPRINT(" Secure interrupt %u\n", irq);
- set_interrupt_ppi(intrs[0], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+ set_interrupt_ppi(intrs[0], irq, 0xf, IRQ_TYPE_LEVEL_LOW);
irq = timer_get_irq(TIMER_PHYS_NONSECURE_PPI);
DPRINT(" Non secure interrupt %u\n", irq);
- set_interrupt_ppi(intrs[1], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+ set_interrupt_ppi(intrs[1], irq, 0xf, IRQ_TYPE_LEVEL_LOW);
irq = timer_get_irq(TIMER_VIRT_PPI);
DPRINT(" Virt interrupt %u\n", irq);
- set_interrupt_ppi(intrs[2], irq, 0xf, DT_IRQ_TYPE_LEVEL_LOW);
+ set_interrupt_ppi(intrs[2], irq, 0xf, IRQ_TYPE_LEVEL_LOW);
res = fdt_property_interrupts(fdt, intrs, 3);
if ( res )
* TODO: Handle properly the cpumask
*/
set_interrupt_ppi(intr, d->arch.evtchn_irq, 0xf,
- DT_IRQ_TYPE_LEVEL_LOW);
+ IRQ_TYPE_LEVEL_LOW);
res = fdt_setprop_inplace(kinfo->fdt, node, "interrupts",
&intr, sizeof(intr));
if ( res )
unsigned int irq = desc->irq;
unsigned int type = desc->arch.type;
- ASSERT(type != DT_IRQ_TYPE_INVALID);
+ ASSERT(type != IRQ_TYPE_INVALID);
ASSERT(spin_is_locked(&desc->lock));
spin_lock(&gicv2.lock);
/* Set edge / level */
cfg = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
edgebit = 2u << (2 * (irq % 16));
- if ( type & DT_IRQ_TYPE_LEVEL_MASK )
+ if ( type & IRQ_TYPE_LEVEL_MASK )
cfg &= ~edgebit;
- else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
+ else if ( type & IRQ_TYPE_EDGE_BOTH )
cfg |= edgebit;
writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4);
cfg & edgebit ? "Edge" : "Level",
actual & edgebit ? "Edge" : "Level");
desc->arch.type = actual & edgebit ?
- DT_IRQ_TYPE_EDGE_RISING :
- DT_IRQ_TYPE_LEVEL_HIGH;
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
}
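
Not part of the patch: a minimal standalone sketch of the GICD_ICFGR arithmetic used in both hunks above, assuming the architectural layout of two configuration bits per interrupt (so 16 interrupts per 32-bit register), with bit 1 of each field selecting edge-triggered. GICD_ICFGR is taken as the usual 0xc00 offset and IRQ 27 (the virtual timer PPI) is only an example value.

#include <stdio.h>

#define GICD_ICFGR 0xc00 /* architectural offset of the config registers */

int main(void)
{
    unsigned int irq = 27;                               /* example: virtual timer PPI */
    unsigned int offset  = GICD_ICFGR + (irq / 16) * 4;  /* 16 IRQs per 32-bit register */
    unsigned int edgebit = 2u << (2 * (irq % 16));       /* bit 1 of the 2-bit field => edge */

    printf("IRQ %u: ICFGR offset 0x%x, edge bit mask 0x%08x\n",
           irq, offset, edgebit);
    return 0;
}
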
/* Set target CPU mask (RAZ/WI on uniprocessor) */
cfg = readl_relaxed(base);
edgebit = 2u << (2 * (irq % 16));
- if ( type & DT_IRQ_TYPE_LEVEL_MASK )
+ if ( type & IRQ_TYPE_LEVEL_MASK )
cfg &= ~edgebit;
- else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
+ else if ( type & IRQ_TYPE_EDGE_BOTH )
cfg |= edgebit;
writel_relaxed(cfg, base);
cfg & edgebit ? "Edge" : "Level",
actual & edgebit ? "Edge" : "Level");
desc->arch.type = actual & edgebit ?
- DT_IRQ_TYPE_EDGE_RISING :
- DT_IRQ_TYPE_LEVEL_HIGH;
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
}
affinity = gicv3_mpidr_to_affinity(cpu);
* needs to be called with a valid cpu_mask, ie each cpu in the mask has
* already called gic_cpu_init
* - desc.lock must be held
- * - arch.type must be valid (i.e != DT_IRQ_TYPE_INVALID)
+ * - arch.type must be valid (i.e. != IRQ_TYPE_INVALID)
*/
static void gic_set_irq_properties(struct irq_desc *desc,
const cpumask_t *cpu_mask,
*out_hwirq += 16;
if ( out_type )
- *out_type = intspec[2] & DT_IRQ_TYPE_SENSE_MASK;
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
int __init arch_init_one_irq_desc(struct irq_desc *desc)
{
- desc->arch.type = DT_IRQ_TYPE_INVALID;
+ desc->arch.type = IRQ_TYPE_INVALID;
return 0;
}
spin_lock(&local_irqs_type_lock);
for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
- local_irqs_type[irq] = DT_IRQ_TYPE_INVALID;
+ local_irqs_type[irq] = IRQ_TYPE_INVALID;
spin_unlock(&local_irqs_type_lock);
BUG_ON(init_local_irq_data() < 0);
spin_lock_irqsave(&desc->lock, flags);
- if ( desc->arch.type == DT_IRQ_TYPE_INVALID )
+ if ( desc->arch.type == IRQ_TYPE_INVALID )
{
printk(XENLOG_G_ERR "IRQ %u has not been configured\n", irq);
retval = -EIO;
static bool_t irq_validate_new_type(unsigned int curr, unsigned new)
{
- return (curr == DT_IRQ_TYPE_INVALID || curr == new );
+ return (curr == IRQ_TYPE_INVALID || curr == new );
}
int irq_set_spi_type(unsigned int spi, unsigned int type)
* The interrupt controller driver will update desc->arch.type with
* the actual type which ended up configured in the hardware.
*/
- if ( desc->arch.type & DT_IRQ_TYPE_LEVEL_MASK )
+ if ( desc->arch.type & IRQ_TYPE_LEVEL_MASK )
return;
printk(XENLOG_WARNING
/**
* IRQ line type.
*
- * DT_IRQ_TYPE_NONE - default, unspecified type
- * DT_IRQ_TYPE_EDGE_RISING - rising edge triggered
- * DT_IRQ_TYPE_EDGE_FALLING - falling edge triggered
- * DT_IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
- * DT_IRQ_TYPE_LEVEL_HIGH - high level triggered
- * DT_IRQ_TYPE_LEVEL_LOW - low level triggered
- * DT_IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
- * DT_IRQ_TYPE_SENSE_MASK - Mask for all the above bits
- * DT_IRQ_TYPE_INVALID - Use to initialize the type
- */
-#define DT_IRQ_TYPE_NONE 0x00000000
-#define DT_IRQ_TYPE_EDGE_RISING 0x00000001
-#define DT_IRQ_TYPE_EDGE_FALLING 0x00000002
-#define DT_IRQ_TYPE_EDGE_BOTH \
- (DT_IRQ_TYPE_EDGE_FALLING | DT_IRQ_TYPE_EDGE_RISING)
-#define DT_IRQ_TYPE_LEVEL_HIGH 0x00000004
-#define DT_IRQ_TYPE_LEVEL_LOW 0x00000008
-#define DT_IRQ_TYPE_LEVEL_MASK \
- (DT_IRQ_TYPE_LEVEL_LOW | DT_IRQ_TYPE_LEVEL_HIGH)
-#define DT_IRQ_TYPE_SENSE_MASK 0x0000000f
-
-#define DT_IRQ_TYPE_INVALID 0x00000010
+ * IRQ_TYPE_NONE - default, unspecified type
+ * IRQ_TYPE_EDGE_RISING - rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING - falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH - high level triggered
+ * IRQ_TYPE_LEVEL_LOW - low level triggered
+ * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
+ * IRQ_TYPE_INVALID - Used to initialize the type
+ */
+#define IRQ_TYPE_NONE 0x00000000
+#define IRQ_TYPE_EDGE_RISING 0x00000001
+#define IRQ_TYPE_EDGE_FALLING 0x00000002
+#define IRQ_TYPE_EDGE_BOTH \
+ (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
+#define IRQ_TYPE_LEVEL_HIGH 0x00000004
+#define IRQ_TYPE_LEVEL_LOW 0x00000008
+#define IRQ_TYPE_LEVEL_MASK \
+ (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)
+#define IRQ_TYPE_SENSE_MASK 0x0000000f
+
+#define IRQ_TYPE_INVALID 0x00000010
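
For context (illustrative, not part of the patch): these encodings match the flags cell of a standard GIC interrupt specifier in the device tree, which is why gic_irq_xlate above can mask the third cell with IRQ_TYPE_SENSE_MASK directly. A self-contained sketch of that decoding, using a typical timer PPI specifier <1 13 0xf08> (PPI 13, CPU mask 0xf in bits [15:8], flags 0x8 = active-low level):

#include <stdio.h>

/* Values repeated from the header above so the sketch builds on its own. */
#define IRQ_TYPE_LEVEL_HIGH 0x00000004
#define IRQ_TYPE_LEVEL_LOW  0x00000008
#define IRQ_TYPE_LEVEL_MASK (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)
#define IRQ_TYPE_SENSE_MASK 0x0000000f

int main(void)
{
    unsigned int intspec[3] = { 1, 13, 0xf08 };           /* <1 13 0xf08> */
    unsigned int type = intspec[2] & IRQ_TYPE_SENSE_MASK; /* as in gic_irq_xlate */

    printf("flags 0x%x -> type 0x%x (%s triggered)\n",
           intspec[2], type,
           (type & IRQ_TYPE_LEVEL_MASK) ? "level" : "edge");
    return 0;
}
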
/**
* dt_irq - describe an IRQ in the device tree
* @irq: IRQ number
- * @type: IRQ type (see DT_IRQ_TYPE_*)
+ * @type: IRQ type (see IRQ_TYPE_*)
*
* This structure is returned when an interrupt is mapped.
*/
unsigned int type;
};
-/* If type == DT_IRQ_TYPE_NONE, assume we use level triggered */
+/* If type == IRQ_TYPE_NONE, assume the IRQ is level triggered */
static inline bool_t dt_irq_is_level_triggered(const struct dt_irq *irq)
{
unsigned int type = irq->type;
- return (type & DT_IRQ_TYPE_LEVEL_MASK) || (type == DT_IRQ_TYPE_NONE);
+ return (type & IRQ_TYPE_LEVEL_MASK) || (type == IRQ_TYPE_NONE);
}
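
A small standalone check of that fallback (not part of the patch), with the structure and the relevant values copied locally so it compiles on its own:

#include <assert.h>

#define IRQ_TYPE_NONE        0x00000000
#define IRQ_TYPE_EDGE_RISING 0x00000001
#define IRQ_TYPE_LEVEL_HIGH  0x00000004
#define IRQ_TYPE_LEVEL_LOW   0x00000008
#define IRQ_TYPE_LEVEL_MASK  (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)

struct dt_irq { unsigned int irq; unsigned int type; };

/* Same test as dt_irq_is_level_triggered(): an unspecified type
 * (IRQ_TYPE_NONE) is treated as level triggered. */
static int is_level(const struct dt_irq *irq)
{
    return (irq->type & IRQ_TYPE_LEVEL_MASK) || (irq->type == IRQ_TYPE_NONE);
}

int main(void)
{
    struct dt_irq unspecified = { 27, IRQ_TYPE_NONE };
    struct dt_irq edge        = { 33, IRQ_TYPE_EDGE_RISING };

    assert(is_level(&unspecified)); /* falls back to level */
    assert(!is_level(&edge));       /* explicitly edge triggered */
    return 0;
}
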
/**