/* SPDX-License-Identifier: BSD-3-Clause */
-/*
- * Authors: Costin Lupu <costin.lupu@cs.pub.ro>
- *
- * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* Copyright (c) 2023, Unikraft GmbH and The Unikraft Authors.
+ * Licensed under the BSD-3-Clause License (the "License").
+ * You may not use this file except in compliance with the License.
*/
#ifndef __UK_INTCTLR_H__
#define __UK_INTCTLR_H__
-void intctrl_init(void);
-void intctrl_clear_irq(unsigned int irq);
-void intctrl_mask_irq(unsigned int irq);
-void intctrl_ack_irq(unsigned int irq);
-void intctrl_send_ipi(__u8 sgintid, __u32 cpuid);
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <uk/alloc.h>
+#include <uk/asm/irq.h>
+#include <uk/essentials.h>
+#include <uk/plat/lcpu.h>
+
+/**
+ * This event is raised before the platform code handles an IRQ. The normal
+ * IRQ handling will continue or stop according to the returned `UK_EVENT_*`
+ * value.
+ * Note: this event is usually raised in an interrupt context.
+ */
+#define UK_INTCTLR_EVENT_IRQ uk_intctlr_event_irq
+
+/** The event payload for the #UK_INTCTLR_EVENT_IRQ event */
+struct uk_intctlr_event_irq_data {
+ /** The registers of the interrupted code */
+ struct __regs *regs;
+ /** The platform specific interrupt vector number */
+ unsigned long irq;
+};
+
+enum uk_intctlr_irq_trigger {
+ UK_INTCTLR_IRQ_TRIGGER_NONE, /* interpreted as "do not change" */
+ UK_INTCTLR_IRQ_TRIGGER_EDGE,
+ UK_INTCTLR_IRQ_TRIGGER_LEVEL,
+};
+
+/** IRQ descriptor */
+struct uk_intctlr_irq {
+ unsigned int id;
+ unsigned int trigger;
+};
+
+/**
+ * Interrupt controller driver ops
+ *
+ * These must be implemented by the interrupt controller
+ */
+struct uk_intctlr_driver_ops {
+ int (*configure_irq)(struct uk_intctlr_irq *irq);
+ int (*fdt_xlat)(const void *fdt, int nodeoffset, __u32 index,
+ struct uk_intctlr_irq *irq);
+ void (*mask_irq)(unsigned int irq);
+ void (*unmask_irq)(unsigned int irq);
+};
+
+/** Interrupt controller descriptor */
+struct uk_intctlr_desc {
+ char *name;
+ struct uk_intctlr_driver_ops *ops;
+};
+
+/** Interrupt handler function */
+typedef int (*uk_intctlr_irq_handler_func_t)(void *);
+
+/**
+ * Probe the interrupt controller
+ *
+ * This function must be implemented by the driver
+ *
+ * @return zero on success or negative value on error
+ */
+int uk_intctlr_probe(void);
+
+/**
+ * Handle an interrupt
+ *
+ * This function provides a unified interrupt handling implementation.
+ * Must be called by the driver's interrupt handling routine.
+ *
+ * @param regs Register context at the time the interrupt was raised
+ * @param irq Interrupt to handle
+ */
+void uk_intctlr_irq_handle(struct __regs *regs, unsigned int irq);
+
+/**
+ * Configure an interrupt
+ *
+ * @param irq Interrupt configuration
+ * @return zero on success or negative value on error
+ */
+int uk_intctlr_irq_configure(struct uk_intctlr_irq *irq);
+
+/**
+ * Register interrupt controller driver with the uk_intctlr subsystem
+ *
+ * This function must be called by the driver during probe
+ *
+ * @param intctlr populated interrupt controller descriptor
+ * @return zero on success or negative value on error
+ */
+int uk_intctlr_register(struct uk_intctlr_desc *intctlr);
+
+/**
+ * Initialize the uk_intctlr subsystem
+ *
+ * Must be called after probing the device via uk_intctlr_probe
+ *
+ * @param alloc The allocator to use for internal allocations
+ * @return zero on success, negative value on failure
+ */
+int uk_intctlr_init(struct uk_alloc *alloc);
+
+/**
+ * Register an interrupt handler
+ *
+ * @param irq Interrupt to register handler for
+ * @param handler Handler function
+ * @param arg Caller data to be passed to the handler
+ * @return zero on success or negative value on error
+ */
+int uk_intctlr_irq_register(unsigned int irq,
+ uk_intctlr_irq_handler_func_t handler,
+ void *arg);
+
+/**
+ * Unregister a previously registered interrupt handler
+ *
+ * @param irq Interrupt to unregister the handler from
+ * @param handler Previously registered handler function
+ * @return zero on success or negative value on error
+ */
+int uk_intctlr_irq_unregister(unsigned int irq,
+ uk_intctlr_irq_handler_func_t handler);
+
+/**
+ * Mask an interrupt
+ *
+ * @param irq Interrupt to mask
+ */
+void uk_intctlr_irq_mask(unsigned int irq);
+
+/**
+ * Unmask an interrupt
+ *
+ * @param irq Interrupt to unmask
+ */
+void uk_intctlr_irq_unmask(unsigned int irq);
+
+/**
+ * Allocate IRQs from available pool
+ *
+ * @param irqs pointer to array of irqs
+ * @param count number of array elements
+ * @return zero on success, or negative value on error
+ */
+int uk_intctlr_irq_alloc(unsigned int *irqs, __sz count);
+
+/**
+ * Free previously allocated IRQs
+ *
+ * @param irqs pointer to array of irqs
+ * @param count number of array elements
+ * @return zero on success, or negative value on error
+ */
+int uk_intctlr_irq_free(unsigned int *irqs, __sz count);
+
+/**
+ * Translate from `interrupts` fdt node to IRQ descriptor
+ *
+ * This function is only available for devices that are discoverable
+ * via fdt
+ *
+ * @param fdt pointer to the device tree blob
+ * @param nodeoffset offset of `interrupts` node to parse in the fdt
+ * @param index the index of the interrupt to retrieve within the node
+ * @param irq interrupt descriptor to populate
+ * @return zero on success or libfdt error on failure
+ */
+int uk_intctlr_irq_fdt_xlat(const void *fdt, int nodeoffset, __u32 index,
+ struct uk_intctlr_irq *irq);
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __cplusplus
+}
+#endif
#endif /* __UK_INTCTLR_H__ */
#include <uk/alloc.h>
#include <uk/plat/lcpu.h>
#include <uk/plat/time.h>
-#include <uk/plat/irq.h>
-#include <uk/plat/common/irq.h>
#include <uk/intctlr.h>
#include <uk/assert.h>
#include <uk/event.h>
#include <uk/print.h>
#include <errno.h>
#include <uk/bitops.h>
-#ifdef CONFIG_UKPLAT_ISR_ECTX_ASSERTIONS
+#if CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS
#include <uk/arch/ctx.h>
-#endif
+#endif /* CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS */
-UK_EVENT(UKPLAT_EVENT_IRQ);
+#define MAX_HANDLERS_PER_IRQ CONFIG_LIBUKINTCTLR_MAX_HANDLERS_PER_IRQ
-UK_TRACEPOINT(trace_plat_kvm_unhandled_irq, "Unhandled irq=%lu\n",
+struct uk_intctlr_desc *uk_intctlr;
+
+UK_EVENT(UK_INTCTLR_EVENT_IRQ);
+
+UK_TRACEPOINT(trace_uk_intctlr_unhandled_irq, "Unhandled irq=%lu\n",
unsigned long);
/* IRQ handlers declarations */
struct irq_handler {
- irq_handler_func_t func;
+ uk_intctlr_irq_handler_func_t func;
void *arg;
};
-static struct irq_handler irq_handlers[__MAX_IRQ]
- [CONFIG_KVM_MAX_IRQ_HANDLER_ENTRIES];
+static struct irq_handler irq_handlers[__MAX_IRQ][MAX_HANDLERS_PER_IRQ];
static inline struct irq_handler *allocate_handler(unsigned long irq)
{
UK_ASSERT(irq < __MAX_IRQ);
- for (int i = 0; i < CONFIG_KVM_MAX_IRQ_HANDLER_ENTRIES; i++)
+ for (int i = 0; i < MAX_HANDLERS_PER_IRQ; i++)
if (irq_handlers[irq][i].func == NULL)
return &irq_handlers[irq][i];
return NULL;
}
-int ukplat_irq_register(unsigned long irq, irq_handler_func_t func, void *arg)
+int uk_intctlr_irq_register(unsigned int irq,
+ uk_intctlr_irq_handler_func_t func, void *arg)
{
struct irq_handler *h;
unsigned long flags;
ukplat_lcpu_restore_irqf(flags);
- intctrl_clear_irq(irq);
+ uk_intctlr->ops->unmask_irq(irq);
+
return 0;
}
*/
extern unsigned long sched_have_pending_events;
-void _ukplat_irq_handle(struct __regs *regs, unsigned long irq)
+void uk_intctlr_irq_handle(struct __regs *regs, unsigned int irq)
{
struct irq_handler *h;
int i;
int rc;
- struct ukplat_event_irq_data ctx;
-#ifdef CONFIG_UKPLAT_ISR_ECTX_ASSERTIONS
+ struct uk_intctlr_event_irq_data ctx;
+#if CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS
__sz ectx_align = ukarch_ectx_align();
__u8 ectxbuf[ukarch_ectx_size() + ectx_align];
struct ukarch_ectx *ectx = (struct ukarch_ectx *)
ALIGN_UP((__uptr) ectxbuf, ectx_align);
ukarch_ectx_init(ectx);
-#endif
+#endif /* CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS */
UK_ASSERT(irq < __MAX_IRQ);
ctx.regs = regs;
ctx.irq = irq;
- rc = uk_raise_event(UKPLAT_EVENT_IRQ, &ctx);
+ rc = uk_raise_event(UK_INTCTLR_EVENT_IRQ, &ctx);
if (unlikely(rc < 0))
UK_CRASH("IRQ event handler returned error: %d\n", rc);
if (rc == UK_EVENT_HANDLED) {
goto exit_ack;
}
- for (i = 0; i < CONFIG_KVM_MAX_IRQ_HANDLER_ENTRIES; i++) {
+ for (i = 0; i < MAX_HANDLERS_PER_IRQ; i++) {
if (irq_handlers[irq][i].func == NULL)
break;
h = &irq_handlers[irq][i];
* devices, and (2) to minimize impact on drivers that share one
* interrupt line that would then stay disabled.
*/
- trace_plat_kvm_unhandled_irq(irq);
+ trace_uk_intctlr_unhandled_irq(irq);
exit_ack:
-#ifdef CONFIG_UKPLAT_ISR_ECTX_ASSERTIONS
+#if CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS
ukarch_ectx_assert_equal(ectx);
-#endif
+#endif /* CONFIG_LIBUKINTCTLR_ISR_ECTX_ASSERTIONS */
intctrl_ack_irq(irq);
+
+ return;
+}
+
+/* Mask (disable delivery of) an interrupt via the registered driver */
+void uk_intctlr_irq_mask(unsigned int irq)
+{
+ UK_ASSERT(uk_intctlr && uk_intctlr->ops);
+
+ /* mask_irq() returns void: plain call; `return <void expr>;` from a
+  * void function is a constraint violation in ISO C
+  */
+ uk_intctlr->ops->mask_irq(irq);
+}
+
+/* Unmask (enable delivery of) an interrupt via the registered driver */
+void uk_intctlr_irq_unmask(unsigned int irq)
+{
+ UK_ASSERT(uk_intctlr && uk_intctlr->ops);
+
+ /* unmask_irq() returns void: plain call; `return <void expr>;` from a
+  * void function is a constraint violation in ISO C
+  */
+ uk_intctlr->ops->unmask_irq(irq);
+}
+
+/* Forward an IRQ configuration request (id/trigger) to the registered
+ * interrupt controller driver; returns the driver's status code.
+ */
+int uk_intctlr_irq_configure(struct uk_intctlr_irq *irq)
+{
+ UK_ASSERT(uk_intctlr && uk_intctlr->ops);
+ UK_ASSERT(irq);
+
+ return uk_intctlr->ops->configure_irq(irq);
+}
+
+/* Translate one entry of a device's fdt `interrupts` property into a
+ * struct uk_intctlr_irq using the driver's fdt_xlat callback. The
+ * callback is optional for drivers, hence the extra assertion below.
+ */
+int uk_intctlr_irq_fdt_xlat(const void *fdt, int nodeoffset, __u32 index,
+ struct uk_intctlr_irq *irq)
+{
+ UK_ASSERT(uk_intctlr && uk_intctlr->ops);
+ UK_ASSERT(fdt);
+ UK_ASSERT(irq);
+
+ /* We're using an assertion here instead of returning -ENOTSUP
+ * because the implementation returns libfdt error codes.
+ */
+ UK_ASSERT(uk_intctlr->ops->fdt_xlat);
+
+ return uk_intctlr->ops->fdt_xlat(fdt, nodeoffset, index, irq);
}
-int ukplat_irq_init(struct uk_alloc *a __unused)
+int uk_intctlr_init(struct uk_alloc *a __unused)
{
+ UK_ASSERT(uk_intctlr);
UK_ASSERT(ukplat_lcpu_irqs_disabled());
/* Nothing for now */
return 0;
}
+
+/* Register an interrupt controller descriptor with the subsystem.
+ * Only one controller is tracked (single global `uk_intctlr`); a second
+ * registration would silently replace the first — presumably this is
+ * called exactly once from the driver's probe (TODO confirm).
+ */
+int uk_intctlr_register(struct uk_intctlr_desc *intctlr)
+{
+ UK_ASSERT(intctlr);
+
+ uk_intctlr = intctlr;
+
+ return 0;
+}