share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
+ if ( (rc = domain_io_init(d)) != 0 )
+ goto fail;
+
if ( (rc = p2m_alloc_table(d)) != 0 )
goto fail;
/*
- * xen/arch/arm/io.h
+ * xen/arch/arm/io.c
*
* ARM I/O handlers
*
#include <xen/config.h>
#include <xen/lib.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
#include <asm/current.h>
#include <asm/mmio.h>
-static const struct mmio_handler *const mmio_handlers[] =
-{
-    &vgic_distr_mmio_handler,
-    &vuart_mmio_handler,
-};
-#define MMIO_HANDLER_NR ARRAY_SIZE(mmio_handlers)
-
+/*
+ * Dispatch a trapped MMIO access to the per-domain handler whose
+ * [addr, addr + size) range contains the faulting guest physical
+ * address (info->gpa).  Returns the handler's result, or 0 when no
+ * registered handler claims the address.
+ */
 int handle_mmio(mmio_info_t *info)
 {
     struct vcpu *v = current;
     int i;
+    const struct mmio_handler *mmio_handler;
+    const struct io_handler *io_handlers = &v->domain->arch.io_handlers;
+    /*
+     * Lock-free read of the handler table: register_mmio_handler()
+     * issues a dsb(ish) before incrementing num_entries, so every
+     * entry with index < num_entries is fully initialized by the time
+     * we observe the count.  Entries are never removed or modified
+     * after registration.
+     */
-    for ( i = 0; i < MMIO_HANDLER_NR; i++ )
-        if ( mmio_handlers[i]->check_handler(v, info->gpa) )
+    for ( i = 0; i < io_handlers->num_entries; i++ )
+    {
+        mmio_handler = &io_handlers->mmio_handlers[i];
+
+        if ( (info->gpa >= mmio_handler->addr) &&
+             (info->gpa < (mmio_handler->addr + mmio_handler->size)) )
+        {
+            /* dabt.write distinguishes a store (write) from a load (read). */
             return info->dabt.write ?
-                mmio_handlers[i]->write_handler(v, info) :
-                mmio_handlers[i]->read_handler(v, info);
+                mmio_handler->mmio_handler_ops->write_handler(v, info) :
+                mmio_handler->mmio_handler_ops->read_handler(v, info);
+        }
+    }
     return 0;
 }
+
+/*
+ * Append an MMIO handler covering [addr, addr + size) to d's handler
+ * table.  The entry is made visible to lock-free readers
+ * (handle_mmio()) only after it is fully initialized.
+ */
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size)
+{
+    struct io_handler *handler = &d->arch.io_handlers;
+
+    spin_lock(&handler->lock);
+
+    /*
+     * Check the bound under the lock: otherwise two concurrent
+     * registrations could both pass the check and overflow
+     * mmio_handlers[].
+     */
+    BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+
+    handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
+    handler->mmio_handlers[handler->num_entries].addr = addr;
+    handler->mmio_handlers[handler->num_entries].size = size;
+    /*
+     * Ensure the new entry is fully written before the incremented
+     * num_entries becomes visible to lock-free readers in
+     * handle_mmio().
+     */
+    dsb(ish);
+    handler->num_entries++;
+
+    spin_unlock(&handler->lock);
+}
+
+/*
+ * Initialize d's MMIO handler table: empty list plus the lock that
+ * serializes register_mmio_handler().  Always returns 0; the int
+ * return type matches the other domain_*_init() hooks called during
+ * domain construction.
+ */
+int domain_io_init(struct domain *d)
+{
+    spin_lock_init(&d->arch.io_handlers.lock);
+    d->arch.io_handlers.num_entries = 0;
+
+    return 0;
+}
+
/*
* Local variables:
* mode: C
/* Number of ranks of interrupt registers for a domain */
#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
+static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info);
+static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info);
+
/*
* Rank containing GICD_<FOO><n> for GICD_<FOO> with
* <b>-bits-per-interrupt
return vgic_rank_offset(v, 8, irq >> 2);
}
+/* MMIO ops for the emulated vGIC distributor region; registered per
+ * domain in domain_vgic_init(). */
+static const struct mmio_handler_ops vgic_distr_mmio_handler = {
+    .read_handler = vgic_distr_mmio_read,
+    .write_handler = vgic_distr_mmio_write,
+};
+
int domain_vgic_init(struct domain *d)
{
int i;
}
for (i=0; i<DOMAIN_NR_RANKS(d); i++)
spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+
+ /*
+     * We rely on gicv_setup() to have initialized dbase (the vGIC
+     * distributor base address) before this point.
+ */
+ register_mmio_handler(d, &vgic_distr_mmio_handler,
+ d->arch.vgic.dbase, PAGE_SIZE);
+
return 0;
}
return 1;
}
-static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
-{
- struct domain *d = v->domain;
-
- return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
-}
-
-const struct mmio_handler vgic_distr_mmio_handler = {
- .check_handler = vgic_distr_mmio_check,
- .read_handler = vgic_distr_mmio_read,
- .write_handler = vgic_distr_mmio_write,
-};
-
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
struct pending_irq *n;
#define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
+static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info);
+static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info);
+
+/* MMIO ops for the virtual UART region; registered per domain in
+ * domain_vuart_init(). */
+static const struct mmio_handler_ops vuart_mmio_handler = {
+    .read_handler = vuart_mmio_read,
+    .write_handler = vuart_mmio_write,
+};
+
int domain_vuart_init(struct domain *d)
{
ASSERT( is_hardware_domain(d) );
if ( !d->arch.vuart.buf )
return -ENOMEM;
+ register_mmio_handler(d, &vuart_mmio_handler,
+ d->arch.vuart.info->base_addr,
+ d->arch.vuart.info->size);
+
return 0;
}
spin_unlock(&uart->lock);
}
-static int vuart_mmio_check(struct vcpu *v, paddr_t addr)
-{
- const struct vuart_info *info = v->domain->arch.vuart.info;
-
- return (domain_has_vuart(v->domain) && addr >= info->base_addr &&
- addr <= (info->base_addr + info->size));
-}
-
static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info)
{
struct domain *d = v->domain;
return 1;
}
-const struct mmio_handler vuart_mmio_handler = {
- .check_handler = vuart_mmio_check,
- .read_handler = vuart_mmio_read,
- .write_handler = vuart_mmio_write,
-};
-
/*
* Local variables:
* mode: C
#include <asm/page.h>
#include <asm/p2m.h>
#include <asm/vfp.h>
+#include <asm/mmio.h>
#include <public/hvm/params.h>
#include <xen/serial.h>
#include <xen/hvm/iommu.h>
struct hvm_domain hvm_domain;
xen_pfn_t *grant_table_gpfn;
+ struct io_handler io_handlers;
/* Continuable domain_relinquish_resources(). */
enum {
RELMEM_not_started,
#include <asm/processor.h>
#include <asm/regs.h>
+#define MAX_IO_HANDLER 16
+
typedef struct
{
struct hsr_dabt dabt;
typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info);
typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
-struct mmio_handler {
-    mmio_check_t check_handler;
+/* Read/write callbacks shared by every instance of one handler type. */
+struct mmio_handler_ops {
     mmio_read_t read_handler;
     mmio_write_t write_handler;
 };
-extern const struct mmio_handler vgic_distr_mmio_handler;
-extern const struct mmio_handler vuart_mmio_handler;
+/* One registered handler: the guest-physical range [addr, addr + size)
+ * it claims, plus the ops used to emulate accesses to it. */
+struct mmio_handler {
+    paddr_t addr;
+    paddr_t size;
+    const struct mmio_handler_ops *mmio_handler_ops;
+};
+
+/* Per-domain handler table.  Writers (register_mmio_handler) take
+ * lock; readers (handle_mmio) are lock-free and rely on the write-side
+ * barrier ordering entries before num_entries. */
+struct io_handler {
+    int num_entries;
+    spinlock_t lock;
+    struct mmio_handler mmio_handlers[MAX_IO_HANDLER];
+};
 extern int handle_mmio(mmio_info_t *info);
+/* Append a handler for [addr, addr + size) to d's table; BUG()s if the
+ * table is full. */
+void register_mmio_handler(struct domain *d,
+                           const struct mmio_handler_ops *handle,
+                           paddr_t addr, paddr_t size);
+/* Set up an empty handler table for d during domain creation. */
+int domain_io_init(struct domain *d);
#endif /* __ASM_ARM_MMIO_H__ */