#include <xen/event.h>
#include <xen/iommu.h>
-static const struct hvm_mmio_ops *const
-hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
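+/*
+ * Glue between the generic hvm_io_ops interface and the per-device
+ * hvm_mmio_ops (formerly looked up via the hvm_mmio_handlers[] array).
+ */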
+static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
+ const ioreq_t *p)
{
- &hpet_mmio_ops,
- &vlapic_mmio_ops,
- &vioapic_mmio_ops,
- &msixtbl_mmio_ops,
- &iommu_mmio_ops
-};
+ BUG_ON(handler->type != IOREQ_TYPE_COPY);
+
+ return handler->mmio.ops->check(current, p->addr);
+}
-static int hvm_mmio_access(struct vcpu *v,
- ioreq_t *p,
- hvm_mmio_read_t read,
- hvm_mmio_write_t write)
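+/* Forward the read to the device model's own read accessor. */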
+static int hvm_mmio_read(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size, uint64_t *data)
{
- struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
- unsigned long data;
- int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
+ BUG_ON(handler->type != IOREQ_TYPE_COPY);
- if ( !p->data_is_ptr )
- {
- if ( p->dir == IOREQ_READ )
- {
- if ( vio->mmio_retrying )
- {
- if ( vio->mmio_large_read_bytes != p->size )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&data, vio->mmio_large_read, p->size);
- vio->mmio_large_read_bytes = 0;
- vio->mmio_retrying = 0;
- }
- else
- rc = read(v, p->addr, p->size, &data);
- p->data = data;
- }
- else /* p->dir == IOREQ_WRITE */
- rc = write(v, p->addr, p->size, p->data);
- return rc;
- }
+ return handler->mmio.ops->read(current, addr, size, data);
+}
- if ( p->dir == IOREQ_READ )
- {
- for ( i = 0; i < p->count; i++ )
- {
- if ( vio->mmio_retrying )
- {
- if ( vio->mmio_large_read_bytes != p->size )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&data, vio->mmio_large_read, p->size);
- vio->mmio_large_read_bytes = 0;
- vio->mmio_retrying = 0;
- }
- else
- {
- rc = read(v, p->addr + step * i, p->size, &data);
- if ( rc != X86EMUL_OKAY )
- break;
- }
- switch ( hvm_copy_to_guest_phys(p->data + step * i,
- &data, p->size) )
- {
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- /* Drop the write as real hardware would. */
- continue;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
- }
- if ( rc != X86EMUL_OKAY)
- break;
- }
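+/* Forward the write to the device model's own write accessor. */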
+static int hvm_mmio_write(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size, uint64_t data)
+{
+ BUG_ON(handler->type != IOREQ_TYPE_COPY);
- if ( rc == X86EMUL_RETRY )
- {
- vio->mmio_retry = 1;
- vio->mmio_large_read_bytes = p->size;
- memcpy(vio->mmio_large_read, &data, p->size);
- }
- }
- else
- {
- for ( i = 0; i < p->count; i++ )
- {
- switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
- p->size) )
- {
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- data = ~0;
- break;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
- }
- if ( rc != X86EMUL_OKAY )
- break;
- rc = write(v, p->addr + step * i, p->size, data);
- if ( rc != X86EMUL_OKAY )
- break;
- }
+ return handler->mmio.ops->write(current, addr, size, data);
+}
- if ( rc == X86EMUL_RETRY )
- vio->mmio_retry = 1;
- }
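+/* Dispatch table used for IOREQ_TYPE_COPY (MMIO) requests. */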
+static const struct hvm_io_ops mmio_ops = {
+ .accept = hvm_mmio_accept,
+ .read = hvm_mmio_read,
+ .write = hvm_mmio_write
+};
- if ( i != 0 )
- {
- p->count = i;
- rc = X86EMUL_OKAY;
- }
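+/*
+ * A port I/O handler claims a request only if the access lies entirely
+ * within its registered [port, port + size) range.
+ */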
+static bool_t hvm_portio_accept(const struct hvm_io_handler *handler,
+ const ioreq_t *p)
+{
+ unsigned int start = handler->portio.port;
+ unsigned int end = start + handler->portio.size;
- return rc;
+ BUG_ON(handler->type != IOREQ_TYPE_PIO);
+
+ return (p->addr >= start) && ((p->addr + p->size) <= end);
}
-bool_t hvm_mmio_internal(paddr_t gpa)
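+/* Bounce through a 32-bit buffer: portio_action_t handles at most 32 bits. */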
+static int hvm_portio_read(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size, uint64_t *data)
{
- struct vcpu *curr = current;
- unsigned int i;
+ uint32_t val = ~0u;
+ int rc;
+
+ BUG_ON(handler->type != IOREQ_TYPE_PIO);
- for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
- if ( hvm_mmio_handlers[i]->check(curr, gpa) )
- return 1;
+ rc = handler->portio.action(IOREQ_READ, addr, size, &val);
+ *data = val;
- return 0;
+ return rc;
}
-int hvm_mmio_intercept(ioreq_t *p)
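+/* The value written is likewise clipped to the 32 bits the action takes. */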
+static int hvm_portio_write(const struct hvm_io_handler *handler,
+ uint64_t addr, uint32_t size, uint64_t data)
{
- struct vcpu *v = current;
- int i;
-
- for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
- {
- hvm_mmio_check_t check = hvm_mmio_handlers[i]->check;
+ uint32_t val = data;
- if ( check(v, p->addr) )
- {
- if ( unlikely(p->count > 1) &&
- !check(v, unlikely(p->df)
- ? p->addr - (p->count - 1L) * p->size
- : p->addr + (p->count - 1L) * p->size) )
- p->count = 1;
-
- return hvm_mmio_access(
- v, p,
- hvm_mmio_handlers[i]->read,
- hvm_mmio_handlers[i]->write);
- }
- }
+ BUG_ON(handler->type != IOREQ_TYPE_PIO);
- return X86EMUL_UNHANDLEABLE;
+ return handler->portio.action(IOREQ_WRITE, addr, size, &val);
}
-static int process_portio_intercept(portio_action_t action, ioreq_t *p)
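+/* Dispatch table used for IOREQ_TYPE_PIO (port I/O) requests. */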
+static const struct hvm_io_ops portio_ops = {
+ .accept = hvm_portio_accept,
+ .read = hvm_portio_read,
+ .write = hvm_portio_write
+};
+
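+/*
+ * Single emulation loop for both MMIO and port I/O: the ops table is
+ * chosen from p->type, each of the p->count repetitions is issued via
+ * ops->read/write, and data is marshalled to/from guest memory whenever
+ * p->data_is_ptr is set.
+ */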
+int hvm_process_io_intercept(const struct hvm_io_handler *handler,
+ ioreq_t *p)
{
     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ const struct hvm_io_ops *ops = (p->type == IOREQ_TYPE_COPY) ?
+ &mmio_ops : &portio_ops;
int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
- uint32_t data;
-
- if ( !p->data_is_ptr )
- {
- if ( p->dir == IOREQ_READ )
- {
- if ( vio->mmio_retrying )
- {
- if ( vio->mmio_large_read_bytes != p->size )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&data, vio->mmio_large_read, p->size);
- vio->mmio_large_read_bytes = 0;
- vio->mmio_retrying = 0;
- }
- else
- rc = action(IOREQ_READ, p->addr, p->size, &data);
- p->data = data;
- }
- else
- {
- data = p->data;
- rc = action(IOREQ_WRITE, p->addr, p->size, &data);
- }
- return rc;
- }
+ uint64_t data;
+ uint64_t addr;
     if ( p->dir == IOREQ_READ )
     {
         for ( i = 0; i < p->count; i++ )
         {
             if ( vio->mmio_retrying )
             {
                 if ( vio->mmio_large_read_bytes != p->size )
                     return X86EMUL_UNHANDLEABLE;
                 memcpy(&data, vio->mmio_large_read, p->size);
                 vio->mmio_large_read_bytes = 0;
                 vio->mmio_retrying = 0;
             }
             else
             {
-                rc = action(IOREQ_READ, p->addr, p->size, &data);
+                addr = (p->type == IOREQ_TYPE_COPY) ?
+                       p->addr + step * i :
+                       p->addr;
+                rc = ops->read(handler, addr, p->size, &data);
                 if ( rc != X86EMUL_OKAY )
                     break;
             }
- switch ( hvm_copy_to_guest_phys(p->data + step * i,
- &data, p->size) )
+
+ if ( p->data_is_ptr )
{
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- /* Drop the write as real hardware would. */
- continue;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
+                switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                                &data, p->size) )
+                {
+                case HVMCOPY_okay:
+                    break;
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
+                    rc = X86EMUL_RETRY;
+                    break;
+                case HVMCOPY_bad_gfn_to_mfn:
+                    /* Drop the write as real hardware would. */
+                    continue;
+                case HVMCOPY_bad_gva_to_gfn:
+                    ASSERT_UNREACHABLE();
+                    /* fall through */
+                default:
+                    rc = X86EMUL_UNHANDLEABLE;
+                    break;
+                }
+
+                if ( rc != X86EMUL_OKAY )
+                    break;
             }
- if ( rc != X86EMUL_OKAY)
- break;
+ else
+ p->data = data;
}
         if ( rc == X86EMUL_RETRY )
         {
             vio->mmio_retry = 1;
             vio->mmio_large_read_bytes = p->size;
             memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
     else
     {
         for ( i = 0; i < p->count; i++ )
{
- data = 0;
- switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
- p->size) )
+ if ( p->data_is_ptr )
{
- case HVMCOPY_okay:
- break;
- case HVMCOPY_gfn_paged_out:
- case HVMCOPY_gfn_shared:
- rc = X86EMUL_RETRY;
- break;
- case HVMCOPY_bad_gfn_to_mfn:
- data = ~0;
- break;
- case HVMCOPY_bad_gva_to_gfn:
- ASSERT(0);
- /* fall through */
- default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
+                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
+                                                  p->size) )
+                {
+                case HVMCOPY_okay:
+                    break;
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
+                    rc = X86EMUL_RETRY;
+                    break;
+                case HVMCOPY_bad_gfn_to_mfn:
+                    data = ~0;
+                    break;
+                case HVMCOPY_bad_gva_to_gfn:
+                    ASSERT_UNREACHABLE();
+                    /* fall through */
+                default:
+                    rc = X86EMUL_UNHANDLEABLE;
+                    break;
+                }
+
+                if ( rc != X86EMUL_OKAY )
+                    break;
}
- if ( rc != X86EMUL_OKAY )
- break;
- rc = action(IOREQ_WRITE, p->addr, p->size, &data);
+ else
+ data = p->data;
+
+ addr = (p->type == IOREQ_TYPE_COPY) ?
+ p->addr + step * i :
+ p->addr;
+ rc = ops->write(handler, addr, p->size, data);
if ( rc != X86EMUL_OKAY )
break;
         }
 
         if ( rc == X86EMUL_RETRY )
             vio->mmio_retry = 1;
     }
 
     if ( i != 0 )
     {
         p->count = i;
         rc = X86EMUL_OKAY;
     }
 
     return rc;
 }
-/*
- * Check if the request is handled inside xen
- * return value: 0 --not handled; 1 --handled
- */
-int hvm_io_intercept(ioreq_t *p, int type)
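+/*
+ * Return the first registered handler of matching type whose ->accept
+ * method claims the request, or NULL if none does.
+ */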
+const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p)
+{
+ struct domain *curr_d = current->domain;
+ const struct hvm_io_ops *ops = (p->type == IOREQ_TYPE_COPY) ?
+ &mmio_ops : &portio_ops;
+ unsigned int i;
+
+ BUG_ON((p->type != IOREQ_TYPE_PIO) &&
+ (p->type != IOREQ_TYPE_COPY));
+
+ for ( i = 0; i < curr_d->arch.hvm_domain.io_handler_count; i++ )
+ {
+ const struct hvm_io_handler *handler =
+ &curr_d->arch.hvm_domain.io_handler[i];
+
+ if ( handler->type != p->type )
+ continue;
+
+ if ( ops->accept(handler, p) )
+ return handler;
+ }
+
+ return NULL;
+}
+
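+/*
+ * Returns an X86EMUL_* status (not the old 0/1 convention): the dpci and
+ * stdvga intercepts get first refusal before the registered handlers.
+ */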
+int hvm_io_intercept(ioreq_t *p)
{
- struct vcpu *v = current;
- struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
- int i;
- unsigned long addr, size;
+ const struct hvm_io_handler *handler;
- if ( type == HVM_PORTIO )
+ if ( p->type == IOREQ_TYPE_PIO )
{
int rc = dpci_ioport_intercept(p);
if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
return rc;
}
-
- for ( i = 0; i < handler->num_slot; i++ )
+ else if ( p->type == IOREQ_TYPE_COPY )
{
- if ( type != handler->hdl_list[i].type )
- continue;
- addr = handler->hdl_list[i].addr;
- size = handler->hdl_list[i].size;
- if ( (p->addr >= addr) &&
- ((p->addr + p->size) <= (addr + size)) )
- {
- if ( type == HVM_PORTIO )
- return process_portio_intercept(
- handler->hdl_list[i].action.portio, p);
+ int rc = stdvga_intercept_mmio(p);
+ if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
+ return rc;
+ }
- if ( unlikely(p->count > 1) &&
- (unlikely(p->df)
- ? p->addr - (p->count - 1L) * p->size < addr
- : p->addr + p->count * 1L * p->size - 1 >= addr + size) )
- p->count = 1;
+ handler = hvm_find_io_handler(p);
- return handler->hdl_list[i].action.mmio(p);
- }
+ if ( handler == NULL )
+ return X86EMUL_UNHANDLEABLE;
+
+ return hvm_process_io_intercept(handler, p);
+}
+
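+/*
+ * Claim the next free handler slot, crashing the domain (and returning
+ * NULL) if the fixed-size table is full.
+ */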
+struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
+{
+ unsigned int i = d->arch.hvm_domain.io_handler_count++;
+
+ if ( i == NR_IO_HANDLERS )
+ {
+ domain_crash(d);
+ return NULL;
}
- return X86EMUL_UNHANDLEABLE;
+ return &d->arch.hvm_domain.io_handler[i];
}
-void register_io_handler(
- struct domain *d, unsigned long addr, unsigned int size,
- void *action, int type)
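+/* Register an internal MMIO device model by its check/read/write ops. */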
+void register_mmio_handler(struct domain *d,
+ const struct hvm_mmio_ops *ops)
{
- struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
- int num = handler->num_slot;
+ struct hvm_io_handler *handler = hvm_next_io_handler(d);
- BUG_ON(num >= MAX_IO_HANDLER);
+    if ( handler == NULL )
+        return;
+
+    handler->type = IOREQ_TYPE_COPY;
+ handler->mmio.ops = ops;
+}
- handler->hdl_list[num].addr = addr;
- handler->hdl_list[num].size = size;
- handler->hdl_list[num].action.ptr = action;
- handler->hdl_list[num].type = type;
- handler->num_slot++;
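+/* Register an internal handler for the port range [port, port + size). */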
+void register_portio_handler(struct domain *d, unsigned int port,
+ unsigned int size, portio_action_t action)
+{
+ struct hvm_io_handler *handler = hvm_next_io_handler(d);
+
+    if ( handler == NULL )
+        return;
+
+    handler->type = IOREQ_TYPE_PIO;
+ handler->portio.port = port;
+ handler->portio.size = size;
+ handler->portio.action = action;
}
-void relocate_io_handler(
- struct domain *d, unsigned long old_addr, unsigned long new_addr,
- unsigned int size, int type)
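+/* Re-home a registered port range, e.g. when the guest relocates a device. */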
+void relocate_portio_handler(struct domain *d, unsigned int old_port,
+ unsigned int new_port, unsigned int size)
{
- struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
- int i;
-
- for ( i = 0; i < handler->num_slot; i++ )
- if ( (handler->hdl_list[i].addr == old_addr) &&
- (handler->hdl_list[i].size == size) &&
- (handler->hdl_list[i].type == type) )
- handler->hdl_list[i].addr = new_addr;
+ unsigned int i;
+
+ for ( i = 0; i < d->arch.hvm_domain.io_handler_count; i++ )
+ {
+ struct hvm_io_handler *handler =
+ &d->arch.hvm_domain.io_handler[i];
+
+ if ( handler->type != IOREQ_TYPE_PIO )
+ continue;
+
+ if ( (handler->portio.port == old_port) &&
+             (handler->portio.size == size) )
+ {
+ handler->portio.port = new_port;
+ break;
+ }
+ }
+}
+
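+/*
+ * Report whether a GFN is claimed by an internal MMIO handler, probing
+ * with a minimal ioreq (->accept only needs type and addr for MMIO).
+ */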
+bool_t hvm_mmio_internal(paddr_t gpa)
+{
+ ioreq_t p = {
+ .type = IOREQ_TYPE_COPY,
+ .addr = gpa
+ };
+
+ return hvm_find_io_handler(&p) != NULL;
}
/*
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
-#define MAX_IO_HANDLER 16
-
-#define HVM_PORTIO 0
-#define HVM_BUFFERED_IO 2
+#define NR_IO_HANDLERS 32
 typedef int (*hvm_mmio_read_t)(struct vcpu *v,
                                unsigned long addr,
                                unsigned long length,
                                unsigned long *val);
 typedef int (*hvm_mmio_write_t)(struct vcpu *v,
                                 unsigned long addr,
                                 unsigned long length,
                                 unsigned long val);
typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr);
+struct hvm_mmio_ops {
+ hvm_mmio_check_t check;
+ hvm_mmio_read_t read;
+ hvm_mmio_write_t write;
+};
+
typedef int (*portio_action_t)(
int dir, unsigned int port, unsigned int bytes, uint32_t *val);
-typedef int (*mmio_action_t)(ioreq_t *);
-struct io_handler {
- int type;
- unsigned int size;
- unsigned long addr;
- union {
- portio_action_t portio;
- mmio_action_t mmio;
- void *ptr;
- } action;
-};
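+/* 'type' (IOREQ_TYPE_COPY or IOREQ_TYPE_PIO) selects the union arm. */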
struct hvm_io_handler {
- int num_slot;
- struct io_handler hdl_list[MAX_IO_HANDLER];
+ union {
+ struct {
+ const struct hvm_mmio_ops *ops;
+ } mmio;
+ struct {
+ unsigned int port, size;
+ portio_action_t action;
+ } portio;
+ };
+ uint8_t type;
};
-struct hvm_mmio_ops {
- hvm_mmio_check_t check;
- hvm_mmio_read_t read;
- hvm_mmio_write_t write;
+typedef int (*hvm_io_read_t)(const struct hvm_io_handler *,
+ uint64_t addr,
+ uint32_t size,
+ uint64_t *data);
+typedef int (*hvm_io_write_t)(const struct hvm_io_handler *,
+ uint64_t addr,
+ uint32_t size,
+ uint64_t data);
+typedef bool_t (*hvm_io_accept_t)(const struct hvm_io_handler *,
+ const ioreq_t *p);
+struct hvm_io_ops {
+ hvm_io_accept_t accept;
+ hvm_io_read_t read;
+ hvm_io_write_t write;
};
-extern const struct hvm_mmio_ops hpet_mmio_ops;
-extern const struct hvm_mmio_ops vlapic_mmio_ops;
-extern const struct hvm_mmio_ops vioapic_mmio_ops;
-extern const struct hvm_mmio_ops msixtbl_mmio_ops;
-extern const struct hvm_mmio_ops iommu_mmio_ops;
+int hvm_process_io_intercept(const struct hvm_io_handler *handler,
+ ioreq_t *p);
-#define HVM_MMIO_HANDLER_NR 5
+const struct hvm_io_handler *hvm_find_io_handler(ioreq_t *p);
-int hvm_io_intercept(ioreq_t *p, int type);
-void register_io_handler(
- struct domain *d, unsigned long addr, unsigned int size,
- void *action, int type);
-void relocate_io_handler(
- struct domain *d, unsigned long old_addr, unsigned long new_addr,
- unsigned int size, int type);
+int hvm_io_intercept(ioreq_t *p);
-static inline int hvm_portio_intercept(ioreq_t *p)
-{
- return hvm_io_intercept(p, HVM_PORTIO);
-}
-
-static inline int hvm_buffered_io_intercept(ioreq_t *p)
-{
- return hvm_io_intercept(p, HVM_BUFFERED_IO);
-}
+struct hvm_io_handler *hvm_next_io_handler(struct domain *d);
bool_t hvm_mmio_internal(paddr_t gpa);
-int hvm_mmio_intercept(ioreq_t *p);
-int hvm_buffered_io_send(ioreq_t *p);
-static inline void register_portio_handler(
+void register_mmio_handler(struct domain *d,
+ const struct hvm_mmio_ops *ops);
+
+void register_portio_handler(
struct domain *d, unsigned int port, unsigned int size,
- portio_action_t action)
-{
- register_io_handler(d, port, size, action, HVM_PORTIO);
-}
+ portio_action_t action);
-static inline void relocate_portio_handler(
+void relocate_portio_handler(
struct domain *d, unsigned int old_port, unsigned int new_port,
- unsigned int size)
-{
- relocate_io_handler(d, old_port, new_port, size, HVM_PORTIO);
-}
-
-static inline void register_buffered_io_handler(
- struct domain *d, unsigned long addr,
- unsigned int size, mmio_action_t action)
-{
- register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
-}
+ unsigned int size);
+int hvm_buffered_io_send(ioreq_t *p);
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
const union vioapic_redir_entry *ent);
void msix_write_completion(struct vcpu *);
+void msixtbl_init(struct domain *d);
struct hvm_hw_stdvga {
uint8_t sr_index;
};
void stdvga_init(struct domain *d);
+int stdvga_intercept_mmio(ioreq_t *p);
void stdvga_deinit(struct domain *d);
extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
#endif /* __ASM_X86_HVM_IO_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */