#include <xen/init.h>
#include <xen/types.h>
#include <xen/pv_console.h>
+#include <xen/event.h>
#include <asm/apic.h>
#include <asm/guest.h>
{
unsigned int cpu = smp_processor_id();
struct vcpu_info *vcpu_info = &XEN_shared_info->vcpu_info[cpu];
+ unsigned long pending;
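+
+ /*
+ * Clear the upcall pending flag before sampling the selector: events
+ * raised after the xchg() below will set it again and trigger another
+ * upcall, so nothing is lost in between.
+ */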
vcpu_info->evtchn_upcall_pending = 0;
- xchg(&vcpu_info->evtchn_pending_sel, 0);
+ pending = xchg(&vcpu_info->evtchn_pending_sel, 0);
- pv_console_rx(regs);
+ while ( pending )
+ {
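+ /*
+ * Each set selector bit covers one word of the shared pending bitmap;
+ * snapshot and clear that word atomically, then handle only the
+ * events that are not masked.
+ */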
+ unsigned int l1 = ffsl(pending) - 1;
+ unsigned long evtchn = xchg(&XEN_shared_info->evtchn_pending[l1], 0);
+
+ __clear_bit(l1, &pending);
+ evtchn &= ~XEN_shared_info->evtchn_mask[l1];
+ while ( evtchn )
+ {
+ unsigned int port = ffsl(evtchn) - 1;
+
+ __clear_bit(port, &evtchn);
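+ /* Translate the bit position into a global event channel port. */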
+ port += l1 * BITS_PER_LONG;
+
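+ /*
+ * The console event is consumed by the shim itself; any other port
+ * is injected into the guest.
+ */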
+ if ( pv_console && port == pv_console_evtchn() )
+ pv_console_rx(regs);
+ else if ( pv_shim )
+ pv_shim_inject_evtchn(port);
+ }
+ }
ack_APIC_irq();
}
vcpu_info->evtchn_upcall_pending = 0;
xchg(&vcpu_info->evtchn_pending_sel, 0);
- /*
- * evtchn_pending can be cleared only on the boot CPU because it's
- * located in a shared structure.
- */
- for ( i = 0; i < 8; i++ )
+ for ( i = 0; i < ARRAY_SIZE(XEN_shared_info->evtchn_pending); i++ )
xchg(&XEN_shared_info->evtchn_pending[i], 0);
}
}
alloc_direct_apic_vector(&evtchn_upcall_vector, xen_evtchn_upcall);
/* Mask all upcalls */
- for ( i = 0; i < 8; i++ )
+ for ( i = 0; i < ARRAY_SIZE(XEN_shared_info->evtchn_mask); i++ )
xchg(&XEN_shared_info->evtchn_mask[i], ~0ul);
ap_setup_event_channels(true);
+
+ /* Unmask all upcalls. TODO: use hypercall */
+ for ( i = 0; i < ARRAY_SIZE(XEN_shared_info->evtchn_mask); i++ )
+ xchg(&XEN_shared_info->evtchn_mask[i], 0ul);
}
void __init hypervisor_early_setup(struct e820map *e820)
*
* Copyright (c) 2017 Citrix Systems Ltd.
*/
+#include <xen/event.h>
+#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/init.h>
#include <xen/shutdown.h>
: COMPAT_L1_PROT));
}
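+
+/*
+ * Ports managed by L0 are marked as reserved in the shim's event channel
+ * space so the local allocator never hands them out.
+ */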
+static void evtchn_reserve(struct domain *d, unsigned int port)
+{
+ ASSERT(port_is_valid(d, port));
+ evtchn_from_port(d, port)->state = ECS_RESERVED;
+}
+
+static bool evtchn_handled(struct domain *d, unsigned int port)
+{
+ ASSERT(port_is_valid(d, port));
+ /* The shim handles VIRQs; everything else is forwarded to L0. */
+ return evtchn_from_port(d, port)->state == ECS_VIRQ;
+}
+
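+/* Record which vCPU should be notified for an L0-managed port. */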
+static void evtchn_assign_vcpu(struct domain *d, unsigned int port,
+ unsigned int vcpu)
+{
+ ASSERT(port_is_valid(d, port));
+ evtchn_from_port(d, port)->notify_vcpu_id = vcpu;
+}
+
void __init pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start,
unsigned long va_start, unsigned long store_va,
unsigned long console_va, unsigned long vphysmap,
replace_va(d, l4start, va, param); \
dom0_update_physmap(d, (va - va_start) >> PAGE_SHIFT, param, vphysmap);\
} \
+ else \
+ { \
+ BUG_ON(evtchn_allocate_port(d, param)); \
+ evtchn_reserve(d, param); \
+ } \
})
SET_AND_MAP_PARAM(HVM_PARAM_STORE_PFN, si->store_mfn, store_va);
SET_AND_MAP_PARAM(HVM_PARAM_STORE_EVTCHN, si->store_evtchn, 0);
machine_restart(5000);
}
+long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ struct domain *d = current->domain;
+ long rc;
+
+ switch ( cmd )
+ {
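+/*
+ * Pure forwarding: run the operation in L0, then reserve the returned
+ * port in the local event channel space, closing the L0 port again if
+ * the reservation fails.
+ */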
+#define EVTCHN_FORWARD(cmd, port_field) \
+case EVTCHNOP_##cmd: { \
+ struct evtchn_##cmd op; \
+ \
+ if ( copy_from_guest(&op, arg, 1) != 0 ) \
+ return -EFAULT; \
+ \
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_##cmd, &op); \
+ if ( rc ) \
+ break; \
+ \
+ rc = evtchn_allocate_port(d, op.port_field); \
+ if ( rc ) \
+ { \
+ struct evtchn_close close = { \
+ .port = op.port_field, \
+ }; \
+ \
+ BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close)); \
+ } \
+ else \
+ evtchn_reserve(d, op.port_field); \
+ \
+ if ( !rc && __copy_to_guest(arg, &op, 1) ) \
+ rc = -EFAULT; \
+ \
+ break; \
+ }
+ EVTCHN_FORWARD(alloc_unbound, port)
+ EVTCHN_FORWARD(bind_interdomain, local_port)
+#undef EVTCHN_FORWARD
+
+ case EVTCHNOP_bind_virq: {
+ struct evtchn_bind_virq virq;
+ struct evtchn_alloc_unbound alloc = {
+ .dom = DOMID_SELF,
+ .remote_dom = DOMID_SELF,
+ };
+
+ if ( copy_from_guest(&virq, arg, 1) != 0 )
+ return -EFAULT;
+ /*
+ * The event channel space is actually controlled by L0 Xen, so
+ * allocate a port from L0 and then force the VIRQ to be bound to that
+ * specific port.
+ *
+ * This is only required for VIRQ because the rest of the event channel
+ * operations are handled directly by L0.
+ */
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
+ if ( rc )
+ break;
+
+ /* Force L1 to use the event channel port allocated on L0. */
+ rc = evtchn_bind_virq(&virq, alloc.port);
+ if ( rc )
+ {
+ struct evtchn_close free = {
+ .port = alloc.port,
+ };
+
+ xen_hypercall_event_channel_op(EVTCHNOP_close, &free);
+ }
+
+ if ( !rc && __copy_to_guest(arg, &virq, 1) )
+ rc = -EFAULT;
+
+ break;
+ }
+ case EVTCHNOP_status: {
+ struct evtchn_status status;
+
+ if ( copy_from_guest(&status, arg, 1) != 0 )
+ return -EFAULT;
+
+ if ( !port_is_valid(d, status.port) )
+ return -EINVAL;
+
+ if ( evtchn_handled(d, status.port) )
+ rc = evtchn_status(&status);
+ else
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_status, &status);
+
+ break;
+ }
+ case EVTCHNOP_bind_vcpu: {
+ struct evtchn_bind_vcpu vcpu;
+
+ if ( copy_from_guest(&vcpu, arg, 1) != 0 )
+ return -EFAULT;
+
+ if ( !port_is_valid(d, vcpu.port) )
+ return -EINVAL;
+
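+ /*
+ * VIRQs are rebound locally; any other port is rebound in L0, with the
+ * new vCPU mirrored into the shim's struct evtchn.
+ */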
+ if ( evtchn_handled(d, vcpu.port) )
+ rc = evtchn_bind_vcpu(vcpu.port, vcpu.vcpu);
+ else
+ {
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_vcpu, &vcpu);
+ if ( !rc )
+ evtchn_assign_vcpu(d, vcpu.port, vcpu.vcpu);
+ }
+
+ break;
+ }
+ case EVTCHNOP_close: {
+ struct evtchn_close close;
+
+ if ( copy_from_guest(&close, arg, 1) != 0 )
+ return -EFAULT;
+
+ if ( !port_is_valid(d, close.port) )
+ return -EINVAL;
+
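+ /*
+ * VIRQ ports need full local teardown; L0-managed ports are simply
+ * freed locally before the port is closed in L0 below.
+ */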
+ if ( evtchn_handled(d, close.port) )
+ {
+ rc = evtchn_close(d, close.port, true);
+ if ( rc )
+ break;
+ }
+ else
+ evtchn_free(d, evtchn_from_port(d, close.port));
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_close, &close);
+ if ( rc )
+ /*
+ * If the port cannot be closed in L0, mark it as reserved
+ * in the shim to avoid re-using it.
+ */
+ evtchn_reserve(d, close.port);
+
+ break;
+ }
+ case EVTCHNOP_bind_ipi: {
+ struct evtchn_bind_ipi ipi;
+
+ if ( copy_from_guest(&ipi, arg, 1) != 0 )
+ return -EFAULT;
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_bind_ipi, &ipi);
+ if ( rc )
+ break;
+
+ rc = evtchn_allocate_port(d, ipi.port);
+ if ( rc )
+ {
+ struct evtchn_close close = {
+ .port = ipi.port,
+ };
+
+ /*
+ * If closing the event channel port also fails there's not
+ * much the shim can do, since it has been unable to reserve
+ * the port in its event channel space.
+ */
+ BUG_ON(xen_hypercall_event_channel_op(EVTCHNOP_close, &close));
+ break;
+ }
+
+ evtchn_reserve(d, ipi.port);
+ evtchn_assign_vcpu(d, ipi.port, ipi.vcpu);
+
+ if ( __copy_to_guest(arg, &ipi, 1) )
+ rc = -EFAULT;
+
+ break;
+ }
+ case EVTCHNOP_unmask: {
+ struct evtchn_unmask unmask;
+
+ if ( copy_from_guest(&unmask, arg, 1) != 0 )
+ return -EFAULT;
+
+ /* Unmask is handled in L1 */
+ rc = evtchn_unmask(unmask.port);
+
+ break;
+ }
+ case EVTCHNOP_send: {
+ struct evtchn_send send;
+
+ if ( copy_from_guest(&send, arg, 1) != 0 )
+ return -EFAULT;
+
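+ /* Sends always go through L0, which owns the event channel transport. */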
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_send, &send);
+
+ break;
+ }
+ case EVTCHNOP_reset: {
+ struct evtchn_reset reset;
+
+ if ( copy_from_guest(&reset, arg, 1) != 0 )
+ return -EFAULT;
+
+ rc = xen_hypercall_event_channel_op(EVTCHNOP_reset, &reset);
+
+ break;
+ }
+ default:
+ /* No FIFO or PIRQ support for now */
+ rc = -ENOSYS;
+ break;
+ }
+
+ return rc;
+}
+
+void pv_shim_inject_evtchn(unsigned int port)
+{
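+ /* Drop events for ports that are not (yet) valid in the guest. */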
+ if ( port_is_valid(pv_domain, port) )
+ {
+ struct evtchn *chn = evtchn_from_port(pv_domain, port);
+
+ evtchn_port_set_pending(pv_domain, chn->notify_vcpu_id, chn);
+ }
+}
+
domid_t get_dom0_domid(void)
{
uint32_t eax, ebx, ecx, edx;
struct domain *hardware_domain __read_mostly;
+struct domain *pv_domain __read_mostly;
+
#ifdef CONFIG_LATE_HWDOM
domid_t hardware_domid __read_mostly;
integer_param("hardware_dom", hardware_domid);
rcu_assign_pointer(*pd, d);
rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
spin_unlock(&domlist_update_lock);
+
+#ifdef CONFIG_X86
+ if ( pv_shim )
+ pv_domain = d;
+#endif
}
return d;
#include <public/event_channel.h>
#include <xsm/xsm.h>
+#ifdef CONFIG_X86
+#include <asm/pv/shim.h>
+#endif
+
#define ERROR_EXIT(_errno) \
do { \
gdprintk(XENLOG_WARNING, \
xfree(bucket);
}
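+
+/*
+ * Allocate a specific port, growing the per-domain group/bucket
+ * structures as needed. Returns -EBUSY if the port is already in use.
+ */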
+int evtchn_allocate_port(struct domain *d, unsigned int port)
+{
+ if ( port > d->max_evtchn_port || port >= d->max_evtchns )
+ return -ENOSPC;
+
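+ /* The bucket already exists: the port must be completely free. */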
+ if ( port_is_valid(d, port) )
+ {
+ if ( evtchn_from_port(d, port)->state != ECS_FREE ||
+ evtchn_port_is_busy(d, port) )
+ return -EBUSY;
+ }
+ else
+ {
+ struct evtchn *chn;
+ struct evtchn **grp;
+
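+ /* Grow the group/bucket structures to cover this port. */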
+ if ( !group_from_port(d, port) )
+ {
+ grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+ if ( !grp )
+ return -ENOMEM;
+ group_from_port(d, port) = grp;
+ }
+
+ chn = alloc_evtchn_bucket(d, port);
+ if ( !chn )
+ return -ENOMEM;
+ bucket_from_port(d, port) = chn;
+
+ write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ }
+
+ return 0;
+}
+
static int get_free_port(struct domain *d)
{
- struct evtchn *chn;
- struct evtchn **grp;
int port;
if ( d->is_dying )
return -EINVAL;
- for ( port = 0; port_is_valid(d, port); port++ )
+ for ( port = 0; port <= d->max_evtchn_port; port++ )
{
- if ( port > d->max_evtchn_port )
- return -ENOSPC;
- if ( evtchn_from_port(d, port)->state == ECS_FREE
- && !evtchn_port_is_busy(d, port) )
- return port;
- }
+ int rc = evtchn_allocate_port(d, port);
- if ( port == d->max_evtchns || port > d->max_evtchn_port )
- return -ENOSPC;
+
+ if ( rc == -EBUSY )
+ continue;
+ if ( rc )
+ return rc;
- if ( !group_from_port(d, port) )
- {
- grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
- if ( !grp )
- return -ENOMEM;
- group_from_port(d, port) = grp;
+ return port;
}
- chn = alloc_evtchn_bucket(d, port);
- if ( !chn )
- return -ENOMEM;
- bucket_from_port(d, port) = chn;
-
- write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
-
- return port;
+ return -ENOSPC;
}
-static void free_evtchn(struct domain *d, struct evtchn *chn)
+void evtchn_free(struct domain *d, struct evtchn *chn)
{
/* Clear pending event to avoid unexpected behavior on re-bind. */
evtchn_port_clear_pending(d, chn);
}
-static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, int port)
{
struct evtchn *chn;
struct vcpu *v;
struct domain *d = current->domain;
- int port, virq = bind->virq, vcpu = bind->vcpu;
- long rc = 0;
+ int virq = bind->virq, vcpu = bind->vcpu;
+ int rc = 0;
if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
if ( v->virq_to_evtchn[virq] != 0 )
ERROR_EXIT(-EEXIST);
- if ( (port = get_free_port(d)) < 0 )
+ if ( port >= 0 )
+ {
+ if ( (rc = evtchn_allocate_port(d, port)) < 0 )
+ ERROR_EXIT(rc);
+ }
+ else if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT(port);
chn = evtchn_from_port(d, port);
}
-static long evtchn_close(struct domain *d1, int port1, bool_t guest)
+long evtchn_close(struct domain *d1, int port1, bool guest)
{
struct domain *d2 = NULL;
struct vcpu *v;
double_evtchn_lock(chn1, chn2);
- free_evtchn(d1, chn1);
+ evtchn_free(d1, chn1);
chn2->state = ECS_UNBOUND;
chn2->u.unbound.remote_domid = d1->domain_id;
}
spin_lock(&chn1->lock);
- free_evtchn(d1, chn1);
+ evtchn_free(d1, chn1);
spin_unlock(&chn1->lock);
out:
}
}
-static long evtchn_status(evtchn_status_t *status)
+long evtchn_status(evtchn_status_t *status)
{
struct domain *d;
domid_t dom = status->dom;
{
long rc;
+#ifdef CONFIG_X86
+ if ( pv_shim )
+ return pv_shim_event_channel_op(cmd, arg);
+#endif
+
switch ( cmd )
{
case EVTCHNOP_alloc_unbound: {
struct evtchn_bind_virq bind_virq;
if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
return -EFAULT;
- rc = evtchn_bind_virq(&bind_virq);
+ rc = evtchn_bind_virq(&bind_virq, -1);
if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
xen_hypercall_evtchn_send(cons_evtchn);
}
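+
+/* Expose the console event channel so the upcall handler can match it. */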
+evtchn_port_t pv_console_evtchn(void)
+{
+ return cons_evtchn;
+}
+
size_t pv_console_rx(struct cpu_user_regs *regs)
{
char c;
if ( !cons_ring )
return 0;
- /* TODO: move this somewhere */
- if ( !test_bit(cons_evtchn, XEN_shared_info->evtchn_pending) )
- return 0;
-
prod = ACCESS_ONCE(cons_ring->in_prod);
cons = cons_ring->in_cons;
/* Get pointers before reading the ring */
ACCESS_ONCE(cons_ring->in_cons) = cons;
notify_daemon();
- clear_bit(cons_evtchn, XEN_shared_info->evtchn_pending);
-
return recv;
}
unsigned long console_va, unsigned long vphysmap,
start_info_t *si);
void pv_shim_shutdown(uint8_t reason);
+long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+void pv_shim_inject_evtchn(unsigned int port);
domid_t get_dom0_domid(void);
#else
{
ASSERT_UNREACHABLE();
}
+static inline long pv_shim_event_channel_op(int cmd,
+ XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ ASSERT_UNREACHABLE();
+ return 0;
+}
+static inline void pv_shim_inject_evtchn(unsigned int port)
+{
+ ASSERT_UNREACHABLE();
+}
static inline domid_t get_dom0_domid(void)
{
return 0;
/* Bind a local event-channel port to the specified VCPU. */
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+/* Bind a VIRQ. */
+int evtchn_bind_virq(evtchn_bind_virq_t *bind, int port);
+
+/* Get the status of an event channel port. */
+long evtchn_status(evtchn_status_t *status);
+
+/* Close an event channel. */
+long evtchn_close(struct domain *d1, int port1, bool guest);
+
+/* Free an event channel. */
+void evtchn_free(struct domain *d, struct evtchn *chn);
+
+/* Allocate a specific event channel port. */
+int evtchn_allocate_port(struct domain *d, unsigned int port);
+
/* Unmask a local event-channel port. */
int evtchn_unmask(unsigned int port);
void pv_console_init_postirq(void);
void pv_console_puts(const char *buf);
size_t pv_console_rx(struct cpu_user_regs *regs);
+evtchn_port_t pv_console_evtchn(void);
#else
static inline void pv_console_init_postirq(void) { }
static inline void pv_console_puts(const char *buf) { }
static inline size_t pv_console_rx(struct cpu_user_regs *regs) { return 0; }
+static inline evtchn_port_t pv_console_evtchn(void)
+{
+ ASSERT_UNREACHABLE();
+ return 0;
+}
#endif /* !CONFIG_XEN_GUEST */
#endif /* __XEN_PV_CONSOLE_H__ */
/* A global pointer to the hardware domain (usually DOM0). */
extern struct domain *hardware_domain;
+extern struct domain *pv_domain;
+
#ifdef CONFIG_LATE_HWDOM
extern domid_t hardware_domid;
#else