# compiler_rt is strictly speaking necessary only in KERNONLY=true,
# but building it always makes testing kernonly easier
TARGETS+= compiler_rt
-INSTALLTGTS= librumpkern_bmktc_install
+INSTALLTGTS+= librumpkern_bmktc_install
INSTALLTGTS+= librumpkern_mman_install
ifneq (${KERNONLY},true)
LDSCRIPT:= $(abspath $(OBJ_DIR)/xen/minios.lds)
+INSTALLTGTS= librumpxen_xendev_install librumpnet_xenif_install
+
include ../Makefile.inc
.PHONY: default
links:
$(MAKE) -C xen links
-$(eval $(call BUILDLIB_target,librumpxendev,.))
-$(eval $(call BUILDLIB_target,librumpxenif,.))
+$(eval $(call BUILDLIB_target,librumpxen_xendev,.))
+$(eval $(call BUILDLIB_target,librumpnet_xenif,.))
-xenlibs: ${RROBJLIB}/librumpxenif/librumpxenif.a ${RROBJLIB}/librumpxendev/librumpxendev.a
+xenlibs: ${RROBJLIB}/librumpxen_xendev/librumpxen_xendev.a ${RROBJLIB}/librumpnet_xenif/librumpnet_xenif.a
$(MAINOBJ): $(RUMP_OBJS) platformlibs xenlibs
$(CC) -Wl,-r $(CFLAGS) $(LDFLAGS) $(RUMP_OBJS) -nostdlib -o $@ \
--- /dev/null
+.include <bsd.own.mk>
+
+LIB= rumpnet_xenif
+
+SRCS= if_virt.c
+SRCS+= xenif_component.c
+
+RUMPTOP= ${TOPRUMP}
+
+IFBASE= -DVIRTIF_BASE=xenif
+
+CPPFLAGS+= -I${RUMPTOP}/librump/rumpkern -I${RUMPTOP}/librump/rumpnet
+CPPFLAGS+= -I${.CURDIR}
+CPPFLAGS+= ${IFBASE}
+
+RUMPCOMP_USER_SRCS= xenif_user.c
+RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/..
+RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/../xen/include
+RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/../../../include
+RUMPCOMP_USER_CPPFLAGS+= ${IFBASE}
+
+# XXX
+.undef RUMPKERN_ONLY
+
+.include "${RUMPTOP}/Makefile.rump"
+.include <bsd.lib.mk>
+.include <bsd.klinks.mk>
--- /dev/null
+/* $NetBSD: if_virt.c,v 1.36 2013/07/04 11:46:51 pooka Exp $ */
+
+/*
+ * Copyright (c) 2008, 2013 Antti Kantee. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: if_virt.c,v 1.36 2013/07/04 11:46:51 pooka Exp $");
+
+#include <sys/param.h>
+#include <sys/condvar.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/kmem.h>
+#include <sys/kthread.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/sockio.h>
+#include <sys/socketvar.h>
+#include <sys/cprng.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_ether.h>
+#include <net/if_tap.h>
+
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+
+#include <rump/rump.h>
+
+#include "rump_private.h"
+#include "rump_net_private.h"
+
+#include "if_virt.h"
+#include "if_virt_user.h"
+
+/*
+ * Virtual interface. Uses hypercalls to shovel packets back
+ * and forth. The exact method for shoveling depends on the
+ * hypercall implementation.
+ */
+
+static int virtif_init(struct ifnet *);
+static int virtif_ioctl(struct ifnet *, u_long, void *);
+static void virtif_start(struct ifnet *);
+static void virtif_stop(struct ifnet *, int);
+
+struct virtif_sc {
+ struct ethercom sc_ec;
+ struct virtif_user *sc_viu;
+};
+
+static int virtif_clone(struct if_clone *, int);
+static int virtif_unclone(struct ifnet *);
+
+struct if_clone VIF_CLONER =
+ IF_CLONE_INITIALIZER(VIF_NAME, virtif_clone, virtif_unclone);
+
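+/*
+ * Create a cloned interface instance: pick a locally administered
+ * MAC address, ask the hypercall layer (VIFHYPER_CREATE) for the
+ * backing device and attach the resulting Ethernet interface.
+ */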
+static int
+virtif_clone(struct if_clone *ifc, int num)
+{
+ struct virtif_sc *sc;
+ struct virtif_user *viu;
+ struct ifnet *ifp;
+ uint8_t enaddr[ETHER_ADDR_LEN] = { 0xb2, 0x0a, 0x00, 0x0b, 0x0e, 0x01 };
+ char enaddrstr[3*ETHER_ADDR_LEN];
+ int error = 0;
+
+ if (num >= 0x100)
+ return E2BIG;
+
+ enaddr[2] = cprng_fast32() & 0xff;
+ enaddr[5] = num;
+
+ sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
+
+ if ((error = VIFHYPER_CREATE(num, sc, enaddr, &viu)) != 0) {
+ kmem_free(sc, sizeof(*sc));
+ return error;
+ }
+ sc->sc_viu = viu;
+
+ ifp = &sc->sc_ec.ec_if;
+ snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", VIF_NAME, num);
+ ifp->if_softc = sc;
+
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_init = virtif_init;
+ ifp->if_ioctl = virtif_ioctl;
+ ifp->if_start = virtif_start;
+ ifp->if_stop = virtif_stop;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ if_attach(ifp);
+ ether_ifattach(ifp, enaddr);
+
+ ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr);
+ aprint_normal_ifnet(ifp, "Ethernet address %s\n", enaddrstr);
+
+ if (error) {
+ virtif_unclone(ifp);
+ }
+
+ return error;
+}
+
+static int
+virtif_unclone(struct ifnet *ifp)
+{
+ struct virtif_sc *sc = ifp->if_softc;
+
+ VIFHYPER_DYING(sc->sc_viu);
+
+ virtif_stop(ifp, 1);
+ if_down(ifp);
+
+ VIFHYPER_DESTROY(sc->sc_viu);
+
+ kmem_free(sc, sizeof(*sc));
+
+ ether_ifdetach(ifp);
+ if_detach(ifp);
+
+ return 0;
+}
+
+static int
+virtif_init(struct ifnet *ifp)
+{
+
+ ifp->if_flags |= IFF_RUNNING;
+ return 0;
+}
+
+static int
+virtif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
+{
+ int s, rv;
+
+ s = splnet();
+ rv = ether_ioctl(ifp, cmd, data);
+ if (rv == ENETRESET)
+ rv = 0;
+ splx(s);
+
+ return rv;
+}
+
+/*
+ * Output packets in-context until outgoing queue is empty.
+ * Assume that VIFHYPER_SEND() is fast enough to not make it
+ * necessary to drop kernel_lock.
+ */
+#define LB_SH 32
+static void
+virtif_start(struct ifnet *ifp)
+{
+ struct virtif_sc *sc = ifp->if_softc;
+ struct mbuf *m, *m0;
+ struct iovec io[LB_SH];
+ int i;
+
+ ifp->if_flags |= IFF_OACTIVE;
+
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m0);
+ if (!m0) {
+ break;
+ }
+
+ m = m0;
+ for (i = 0; i < LB_SH && m; i++) {
+ io[i].iov_base = mtod(m, void *);
+ io[i].iov_len = m->m_len;
+ m = m->m_next;
+ }
+ if (i == LB_SH)
+ panic("lazy bum");
+ bpf_mtap(ifp, m0);
+
+ VIFHYPER_SEND(sc->sc_viu, io, i);
+
+ m_freem(m0);
+ }
+
+ ifp->if_flags &= ~IFF_OACTIVE;
+}
+
+static void
+virtif_stop(struct ifnet *ifp, int disable)
+{
+
+ ifp->if_flags &= ~IFF_RUNNING;
+}
+
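+/*
+ * Called by the hypercall layer to deliver a received packet:
+ * copy the iovec into an mbuf chain and feed it to ether_input().
+ */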
+void
+rump_virtif_pktdeliver(struct virtif_sc *sc, struct iovec *iov, size_t iovlen)
+{
+ struct ifnet *ifp = &sc->sc_ec.ec_if;
+ struct mbuf *m;
+ size_t i;
+ int off, olen;
+
+ if ((ifp->if_flags & IFF_RUNNING) == 0)
+ return;
+
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+ if (m == NULL)
+ return; /* drop packet */
+ m->m_len = m->m_pkthdr.len = 0;
+
+ for (i = 0, off = 0; i < iovlen; i++) {
+ olen = m->m_pkthdr.len;
+ m_copyback(m, off, iov[i].iov_len, iov[i].iov_base);
+ off += iov[i].iov_len;
+ if (olen + iov[i].iov_len != m->m_pkthdr.len) {
+ aprint_verbose_ifnet(ifp, "m_copyback failed\n");
+ m_freem(m);
+ return;
+ }
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ KERNEL_LOCK(1, NULL);
+ bpf_mtap(ifp, m);
+ ether_input(ifp, m);
+ KERNEL_UNLOCK_LAST(NULL);
+}
--- /dev/null
+/* $NetBSD: if_virt.h,v 1.2 2013/07/04 11:58:11 pooka Exp $ */
+
+/*
+ * NOTE! This file is supposed to work on !NetBSD platforms.
+ */
+
+#ifndef VIRTIF_BASE
+#error Define VIRTIF_BASE
+#endif
+
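+/*
+ * Construct all externally visible names from VIRTIF_BASE, so e.g.
+ * VIRTIF_BASE=xenif yields the interface name "xenif" and hypercalls
+ * named rumpcomp_xenif_create() and friends.
+ */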
+#define VIF_STRING(x) #x
+#define VIF_STRINGIFY(x) VIF_STRING(x)
+#define VIF_CONCAT(x,y) x##y
+#define VIF_CONCAT3(x,y,z) x##y##z
+#define VIF_BASENAME(x,y) VIF_CONCAT(x,y)
+#define VIF_BASENAME3(x,y,z) VIF_CONCAT3(x,y,z)
+
+#define VIF_CLONER VIF_BASENAME(VIRTIF_BASE,_cloner)
+#define VIF_NAME VIF_STRINGIFY(VIRTIF_BASE)
+
+#define VIFHYPER_CREATE VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_create)
+#define VIFHYPER_DYING VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_dying)
+#define VIFHYPER_DESTROY VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_destroy)
+#define VIFHYPER_SEND VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_send)
+
+struct virtif_sc;
+void rump_virtif_pktdeliver(struct virtif_sc *, struct iovec *, size_t);
--- /dev/null
+/* $NetBSD: rumpcomp_user.h,v 1.4 2013/07/04 11:46:51 pooka Exp $ */
+
+/*
+ * Copyright (c) 2013 Antti Kantee. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+struct virtif_user;
+
+int VIFHYPER_CREATE(int, struct virtif_sc *, uint8_t *,
+ struct virtif_user **);
+void VIFHYPER_DYING(struct virtif_user *);
+void VIFHYPER_DESTROY(struct virtif_user *);
+
+void VIFHYPER_SEND(struct virtif_user *, struct iovec *, size_t);
--- /dev/null
+/* $NetBSD: component.c,v 1.4 2013/07/04 11:46:51 pooka Exp $ */
+
+/*
+ * Copyright (c) 2009 Antti Kantee. All Rights Reserved.
+ *
+ * Development of this software was supported by The Nokia Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: component.c,v 1.4 2013/07/04 11:46:51 pooka Exp $");
+
+#include <sys/param.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+
+#include <net/if.h>
+
+#include "rump_private.h"
+#include "rump_net_private.h"
+#include "if_virt.h"
+
+RUMP_COMPONENT(RUMP_COMPONENT_NET_IF)
+{
+ extern struct if_clone VIF_CLONER; /* XXX */
+
+ if_clone_attach(&VIF_CLONER);
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Antti Kantee. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* XXX */
+struct iovec {
+ void *iov_base;
+ unsigned long iov_len;
+};
+
+#include <mini-os/os.h>
+#include <mini-os/netfront.h>
+
+#include <bmk-core/errno.h>
+#include <bmk-core/memalloc.h>
+#include <bmk-core/string.h>
+#include <bmk-core/sched.h>
+
+#include <bmk-rumpuser/core_types.h>
+#include <bmk-rumpuser/rumpuser.h>
+
+#include "if_virt.h"
+#include "if_virt_user.h"
+
+/*
+ * For now, shovel the packets from the interrupt to a
+ * thread context via an intermediate set of buffers. Need
+ * to fix this a bit down the road.
+ */
+#define MAXPKT 2000
+struct onepkt {
+ unsigned char pkt_data[MAXPKT];
+ int pkt_dlen;
+};
+
+#define NBUF 64
+struct virtif_user {
+ struct netfront_dev *viu_dev;
+ struct bmk_thread *viu_rcvr;
+ struct bmk_thread *viu_thr;
+ struct virtif_sc *viu_vifsc;
+
+ int viu_read;
+ int viu_write;
+ int viu_dying;
+ struct onepkt viu_pkts[NBUF];
+};
+
+/*
+ * Ok, based on how (the unmodified) netfront works, we need to
+ * consume the data here. So store it locally (and revisit some day).
+ */
+static void
+myrecv(struct netfront_dev *dev, unsigned char *data, int dlen)
+{
+ struct virtif_user *viu = netfront_get_private(dev);
+ int nextw;
+
+ /* TODO: we should be at the correct spl already, assert how? */
+
+ nextw = (viu->viu_write+1) % NBUF;
+ /* queue full? drop packet */
+ if (nextw == viu->viu_read) {
+ return;
+ }
+
+ if (dlen > MAXPKT) {
+ minios_printk("myrecv: pkt len %d too big\n", dlen);
+ return;
+ }
+
+ bmk_memcpy(viu->viu_pkts[viu->viu_write].pkt_data, data, dlen);
+ viu->viu_pkts[viu->viu_write].pkt_dlen = dlen;
+ viu->viu_write = nextw;
+
+ if (viu->viu_rcvr)
+ bmk_sched_wake(viu->viu_rcvr);
+}
+
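+/*
+ * Thread which drains the intermediate buffer ring filled by myrecv()
+ * in interrupt context and pushes each packet into the rump kernel
+ * via rump_virtif_pktdeliver(), blocking while the ring is empty.
+ */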
+static void
+pusher(void *arg)
+{
+ struct virtif_user *viu = arg;
+ struct iovec iov;
+ struct onepkt *mypkt;
+ int flags;
+
+ /* give us a rump kernel context */
+ rumpuser__hyp.hyp_schedule();
+ rumpuser__hyp.hyp_lwproc_newlwp(0);
+ rumpuser__hyp.hyp_unschedule();
+
+ local_irq_save(flags);
+ again:
+ while (!viu->viu_dying) {
+ while (viu->viu_read == viu->viu_write) {
+ viu->viu_rcvr = bmk_current;
+ bmk_sched_blockprepare();
+ local_irq_restore(flags);
+ bmk_sched_block();
+ local_irq_save(flags);
+ viu->viu_rcvr = NULL;
+ goto again;
+ }
+ mypkt = &viu->viu_pkts[viu->viu_read];
+ local_irq_restore(flags);
+
+ iov.iov_base = mypkt->pkt_data;
+ iov.iov_len = mypkt->pkt_dlen;
+
+ rumpuser__hyp.hyp_schedule();
+ rump_virtif_pktdeliver(viu->viu_vifsc, &iov, 1);
+ rumpuser__hyp.hyp_unschedule();
+
+ local_irq_save(flags);
+ viu->viu_read = (viu->viu_read+1) % NBUF;
+ }
+ local_irq_restore(flags);
+}
+
+int
+VIFHYPER_CREATE(int devnum, struct virtif_sc *vif_sc, uint8_t *enaddr,
+ struct virtif_user **viup)
+{
+ struct virtif_user *viu = NULL;
+ int rv, nlocks;
+
+ rumpkern_unsched(&nlocks, NULL);
+
+ viu = bmk_memalloc(sizeof(*viu), 0, BMK_MEMWHO_RUMPKERN);
+ if (viu == NULL) {
+ rv = BMK_ENOMEM;
+ goto out;
+ }
+ bmk_memset(viu, 0, sizeof(*viu));
+ viu->viu_vifsc = vif_sc;
+
+ viu->viu_dev = netfront_init(NULL, myrecv, enaddr, NULL, viu);
+ if (!viu->viu_dev) {
+ rv = BMK_EINVAL; /* ? */
+ bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
+ goto out;
+ }
+
+ viu->viu_thr = bmk_sched_create("xenifp",
+ NULL, 1, pusher, viu, NULL, 0);
+ if (viu->viu_thr == NULL) {
+ minios_printk("fatal thread creation failure\n"); /* XXX */
+ minios_do_exit();
+ }
+
+ rv = 0;
+
+ out:
+ rumpkern_sched(nlocks, NULL);
+
+ *viup = viu;
+ return rv;
+}
+
+void
+VIFHYPER_SEND(struct virtif_user *viu,
+ struct iovec *iov, size_t iovlen)
+{
+ size_t tlen, i;
+ int nlocks;
+ void *d;
+ char *d0;
+
+ rumpkern_unsched(&nlocks, NULL);
+ /*
+ * netfront doesn't do scatter-gather, so simply copy
+ * the data into one lump here.  Drop the packet if we
+ * can't allocate temporary memory.
+ */
+ if (iovlen == 1) {
+ d = iov->iov_base;
+ tlen = iov->iov_len;
+ } else {
+ for (i = 0, tlen = 0; i < iovlen; i++) {
+ tlen += iov[i].iov_len;
+ }
+
+ /*
+ * allocate the temp space from RUMPKERN instead of BMK
+ * since there are no huge repercussions whether we
+ * fail or succeed.
+ */
+ d = d0 = bmk_memalloc(tlen, 0, BMK_MEMWHO_RUMPKERN);
+ if (d == NULL)
+ goto out;
+
+ for (i = 0; i < iovlen; i++) {
+ bmk_memcpy(d0, iov[i].iov_base, iov[i].iov_len);
+ d0 += iov[i].iov_len;
+ }
+ }
+
+ netfront_xmit(viu->viu_dev, d, tlen);
+
+ if (iovlen != 1)
+ bmk_memfree(d, BMK_MEMWHO_RUMPKERN);
+
+ out:
+ rumpkern_sched(nlocks, NULL);
+}
+
+void
+VIFHYPER_DYING(struct virtif_user *viu)
+{
+
+ viu->viu_dying = 1;
+ if (viu->viu_rcvr)
+ bmk_sched_wake(viu->viu_rcvr);
+}
+
+void
+VIFHYPER_DESTROY(struct virtif_user *viu)
+{
+
+ ASSERT(viu->viu_dying == 1);
+
+ bmk_sched_join(viu->viu_thr);
+ netfront_shutdown(viu->viu_dev);
+ bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
+}
--- /dev/null
+LIB= rumpxen_xendev
+
+SRCS= xendev_component.c
+SRCS+= busdev.c
+SRCS+= evtdev.c
+SRCS+= privcmd.c
+
+RUMPTOP= ${TOPRUMP}
+
+CPPFLAGS+= -I${RUMPTOP}/librump/rumpkern -I${RUMPTOP}/librump
+CPPFLAGS+= -I${RUMPTOP}/librump/rumpvfs
+CPPFLAGS+= -I${.CURDIR}
+CPPFLAGS+= -I${.CURDIR}/../xen/include -D__RUMP_KERNEL__ -I${.CURDIR}/..
+CPPFLAGS+= -I${.CURDIR}/../../../include
+
+.if ${BUILDRR:Uno} == "true"
+.include "${RUMPRUN_MKCONF}"
+CPPFLAGS+= -I${OBJDIR}/dest.stage/include
+.endif
+
+RUMP_SYM_NORENAME=xenbus_|HYPERVISOR_|minios_|bmk_
+
+.include "${RUMPTOP}/Makefile.rump"
+.include <bsd.lib.mk>
+.include <bsd.klinks.mk>
--- /dev/null
+/*
+ * xenbus_dev.c
+ *
+ * Driver giving user-space access to the kernel's xenbus connection
+ * to xenstore. Adapted heavily from NetBSD's xenbus_dev.c, so much
+ * so that practically none of the original remains.
+ *
+ * Copyright (c) 2014 Citrix
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * (From original xenbus_dev.c:)
+ * Copyright (c) 2005, Christian Limpach
+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: $");
+
+#include "rumpxen_xendev.h"
+
+#include <bmk-rumpuser/rumpuser.h>
+
+#define BUFFER_SIZE (XENSTORE_PAYLOAD_MAX+sizeof(struct xsd_sockmsg))
+
+#include <xen/io/xs_wire.h>
+
+#include <mini-os/xenbus.h>
+#include <mini-os/wait.h>
+
+/*----- data structures -----*/
+
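+/* Per-request bookkeeping for a message forwarded to xenstored on
+ * behalf of the user; xb_id is our xenbus request id, user_id the
+ * id the user supplied and expects back in the reply. */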
+struct xenbus_dev_request {
+ struct xenbus_event xb;
+ uint32_t xb_id, user_id;
+ uint32_t req_type;
+ union {
+ struct xenbus_dev_transaction *trans;
+ struct xenbus_dev_watch *watch;
+ } u;
+};
+
+struct xenbus_dev_transaction {
+ LIST_ENTRY(xenbus_dev_transaction) entry;
+ xenbus_transaction_t tx_id;
+ struct xenbus_dev_request destroy;
+};
+
+struct xenbus_dev_watch {
+ struct xenbus_watch xb;
+ LIST_ENTRY(xenbus_dev_watch) entry;
+ struct xsd_sockmsg *wmsg;
+ char *path, *user_token;
+ _Bool visible_to_user;
+ struct xenbus_dev_request destroy;
+};
+
+struct xenbus_dev_data {
+ kmutex_t lock;
+ int outstanding_requests;
+ LIST_HEAD(, xenbus_dev_transaction) transactions;
+ LIST_HEAD(, xenbus_dev_watch) watches;
+ struct xenbus_event_queue replies; /* Entirely unread by user. */
+
+ _Bool queued_enomem, want_restart;
+
+ /* Partially written request(s). */
+ unsigned int wbuf_used;
+ union {
+ struct xsd_sockmsg msg;
+ unsigned char buffer[BUFFER_SIZE];
+ } wbuf;
+
+ /* Partially read response. */
+ struct xsd_sockmsg *rmsg; /* .id==user_id; data follows */
+ int rmsg_done;
+ void (*rmsg_free)(void*);
+
+ struct selinfo selinfo;
+ /* The lock used for the purposes described in select(9)
+ * is xenbus_req_lock, not d->lock. */
+};
+
+/*----- helpers -----*/
+
+static void
+free_watch(struct xenbus_dev_watch *watch)
+{
+ xbd_free(watch->path);
+ xbd_free(watch->user_token);
+ xbd_free(watch);
+}
+
+static struct xenbus_dev_transaction*
+find_transaction(struct xenbus_dev_data *d, xenbus_transaction_t id)
+{
+ struct xenbus_dev_transaction *trans;
+
+ LIST_FOREACH(trans, &d->transactions, entry)
+ if (trans->tx_id == id)
+ return trans;
+ /* not found */
+ return 0;
+}
+
+static struct xenbus_dev_watch*
+find_visible_watch(struct xenbus_dev_data *d,
+ const char *path, const char *token)
+{
+ struct xenbus_dev_watch *watch;
+
+ LIST_FOREACH(watch, &d->watches, entry)
+ if (watch->visible_to_user &&
+ !strcmp(path, watch->path) &&
+ !strcmp(token, watch->user_token))
+ return watch;
+ /* not found */
+ return 0;
+}
+
+/*----- request handling (writes to the device) -----*/
+
+static void
+make_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
+ uint32_t tx_id, const struct write_req *wreqs, int num_wreqs)
+/* Caller should have filled in req->req_type, ->u, and (if needed)
+ * ->user_id. We deal with ->xb and ->xb_id. */
+{
+ req->xb.watch = 0;
+ req->xb_id = xenbus_id_allocate(&d->replies, &req->xb);
+
+ KASSERT(d->outstanding_requests < INT_MAX);
+ d->outstanding_requests++;
+
+ xenbus_xb_write(req->req_type, req->xb_id, tx_id,
+ wreqs, num_wreqs);
+}
+
+static void
+watch_write_req_string(struct write_req **wreqp, const char *string)
+{
+ struct write_req *wreq = (*wreqp)++;
+ int l = strlen(string);
+ wreq->len = l+1;
+ wreq->data = string;
+}
+
+static void
+make_watch_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
+ uint32_t tx_id, struct xenbus_dev_watch *watch)
+{
+ struct write_req wreqs[2], *wreq = wreqs;
+ watch_write_req_string(&wreq, watch->path);
+ watch_write_req_string(&wreq, watch->xb.token);
+ KASSERT((char*)wreq == (char*)wreqs + sizeof(wreqs));
+
+ req->u.watch = watch;
+ make_request(d, req, tx_id, wreqs, 2);
+}
+
+static void
+write_trouble(struct xenbus_dev_data *d, const char *what)
+{
+ printf("xenbus dev: bad write: %s\n", what);
+
+#ifdef RUMP_DEV_XEN_DEBUG
+ {
+ unsigned int i;
+ printf(" %d bytes:", d->wbuf_used);
+ for (i=0; i<d->wbuf_used; i++) {
+ if (!(i & 3)) printf(" ");
+ printf("%02x", d->wbuf.buffer[i]);
+ }
+ printf(".\n");
+ }
+#endif /*RUMP_DEV_XEN_DEBUG*/
+
+ d->wbuf_used = 0; /* discard everything buffered */
+}
+
+/* void __NORETURN__ WTROUBLE(const char *details_without_newline);
+ * assumes: struct xenbus_dev_data *d;
+ * int err;
+ * end: */
+#define WTROUBLE(s) do{ write_trouble(d,s); err = EINVAL; goto end; }while(0)
+
+static void
+forward_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req)
+{
+ struct write_req wreq = {
+ d->wbuf.buffer + sizeof(d->wbuf.msg),
+ d->wbuf_used - sizeof(d->wbuf.msg),
+ };
+
+ make_request(d, req, d->wbuf.msg.tx_id, &wreq, 1);
+}
+
+static _Bool
+watch_message_parse_string(const char **p, const char *end,
+ const char **string_r)
+{
+ const char *nul = memchr(*p, 0, end - *p);
+ if (!nul)
+ return 0;
+
+ *string_r = *p;
+ *p = nul+1;
+
+ return 1;
+}
+
+static _Bool
+watch_message_parse(const struct xsd_sockmsg *msg,
+ const char **path_r, const char **token_r)
+{
+ const char *begin = (const char*)msg;
+ const char *p = begin + sizeof(*msg);
+ const char *end = p + msg->len;
+ KASSERT(p <= end);
+
+ return
+ watch_message_parse_string(&p, end, path_r) &&
+ watch_message_parse_string(&p, end, token_r);
+}
+
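+/*
+ * Handle one complete request sitting in d->wbuf: validate it,
+ * record any transaction or watch state we need for translating the
+ * reply (and for cleanup on close), then forward it to xenstored.
+ */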
+static int
+process_request(struct xenbus_dev_data *d)
+{
+ struct xenbus_dev_request *req;
+ struct xenbus_dev_transaction *trans;
+ struct xenbus_dev_watch *watch_free = 0, *watch;
+ const char *wpath, *wtoken;
+ int err;
+
+ DPRINTF(("/dev/xen/xenbus: request, type=%d\n",
+ d->wbuf.msg.type));
+
+ req = xbd_malloc(sizeof(*req));
+ if (!req) {
+ err = ENOMEM;
+ goto end;
+ }
+ req->user_id = d->wbuf.msg.req_id;
+ req->req_type = d->wbuf.msg.type;
+
+ switch (d->wbuf.msg.type) {
+ case XS_DIRECTORY:
+ case XS_READ:
+ case XS_GET_PERMS:
+ case XS_GET_DOMAIN_PATH:
+ case XS_IS_DOMAIN_INTRODUCED:
+ case XS_WRITE:
+ case XS_MKDIR:
+ case XS_RM:
+ case XS_SET_PERMS:
+ if (d->wbuf.msg.tx_id) {
+ if (!find_transaction(d, d->wbuf.msg.tx_id))
+ WTROUBLE("unknown transaction");
+ }
+ forward_request(d, req);
+ break;
+
+ case XS_TRANSACTION_START:
+ if (d->wbuf.msg.tx_id)
+ WTROUBLE("nested transaction");
+ req->u.trans = xbd_malloc(sizeof(*req->u.trans));
+ if (!req->u.trans) {
+ err = ENOMEM;
+ goto end;
+ }
+ forward_request(d, req);
+ break;
+
+ case XS_TRANSACTION_END:
+ if (!d->wbuf.msg.tx_id)
+ WTROUBLE("ending zero transaction");
+ req->u.trans = trans = find_transaction(d, d->wbuf.msg.tx_id);
+ if (!trans)
+ WTROUBLE("ending unknown transaction");
+ LIST_REMOVE(trans, entry); /* prevent more reqs using it */
+ forward_request(d, req);
+ break;
+
+ case XS_WATCH:
+ if (d->wbuf.msg.tx_id)
+ WTROUBLE("XS_WATCH with transaction");
+ if (!watch_message_parse(&d->wbuf.msg, &wpath, &wtoken))
+ WTROUBLE("bad XS_WATCH message");
+
+ watch = watch_free = xbd_malloc(sizeof(*watch));
+ if (!watch) {
+ err = ENOMEM;
+ goto end;
+ }
+
+ watch->path = xbd_strdup(wpath);
+ watch->user_token = xbd_strdup(wtoken);
+ if (!watch->path || !watch->user_token) {
+ err = ENOMEM;
+ goto end;
+ }
+
+ watch->xb.events = &d->replies;
+ xenbus_watch_prepare(&watch->xb);
+
+ watch_free = 0; /* we are committed */
+ watch->visible_to_user = 0;
+ LIST_INSERT_HEAD(&d->watches, watch, entry);
+ make_watch_request(d, req, d->wbuf.msg.tx_id, watch);
+ break;
+
+ case XS_UNWATCH:
+ if (d->wbuf.msg.tx_id)
+ WTROUBLE("XS_UNWATCH with transaction");
+ if (!watch_message_parse(&d->wbuf.msg, &wpath, &wtoken))
+ WTROUBLE("bad XS_WATCH message");
+
+ watch = find_visible_watch(d, wpath, wtoken);
+ if (!watch)
+ WTROUBLE("unwatch nonexistent watch");
+
+ watch->visible_to_user = 0;
+ make_watch_request(d, req, d->wbuf.msg.tx_id, watch);
+ break;
+
+ default:
+ WTROUBLE("unknown request message type");
+ }
+
+ err = 0;
+end:
+ if (watch_free)
+ free_watch(watch_free);
+ return err;
+}
+
+static int
+xenbus_dev_write(struct file *fp, off_t *offset, struct uio *uio,
+ kauth_cred_t cred, int flags)
+{
+ struct xenbus_dev_data *d = fp->f_data;
+ int err;
+
+ DPRINTF(("/dev/xen/xenbus: write...\n"));
+
+ if (uio->uio_offset < 0)
+ return EINVAL;
+
+ mutex_enter(&d->lock);
+
+ for (;;) { /* keep reading more until we're done */
+
+ if (!uio->uio_resid)
+ break;
+
+ uio->uio_offset = d->wbuf_used;
+ err = uiomove(d->wbuf.buffer + d->wbuf_used,
+ sizeof(d->wbuf.buffer) - d->wbuf_used,
+ uio);
+ d->wbuf_used = uio->uio_offset;
+ if (err)
+ goto end;
+
+ for (;;) { /* process message(s) in the buffer */
+
+ if (d->wbuf_used < sizeof(d->wbuf.msg))
+ break;
+
+ if (d->wbuf.msg.len > XENSTORE_PAYLOAD_MAX)
+ WTROUBLE("too much payload in packet");
+
+ uint32_t packetlen =
+ d->wbuf.msg.len + sizeof(d->wbuf.msg);
+
+ KASSERT(packetlen <= sizeof(d->wbuf.buffer));
+
+ if (d->wbuf_used < packetlen)
+ break;
+
+ err = process_request(d);
+
+ if (d->wbuf_used) {
+ /* Remove from the buffer before checking
+ * for errors - but some errors may have
+ * emptied the buffer already. */
+ d->wbuf_used -= packetlen;
+ memmove(d->wbuf.buffer,
+ d->wbuf.buffer + packetlen,
+ d->wbuf_used);
+ }
+
+ if (err)
+ goto end;
+ }
+ }
+
+ err = 0;
+end:
+ mutex_exit(&d->lock);
+
+ DPRINTF(("/dev/xen/xenbus: write done, err=%d\n", err));
+ return err;
+}
+
+/*----- response and watch event handling (reads from the device) -----*/
+
+static struct xsd_sockmsg*
+process_watch_event(struct xenbus_dev_data *d, struct xenbus_event *event,
+ struct xenbus_dev_watch *watch,
+ void (**mfree_r)(void*))
+{
+
+ /* We need to make a new XS_WATCH_EVENT message because the
+ * one from xenstored (a) isn't visible to us here and (b)
+ * anyway has the wrong token in it. */
+
+ DPRINTF(("/dev/xen/xenbus: watch event,"
+ " wpath=%s user_token=%s epath=%s xb.token=%s\n",
+ watch->path, watch->user_token,
+ event->path, watch->xb.token));
+
+ /* Define the parts of the message */
+
+#define WATCH_MESSAGE_PART_STRING(PART,x) \
+ PART(strlen((x)) + 1, memcpy(p, (x), sz))
+
+#define WATCH_MESSAGE_PARTS(PART) \
+ PART(sizeof(struct xsd_sockmsg), (void)0) \
+ WATCH_MESSAGE_PART_STRING(PART,event->path) \
+ WATCH_MESSAGE_PART_STRING(PART,watch->user_token)
+
+ /* Compute the size */
+
+ size_t totalsz = 0;
+ size_t sz = 0;
+
+#define WATCH_MESSAGE_PART_ADD_SIZE(calcpartsz, fill) \
+ totalsz += (calcpartsz);
+
+ WATCH_MESSAGE_PARTS(WATCH_MESSAGE_PART_ADD_SIZE);
+
+ DPRINTF(("/dev/xen/xenbus: watch event allocating %lu\n",
+ (unsigned long)totalsz));
+
+ /* Allocate it and fill in the header */
+
+ struct xsd_sockmsg *reply = xbd_malloc(totalsz);
+ if (!reply) {
+ printf("xenbus dev: out of memory for watch event"
+ " wpath=`%s' epath=`%s'\n",
+ watch->path, event->path);
+ d->queued_enomem = 1;
+ goto end;
+ }
+
+ bzero(reply, sizeof(*reply));
+ reply->type = XS_WATCH_EVENT;
+ reply->len = totalsz - sizeof(*reply);
+
+ char *p = (void*)reply;
+
+ /* Fill in the rest of the message */
+
+#define WATCH_MESSAGE_PART_ADD(calcpartsz, fill) \
+ sz = (calcpartsz); \
+ fill; \
+ p += sz;
+
+ WATCH_MESSAGE_PARTS(WATCH_MESSAGE_PART_ADD);
+
+ KASSERT(p == (const char*)reply + totalsz);
+
+ /* Now we are done */
+
+end:
+ xenbus_free(event);
+ *mfree_r = xbd_free;
+ return reply;
+}
+
+/* Returned value is from malloc() */
+static struct xsd_sockmsg*
+process_response(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
+ void (**mfree_r)(void*))
+{
+ struct xenbus_dev_watch *watch;
+ struct xsd_sockmsg *msg = req->xb.reply;
+
+ msg->req_id = req->user_id;
+
+ _Bool error = msg->type == XS_ERROR;
+ KASSERT(error || msg->type == req->req_type);
+
+ DPRINTF(("/dev/xen/xenbus: response, req_type=%d msg->type=%d\n",
+ req->req_type, msg->type));
+
+ switch (req->req_type) {
+
+ case XS_TRANSACTION_START:
+ if (error)
+ break;
+ KASSERT(msg->len >= 2);
+ KASSERT(!((uint8_t*)(msg+1))[msg->len-1]);
+ req->u.trans->tx_id =
+ strtoul((char*)&msg + sizeof(*msg),
+ 0, 0);
+ LIST_INSERT_HEAD(&d->transactions, req->u.trans,
+ entry);
+ break;
+
+ case XS_TRANSACTION_END:
+ xbd_free(req->u.trans);
+ break;
+
+ case XS_WATCH:
+ watch = req->u.watch;
+ if (error)
+ goto do_unwatch;
+ watch->visible_to_user = 1;
+ break;
+
+ case XS_UNWATCH:
+ KASSERT(!error);
+ watch = req->u.watch;
+ do_unwatch:
+ KASSERT(!watch->visible_to_user);
+ LIST_REMOVE(watch, entry);
+ xenbus_watch_release(&watch->xb);
+ free_watch(watch);
+ break;
+
+ }
+
+ xenbus_id_release(req->xb_id);
+ xbd_free(req);
+ KASSERT(d->outstanding_requests > 0);
+ d->outstanding_requests--;
+
+ *mfree_r = xenbus_free;
+ return msg;
+}
+
+static struct xsd_sockmsg*
+process_event(struct xenbus_dev_data *d, struct xenbus_event *event,
+ void (**mfree_r)(void*))
+{
+ if (event->watch) {
+ struct xenbus_dev_watch *watch =
+ container_of(event->watch, struct xenbus_dev_watch, xb);
+
+ return process_watch_event(d, event, watch, mfree_r);
+
+ } else {
+ struct xenbus_dev_request *req =
+ container_of(event, struct xenbus_dev_request, xb);
+
+ return process_response(d, req, mfree_r);
+ }
+
+}
+
+static struct xsd_sockmsg*
+next_event_msg(struct xenbus_dev_data *d, struct file *fp, int *err_r,
+ void (**mfree_r)(void*))
+/* If !err_r, always blocks and always returns successfully.
+ * If !!err_r, will block iff user process read should block:
+ * will either return successfully, or set *err_r and return 0.
+ *
+ * Must be called with d->lock held; may temporarily release it. */
+{
+ int nlocks;
+ DEFINE_WAIT(w);
+ spin_lock(&xenbus_req_lock);
+
+ while (STAILQ_EMPTY(&d->replies.events)) {
+ if (err_r) {
+ if (d->want_restart) {
+ *err_r = ERESTART;
+ goto fail;
+ }
+ if (fp->f_flag & FNONBLOCK) {
+ *err_r = EAGAIN;
+ goto fail;
+ }
+ }
+
+ DPRINTF(("/dev/xen/xenbus: about to block err_r=%p\n", err_r));
+
+ minios_add_waiter(w, d->replies.waitq);
+ spin_unlock(&xenbus_req_lock);
+ mutex_exit(&d->lock);
+ rumpkern_unsched(&nlocks, 0);
+
+ minios_wait(w);
+
+ rumpkern_sched(nlocks, 0);
+ mutex_enter(&d->lock);
+ spin_lock(&xenbus_req_lock);
+ minios_remove_waiter(w, d->replies.waitq);
+ }
+ struct xenbus_event *event = STAILQ_FIRST(&d->replies.events);
+ STAILQ_REMOVE_HEAD(&d->replies.events, entry);
+
+ spin_unlock(&xenbus_req_lock);
+
+ DPRINTF(("/dev/xen/xenbus: next_event_msg found an event %p\n",event));
+ return process_event(d, event, mfree_r);
+
+fail:
+ DPRINTF(("/dev/xen/xenbus: not blocking, returning %d\n", *err_r));
+ spin_unlock(&xenbus_req_lock);
+ return 0;
+}
+
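+/*
+ * Copy replies and watch events to the user one message at a time;
+ * d->rmsg/d->rmsg_done carry a partially read message across calls.
+ */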
+static int
+xenbus_dev_read(struct file *fp, off_t *offset, struct uio *uio,
+ kauth_cred_t cred, int flags)
+{
+ struct xenbus_dev_data *d = fp->f_data;
+ size_t org_resid = uio->uio_resid;
+ int err;
+
+ DPRINTF(("/dev/xen/xenbus: read...\n"));
+ mutex_enter(&d->lock);
+
+ for (;;) {
+ DPRINTF(("/dev/xen/xenbus: read... uio_resid=%lu (org=%lu)"
+ " q.enomem=%d\n",
+ (unsigned long)uio->uio_resid,
+ (unsigned long)org_resid,
+ d->queued_enomem));
+ if (d->queued_enomem) {
+ if (org_resid != uio->uio_resid)
+ /* return early now; report it next time */
+ break;
+ err = ENOMEM;
+ d->queued_enomem = 0;
+ goto end;
+ }
+
+ if (!uio->uio_resid)
+ /* done what we have been asked to do */
+ break;
+
+ if (!d->rmsg) {
+ d->rmsg = next_event_msg(d, fp, &err, &d->rmsg_free);
+ if (!d->rmsg) {
+ if (uio->uio_resid != org_resid)
+ /* Done something, claim success. */
+ break;
+ goto end;
+ }
+ }
+
+ uint32_t avail = sizeof(*d->rmsg) + d->rmsg->len;
+ KASSERT(avail < BUFFER_SIZE*2); /* sanity check */
+ KASSERT(avail > 0);
+ KASSERT(d->rmsg_done <= avail);
+
+ DPRINTF(("/dev/xen/xenbus: read... rmsg->len=%lu"
+ " msg_done=%lu avail=%lu\n",
+ (unsigned long)d->rmsg->len,
+ (unsigned long)d->rmsg_done,
+ (unsigned long)avail));
+
+ uio->uio_offset = d->rmsg_done;
+ err = uiomove((char*)d->rmsg + d->rmsg_done,
+ avail - d->rmsg_done,
+ uio);
+ d->rmsg_done = uio->uio_offset;
+ if (err)
+ goto end;
+
+ if (d->rmsg_done == avail) {
+ DPRINTF(("/dev/xen/xenbus: read... msg complete\n"));
+ d->rmsg_free(d->rmsg);
+ d->rmsg = 0;
+ d->rmsg_done = 0;
+ }
+ }
+
+ err = 0;
+
+end:
+ mutex_exit(&d->lock);
+ DPRINTF(("/dev/xen/xenbus: read done, err=%d\n", err));
+ return err;
+}
+
+/*----- more exciting reading -----*/
+
+#define RBITS (POLLIN | POLLRDNORM)
+#define WBITS (POLLOUT | POLLWRNORM)
+
+static void
+xenbus_dev_xb_wakeup(struct xenbus_event_queue *queue)
+{
+ /* called with req_lock held */
+ DPRINTF(("/dev/xen/xenbus: wakeup\n"));
+ struct xenbus_dev_data *d =
+ container_of(queue, struct xenbus_dev_data, replies);
+ minios_wake_up(&d->replies.waitq);
+ selnotify(&d->selinfo, RBITS, NOTE_SUBMIT);
+}
+
+static void
+xenbus_dev_restart(file_t *fp)
+{
+ struct xenbus_dev_data *d = fp->f_data;
+
+ DPRINTF(("/dev/xen/xenbus: restart!\n"));
+
+ mutex_enter(&d->lock);
+ spin_lock(&xenbus_req_lock);
+
+ d->want_restart |= 1;
+ minios_wake_up(&d->replies.waitq);
+
+ spin_unlock(&xenbus_req_lock);
+ mutex_exit(&d->lock);
+}
+
+static int
+xenbus_dev_poll(struct file *fp, int events)
+{
+ struct xenbus_dev_data *d = fp->f_data;
+ int revents = 0;
+
+ DPRINTF(("/dev/xen/xenbus: poll events=0%o...\n", events));
+
+ mutex_enter(&d->lock);
+ spin_lock(&xenbus_req_lock);
+
+ /* always writeable - we don't do proper blocking for writing
+ * since this can only wait at most until other requests have
+ * been handled by xenstored */
+ revents |= events & WBITS;
+
+ if (events & RBITS)
+ if (d->rmsg || d->queued_enomem || d->want_restart)
+ revents |= events & RBITS;
+
+ if (!revents) {
+ if (events & RBITS)
+ selrecord(curlwp, &d->selinfo);
+ }
+
+ spin_unlock(&xenbus_req_lock);
+ mutex_exit(&d->lock);
+
+ DPRINTF(("/dev/xen/xenbus: poll events=0%o done, revents=0%o\n",
+ events, revents));
+ return revents;
+}
+
+/*----- setup etc. -----*/
+
+static int
+xenbus_dev_close(struct file *fp)
+{
+ struct xenbus_dev_data *d = fp->f_data;
+
+ DPRINTF(("/dev/xen/xenbus: close...\n"));
+
+ /* Not needed against concurrent access (we assume!)
+ * but next_event_msg will want to unlock and relock it */
+ mutex_enter(&d->lock);
+
+ xbd_free(d->rmsg);
+ d->rmsg = 0;
+
+ for (;;) {
+ DPRINTF(("/dev/xen/xenbus: close loop\n"));
+ /* We need to go round this again and again because
+ * there might be requests in flight. Eg if the
+ * user has an XS_WATCH in flight we have to wait for it
+ * to be done and then unwatch it again. */
+
+ struct xenbus_dev_watch *watch, *watch_tmp;
+ LIST_FOREACH_SAFE(watch, &d->watches, entry, watch_tmp) {
+ DPRINTF(("/dev/xen/xenbus: close watch %p %d\n",
+ watch, watch->visible_to_user));
+ if (watch->visible_to_user) {
+ /* mirrors process_request XS_UNWATCH */
+ watch->destroy.req_type = XS_UNWATCH;
+ watch->visible_to_user = 0;
+ make_watch_request(d, &watch->destroy, 0,
+ watch);
+ }
+ }
+
+ struct xenbus_dev_transaction *trans, *trans_tmp;
+ const struct write_req trans_end_data = { "F", 2 };
+ LIST_FOREACH_SAFE(trans, &d->transactions, entry, trans_tmp) {
+ DPRINTF(("/dev/xen/xenbus: close transaction"
+ " %p %"PRIx32"\n",
+ trans, (unsigned int)trans->tx_id));
+ /* mirrors process_request XS_TRANSACTION_END */
+ trans->destroy.req_type = XS_TRANSACTION_END;
+ trans->destroy.u.trans = trans;
+ LIST_REMOVE(trans, entry);
+ make_request(d, &trans->destroy, trans->tx_id,
+ &trans_end_data, 1);
+ }
+
+ DPRINTF(("/dev/xen/xenbus: close outstanding=%d\n",
+ d->outstanding_requests));
+ KASSERT(d->outstanding_requests >= 0);
+ if (!d->outstanding_requests)
+ break;
+
+ void (*dfree)(void*);
+ struct xsd_sockmsg *discard = next_event_msg(d, fp, 0, &dfree);
+ KASSERT(discard);
+ dfree(discard);
+ }
+
+ KASSERT(!d->outstanding_requests);
+ KASSERT(!d->rmsg);
+ KASSERT(LIST_EMPTY(&d->transactions));
+ KASSERT(LIST_EMPTY(&d->watches));
+
+ DPRINTF(("/dev/xen/xenbus: close seldestroy outstanding=%d\n",
+ d->outstanding_requests));
+ seldestroy(&d->selinfo);
+ xbd_free(d);
+
+ DPRINTF(("/dev/xen/xenbus: close done.\n"));
+ return 0;
+}
+
+const struct fileops xenbus_dev_fileops = {
+ .fo_read = xenbus_dev_read,
+ .fo_write = xenbus_dev_write,
+ .fo_ioctl = fbadop_ioctl,
+ .fo_fcntl = fnullop_fcntl,
+ .fo_poll = xenbus_dev_poll,
+ .fo_stat = fbadop_stat,
+ .fo_close = xenbus_dev_close,
+ .fo_kqfilter = fnullop_kqfilter,
+ .fo_restart = xenbus_dev_restart,
+};
+
+int
+xenbus_dev_open(struct file *fp, void **fdata_r)
+{
+ struct xenbus_dev_data *d;
+
+ d = xbd_malloc(sizeof(*d));
+ if (!d)
+ return ENOMEM;
+
+ mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
+ d->outstanding_requests = 0;
+ LIST_INIT(&d->transactions);
+ LIST_INIT(&d->watches);
+ xenbus_event_queue_init(&d->replies);
+ d->replies.wakeup = xenbus_dev_xb_wakeup;
+ d->queued_enomem = 0;
+ d->want_restart = 0;
+ d->wbuf_used = 0;
+ d->rmsg = 0;
+ d->rmsg_done = 0;
+ selinit(&d->selinfo);
+
+ *fdata_r = d;
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- /dev/null
+/*
+ * evtdev.c
+ *
+ * Driver giving user-space access to the kernel's event channel.
+ *
+ * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/atomic.h>
+#include <sys/kmem.h>
+#include <sys/resource.h>
+#include <sys/vnode.h>
+
+#include "rumpxen_xendev.h"
+
+#include <bmk-rumpuser/rumpuser.h>
+#include <bmk-core/memalloc.h>
+
+#include <mini-os/events.h>
+#include <mini-os/wait.h>
+
+/* For ioctl interface. */
+#include "xenio3.h"
+
+/*----- data structures -----*/
+#define EVTDEV_RING_SIZE 2048
+#define EVTDEV_RING_MASK 2047
+#define BYTES_PER_PORT 4
+
+/* See the rump_evtdev_callback for locking information */
+u_int xenevt_ring[EVTDEV_RING_SIZE];
+u_int xenevt_ring_prod, xenevt_ring_cons;
+
+struct xenevt_dev_data {
+ u_int ring[EVTDEV_RING_SIZE];
+ u_int ring_cons;
+ u_int ring_prod;
+#define EVTDEV_F_OVERFLOW 0x1 /* ring overflow */
+ u_int flags;
+
+ kmutex_t lock;
+ kcondvar_t cv;
+ struct selinfo selinfo; /* used by poll, see select(9) */
+};
+
+/* Kernel event -> device instance mapping */
+static kmutex_t devevent_lock;
+static struct xenevt_dev_data *devevents[NR_EVENT_CHANNELS];
+
+/*----- helpers -----*/
+#define WBITS (POLLOUT | POLLWRNORM)
+#define RBITS (POLLIN | POLLRDNORM)
+
+/* call with d->lock held */
+static void queue(struct xenevt_dev_data *d, u_int port)
+{
+ KASSERT(mutex_owned(&d->lock));
+
+ if (d->ring_cons == ((d->ring_prod + 1) & EVTDEV_RING_MASK)) {
+ d->flags |= EVTDEV_F_OVERFLOW;
+ printf("evtdev: ring overflow port %d\n", port);
+ } else {
+ d->ring[d->ring_prod] = port;
+ membar_producer();
+ d->ring_prod = (d->ring_prod + 1) & EVTDEV_RING_MASK;
+ }
+ /* notify */
+ cv_signal(&d->cv);
+ selnotify(&d->selinfo, RBITS, NOTE_SUBMIT);
+}
+
+/* This callback is serialised by mini-os */
+static void rump_evtdev_callback(u_int port)
+{
+ if (xenevt_ring_cons == ((xenevt_ring_prod + 1) & EVTDEV_RING_MASK)) {
+ printf("xenevt driver ring overflowed!\n");
+ } else {
+ xenevt_ring[xenevt_ring_prod] = port;
+ membar_producer();
+ xenevt_ring_prod = (xenevt_ring_prod + 1) & EVTDEV_RING_MASK;
+ }
+
+ minios_wake_up(&minios_events_waitq);
+}
+
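+/*
+ * Worker thread: drain ports from the global ring filled by
+ * rump_evtdev_callback() and queue them on the per-device rings,
+ * waking any readers or pollers.
+ */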
+static void xenevt_thread_func(void *ign)
+{
+ u_int prod = xenevt_ring_prod;
+ u_int cons;
+
+ /* give us a rump kernel context */
+ rumpuser__hyp.hyp_schedule();
+ rumpuser__hyp.hyp_lwproc_newlwp(0);
+ rumpuser__hyp.hyp_unschedule();
+
+ for (;;) {
+ minios_wait_event(minios_events_waitq, xenevt_ring_prod != prod);
+ prod = xenevt_ring_prod;
+ cons = xenevt_ring_cons;
+
+ membar_sync();
+
+ while (cons != prod) {
+ u_int port = xenevt_ring[cons];
+ struct xenevt_dev_data *d;
+
+ KASSERT(port < NR_EVENT_CHANNELS);
+
+ mutex_enter(&devevent_lock);
+
+ d = devevents[port];
+
+ KASSERT(d);
+
+ mutex_enter(&d->lock);
+
+ queue(d, port);
+
+ mutex_exit(&d->lock);
+ mutex_exit(&devevent_lock);
+
+ cons++;
+ }
+
+ membar_sync();
+
+ xenevt_ring_cons = cons;
+ }
+}
+
+/*----- request handling (writes to the device) -----*/
+static int
+xenevt_dev_write(struct file *fp, off_t *offset, struct uio *uio,
+ kauth_cred_t cred, int flags)
+{
+ struct xenevt_dev_data *d = fp->f_data;
+ uint16_t *chans = NULL;
+ int i, nentries, err;
+ size_t size = 0;
+
+ DPRINTF(("/dev/xenevt: write...\n"));
+
+ if (uio->uio_resid == 0) {
+ err = 0;
+ goto out;
+ }
+
+ nentries = uio->uio_resid / sizeof(uint16_t);
+ if (nentries > NR_EVENT_CHANNELS) {
+ err = EMSGSIZE;
+ goto out;
+ }
+
+ size = nentries * sizeof(uint16_t);
+ chans = kmem_alloc(size, KM_SLEEP);
+
+ err = uiomove(chans, uio->uio_resid, uio);
+ if (err) goto out;
+
+ mutex_enter(&devevent_lock);
+ for (i = 0; i < nentries; i++) {
+ if (chans[i] < NR_EVENT_CHANNELS &&
+ devevents[chans[i]] == d)
+ minios_unmask_evtchn(chans[i]);
+ }
+ mutex_exit(&devevent_lock);
+
+ KASSERT(err == 0);
+out:
+ DPRINTF(("/dev/xenevt: write done, err=%d\n", err));
+ if (size) kmem_free(chans, size);
+ return err;
+}
+
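+/*
+ * Copy pending ports from the per-device ring to the user buffer,
+ * handling ring wrap-around and blocking (unless FNONBLOCK is set)
+ * while the ring is empty.
+ */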
+static int
+xenevt_dev_read(struct file *fp, off_t *offset, struct uio *uio,
+ kauth_cred_t cred, int read_flags)
+{
+ struct xenevt_dev_data *d = fp->f_data;
+ u_int cons, prod, len, uio_len;
+ int err;
+
+ DPRINTF(("/dev/xenevt: read...\n"));
+
+ mutex_enter(&d->lock);
+
+ err = 0;
+ while (err == 0) {
+ cons = d->ring_cons;
+ prod = d->ring_prod;
+
+ if (cons != prod) break; /* data available */
+
+ if (d->flags & EVTDEV_F_OVERFLOW) break;
+
+ /* nothing to read */
+ if ((fp->f_flag & FNONBLOCK) == 0)
+ err = cv_wait_sig(&d->cv, &d->lock);
+ else
+ err = EAGAIN;
+ }
+
+ if (err == 0 && (d->flags & EVTDEV_F_OVERFLOW))
+ err = EFBIG;
+
+ if (err) goto out;
+
+ uio_len = uio->uio_resid / BYTES_PER_PORT;
+ if (cons <= prod)
+ len = prod - cons;
+ else
+ len = EVTDEV_RING_SIZE - cons;
+ if (len > uio_len)
+ len = uio_len;
+ err = uiomove(&d->ring[cons], len * BYTES_PER_PORT, uio);
+ if (err) goto out;
+
+ cons = (cons + len) & EVTDEV_RING_MASK;
+ uio_len = uio->uio_resid / BYTES_PER_PORT;
+ if (uio_len == 0) goto done;
+
+ /* ring wrapped */
+ len = prod - cons;
+ if (len > uio_len)
+ len = uio_len;
+ err = uiomove(&d->ring[cons], len * BYTES_PER_PORT, uio);
+ if (err) goto out;
+ cons = (cons + len) & EVTDEV_RING_MASK;
+
+done:
+ d->ring_cons = cons;
+out:
+ mutex_exit(&d->lock);
+ DPRINTF(("/dev/xenevt: read done, err=%d\n", err));
+ return err;
+}
+
+/*----- more exciting reading -----*/
+static int
+xenevt_dev_poll(struct file *fp, int events)
+{
+ struct xenevt_dev_data *d = fp->f_data;
+ int revents = 0;
+
+ DPRINTF(("/dev/xenevt: poll events=0x%x...\n", events));
+
+ mutex_enter(&d->lock);
+
+ /* always writable because write is used to unmask event
+ * channel */
+ revents |= events & WBITS;
+
+ if ((events & RBITS) && (d->ring_prod != d->ring_cons))
+ revents |= events & RBITS;
+
+ /* the caller is only interested in reading, but no
+ * data is available yet */
+ if (!revents && (events & RBITS))
+ selrecord(curlwp, &d->selinfo);
+
+ mutex_exit(&d->lock);
+ DPRINTF(("/dev/xenevt: poll events=0x%x done, revents=0x%x\n",
+ events, revents));
+ return revents;
+}
+
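+/*
+ * ioctl interface: bind VIRQs, interdomain and unbound event channels
+ * to this device instance, unbind them again, and notify remote ends.
+ */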
+static int
+xenevt_dev_ioctl(struct file *fp, ulong cmd, void *data)
+{
+ struct xenevt_dev_data *d = fp->f_data;
+ int err;
+
+ switch (cmd) {
+ case IOCTL_EVTCHN_RESET:
+ {
+ mutex_enter(&d->lock);
+ d->ring_cons = d->ring_prod = 0;
+ d->flags = 0;
+ mutex_exit(&d->lock);
+ break;
+ }
+ case IOCTL_EVTCHN_BIND_VIRQ:
+ {
+ struct ioctl_evtchn_bind_virq *bind_virq = data;
+ evtchn_bind_virq_t op;
+
+ op.virq = bind_virq->virq;
+ op.vcpu = 0;
+ if ((err = minios_event_channel_op(EVTCHNOP_bind_virq, &op))) {
+ printf("IOCTL_EVTCHN_BIND_VIRQ failed: virq %d error %d\n",
+ bind_virq->virq, err);
+ return -err;
+ }
+ bind_virq->port = op.port;
+ mutex_enter(&devevent_lock);
+ KASSERT(devevents[bind_virq->port] == NULL);
+ devevents[bind_virq->port] = d;
+ mutex_exit(&devevent_lock);
+ minios_bind_evtchn(bind_virq->port, minios_evtdev_handler, d);
+ minios_unmask_evtchn(bind_virq->port);
+
+ break;
+ }
+ case IOCTL_EVTCHN_BIND_INTERDOMAIN:
+ {
+ struct ioctl_evtchn_bind_interdomain *bind_intd = data;
+ evtchn_bind_interdomain_t op;
+
+ op.remote_dom = bind_intd->remote_domain;
+ op.remote_port = bind_intd->remote_port;
+ if ((err = minios_event_channel_op(EVTCHNOP_bind_interdomain, &op))) {
+ printf("IOCTL_EVTCHN_BIND_INTERDOMAIN failed: "
+ "remote domain %d port %d error %d\n",
+ bind_intd->remote_domain, bind_intd->remote_port, err);
+ return -err;
+ }
+ bind_intd->port = op.local_port;
+ mutex_enter(&devevent_lock);
+ KASSERT(devevents[bind_intd->port] == NULL);
+ devevents[bind_intd->port] = d;
+ mutex_exit(&devevent_lock);
+ minios_bind_evtchn(bind_intd->port, minios_evtdev_handler, d);
+ minios_unmask_evtchn(bind_intd->port);
+
+ break;
+ }
+ case IOCTL_EVTCHN_BIND_UNBOUND_PORT:
+ {
+ struct ioctl_evtchn_bind_unbound_port *bind_unbound = data;
+ evtchn_alloc_unbound_t op;
+
+ op.dom = DOMID_SELF;
+ op.remote_dom = bind_unbound->remote_domain;
+ if ((err = minios_event_channel_op(EVTCHNOP_alloc_unbound, &op))) {
+ printf("IOCTL_EVTCHN_BIND_UNBOUND_PORT failed: "
+ "remote domain %d error %d\n",
+ bind_unbound->remote_domain, err);
+ return -err;
+ }
+ bind_unbound->port = op.port;
+ mutex_enter(&devevent_lock);
+ KASSERT(devevents[bind_unbound->port] == NULL);
+ devevents[bind_unbound->port] = d;
+ mutex_exit(&devevent_lock);
+ minios_bind_evtchn(bind_unbound->port, minios_evtdev_handler, d);
+ minios_unmask_evtchn(bind_unbound->port);
+
+ break;
+ }
+ case IOCTL_EVTCHN_UNBIND:
+ {
+ struct ioctl_evtchn_unbind *unbind = data;
+
+ if (unbind->port >= NR_EVENT_CHANNELS)
+ return EINVAL;
+ mutex_enter(&devevent_lock);
+ if (devevents[unbind->port] != d) {
+ mutex_exit(&devevent_lock);
+ return ENOTCONN;
+ }
+ devevents[unbind->port] = NULL;
+ mutex_exit(&devevent_lock);
+ minios_mask_evtchn(unbind->port);
+ minios_unbind_evtchn(unbind->port);
+
+ break;
+ }
+ case IOCTL_EVTCHN_NOTIFY:
+ {
+ struct ioctl_evtchn_notify *notify = data;
+
+ if (notify->port >= NR_EVENT_CHANNELS)
+ return EINVAL;
+ mutex_enter(&devevent_lock);
+ if (devevents[notify->port] != d) {
+ mutex_exit(&devevent_lock);
+ return ENOTCONN;
+ }
+ minios_notify_remote_via_evtchn(notify->port);
+ mutex_exit(&devevent_lock);
+
+ break;
+ }
+ default:
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+/*----- setup etc. -----*/
+
+static int
+xenevt_dev_close(struct file *fp)
+{
+ struct xenevt_dev_data *d = fp->f_data;
+ int i;
+
+ DPRINTF(("/dev/xenevt: close...\n"));
+
+ mutex_enter(&devevent_lock);
+ mutex_enter(&d->lock);
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+ if (devevents[i] == d) {
+ minios_unbind_evtchn(i);
+ devevents[i] = NULL;
+ }
+ }
+ mutex_exit(&d->lock);
+ mutex_exit(&devevent_lock);
+
+ seldestroy(&d->selinfo);
+ cv_destroy(&d->cv);
+ mutex_destroy(&d->lock);
+ kmem_free(d, sizeof(*d));
+
+ DPRINTF(("/dev/xenevt: close done.\n"));
+
+ fp->f_data = NULL;
+
+ return 0;
+}
+
+const struct fileops xenevt_dev_fileops = {
+ .fo_read = xenevt_dev_read,
+ .fo_write = xenevt_dev_write,
+ .fo_ioctl = xenevt_dev_ioctl,
+ .fo_fcntl = fnullop_fcntl,
+ .fo_poll = xenevt_dev_poll,
+ .fo_stat = fbadop_stat,
+ .fo_close = xenevt_dev_close,
+ .fo_kqfilter = fnullop_kqfilter,
+ .fo_restart = fnullop_restart,
+};
+
+int
+xenevt_dev_open(struct file *fp, void **fdata_r)
+{
+ struct xenevt_dev_data *d;
+
+ d = kmem_zalloc(sizeof(*d), KM_SLEEP);
+
+ mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
+ selinit(&d->selinfo);
+ cv_init(&d->cv, "xenevt");
+
+ *fdata_r = d;
+ return 0;
+}
+
+void xenevt_dev_init(void)
+{
+ mutex_init(&devevent_lock, MUTEX_DEFAULT, IPL_NONE);
+ minios_events_register_rump_callback(rump_evtdev_callback);
+ bmk_sched_create("xenevt", NULL, 0, xenevt_thread_func, NULL,
+ NULL, 0);
+}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- /dev/null
+/* $NetBSD: privcmd.c,v 1.49 2014/10/17 16:37:02 christos Exp $ */
+
+/*-
+ * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2015 Wei Liu.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: privcmd.c,v 1.49 2014/10/17 16:37:02 christos Exp $");
+
+#include <sys/errno.h>
+#include <sys/mount.h>
+#include <sys/dirent.h>
+#include <uvm/uvm_prot.h>
+#include <sys/vnode_if.h>
+#include <sys/vnode.h>
+#include <miscfs/kernfs/kernfs.h>
+
+#include "rumpxen_xendev.h"
+
+#include <mini-os/mm.h>
+
+#include "xenio.h"
+
+static int
+xenprivcmd_xen2bsd_errno(int error)
+{
+ /*
+ * Xen uses System V error codes.
+ * In order to keep bloat as minimal as possible,
+ * only convert the codes that really impact us.
+ */
+
+ switch(-error) {
+ case 0:
+ return 0;
+ case 1:
+ return EPERM;
+ case 2:
+ return ENOENT;
+ case 3:
+ return ESRCH;
+ case 4:
+ return EINTR;
+ case 5:
+ return EIO;
+ case 6:
+ return ENXIO;
+ case 7:
+ return E2BIG;
+ case 8:
+ return ENOEXEC;
+ case 9:
+ return EBADF;
+ case 10:
+ return ECHILD;
+ case 11:
+ return EAGAIN;
+ case 12:
+ return ENOMEM;
+ case 13:
+ return EACCES;
+ case 14:
+ return EFAULT;
+ case 15:
+ return ENOTBLK;
+ case 16:
+ return EBUSY;
+ case 17:
+ return EEXIST;
+ case 18:
+ return EXDEV;
+ case 19:
+ return ENODEV;
+ case 20:
+ return ENOTDIR;
+ case 21:
+ return EISDIR;
+ case 22:
+ return EINVAL;
+ case 23:
+ return ENFILE;
+ case 24:
+ return EMFILE;
+ case 25:
+ return ENOTTY;
+ case 26:
+ return ETXTBSY;
+ case 27:
+ return EFBIG;
+ case 28:
+ return ENOSPC;
+ case 29:
+ return ESPIPE;
+ case 30:
+ return EROFS;
+ case 31:
+ return EMLINK;
+ case 32:
+ return EPIPE;
+ case 33:
+ return EDOM;
+ case 34:
+ return ERANGE;
+ case 35:
+ return EDEADLK;
+ case 36:
+ return ENAMETOOLONG;
+ case 37:
+ return ENOLCK;
+ case 38:
+ return ENOSYS;
+ case 39:
+ return ENOTEMPTY;
+ case 40:
+ return ELOOP;
+ case 42:
+ return ENOMSG;
+ case 43:
+ return EIDRM;
+ case 60:
+ return ENOSTR;
+ case 61:
+ return ENODATA;
+ case 62:
+ return ETIME;
+ case 63:
+ return ENOSR;
+ case 66:
+ return EREMOTE;
+ case 74:
+ return EBADMSG;
+ case 75:
+ return EOVERFLOW;
+ case 84:
+ return EILSEQ;
+ case 87:
+ return EUSERS;
+ case 88:
+ return ENOTSOCK;
+ case 89:
+ return EDESTADDRREQ;
+ case 90:
+ return EMSGSIZE;
+ case 91:
+ return EPROTOTYPE;
+ case 92:
+ return ENOPROTOOPT;
+ case 93:
+ return EPROTONOSUPPORT;
+ case 94:
+ return ESOCKTNOSUPPORT;
+ case 95:
+ return EOPNOTSUPP;
+ case 96:
+ return EPFNOSUPPORT;
+ case 97:
+ return EAFNOSUPPORT;
+ case 98:
+ return EADDRINUSE;
+ case 99:
+ return EADDRNOTAVAIL;
+ case 100:
+ return ENETDOWN;
+ case 101:
+ return ENETUNREACH;
+ case 102:
+ return ENETRESET;
+ case 103:
+ return ECONNABORTED;
+ case 104:
+ return ECONNRESET;
+ case 105:
+ return ENOBUFS;
+ case 106:
+ return EISCONN;
+ case 107:
+ return ENOTCONN;
+ case 108:
+ return ESHUTDOWN;
+ case 109:
+ return ETOOMANYREFS;
+ case 110:
+ return ETIMEDOUT;
+ case 111:
+ return ECONNREFUSED;
+ case 112:
+ return EHOSTDOWN;
+ case 113:
+ return EHOSTUNREACH;
+ case 114:
+ return EALREADY;
+ case 115:
+ return EINPROGRESS;
+ case 116:
+ return ESTALE;
+ case 122:
+ return EDQUOT;
+ default:
+ printf("unknown xen error code %d\n", -error);
+ return -error;
+ }
+}
+
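+/*
+ * kernfs ioctl handler for the privcmd node: forwards hypercalls and
+ * foreign page mapping requests to the mini-os primitives.
+ */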
+static int
+xenprivcmd_ioctl(void *v)
+{
+ int err;
+ struct vop_ioctl_args *ap = v;
+
+ switch (ap->a_command) {
+ case IOCTL_PRIVCMD_HYPERCALL:
+ {
+ privcmd_hypercall_t *hc = (privcmd_hypercall_t *)ap->a_data;
+
+ err = minios_hypercall(hc->op, hc->arg[0], hc->arg[1],
+ hc->arg[2], hc->arg[3], hc->arg[4]);
+ if (err >= 0) {
+ hc->retval = err;
+ err = 0;
+ } else {
+ err = xenprivcmd_xen2bsd_errno(err);
+ hc->retval = 0;
+ }
+
+ break;
+ }
+ case IOCTL_PRIVCMD_MMAP:
+ {
+ int i;
+ privcmd_mmap_t *mcmd = ap->a_data;
+ privcmd_mmap_entry_t mentry;
+
+ for (i = 0; i < mcmd->num; i++) {
+ err = copyin(&mcmd->entry[i], &mentry, sizeof(mentry));
+ if (err)
+ return err;
+
+ if (mentry.npages == 0 || mentry.va & PAGE_MASK)
+ return EINVAL;
+
+			/* Calling this with a NULL err argument will just
+			 * crash the domain.
+			 */
+ minios_map_frames(mentry.va, &mentry.mfn, mentry.npages,
+ 0, 0, mcmd->dom, NULL,
+ minios_get_l1prot());
+ }
+
+ err = 0;
+ break;
+ }
+ case IOCTL_PRIVCMD_MMAPBATCH:
+ {
+ privcmd_mmapbatch_t *pmb = ap->a_data;
+
+ if (pmb->num == 0 || pmb->addr & PAGE_MASK)
+ return EINVAL;
+
+		/* Calling this with a NULL err argument will just crash
+		 * the domain.
+		 */
+ minios_map_frames(pmb->addr, pmb->arr, pmb->num, 1, 0,
+ pmb->dom, NULL, minios_get_l1prot());
+ err = 0;
+ break;
+ }
+ default:
+ err = EINVAL;
+ }
+
+ return err;
+}
+
+static const struct kernfs_fileop xenprivcmd_fileops[] = {
+ { .kf_fileop = KERNFS_FILEOP_IOCTL, .kf_vop = xenprivcmd_ioctl },
+};
+
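+/*
+ * Register a read-only "privcmd" entry under the kernfs "xen" directory
+ * (kernxen_pkt, created by xenkernfs_init() in rump_dev_xen.c).
+ */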
+#define XENPRIVCMD_MODE (S_IRUSR)
+extern kernfs_parentdir_t *kernxen_pkt;
+void xenprivcmd_init(void)
+{
+ kernfs_entry_t *dkt;
+ kfstype kfst;
+
+ kfst = KERNFS_ALLOCTYPE(xenprivcmd_fileops);
+
+ KERNFS_ALLOCENTRY(dkt, M_TEMP, M_WAITOK);
+ KERNFS_INITENTRY(dkt, DT_REG, "privcmd", NULL, kfst, VREG,
+ XENPRIVCMD_MODE);
+ kernfs_addentry(kernxen_pkt, dkt);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Citrix
+ *
+ * Header for /dev/xen* in a rump kernel.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef RUMP_DEV_XEN_H
+#define RUMP_DEV_XEN_H
+
+#include <sys/filedesc.h>
+#include <sys/malloc.h>
+#include <sys/lwp.h>
+#include <sys/device.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/select.h>
+#include <sys/file.h>
+#include <sys/poll.h>
+
+
+/* nicked from NetBSD sys/dev/pci/cxgb/cxgb_adapter.h */
+#ifndef container_of
+#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
+#endif
+
+//#define RUMP_DEV_XEN_DEBUG 1
+
+#ifdef RUMP_DEV_XEN_DEBUG
+#define DPRINTF(a) (printf a)
+#else
+#define DPRINTF(a) /* nothing */
+#endif
+
+
+/* Device operations, for devs table in rump_dev_xen.c */
+
+extern int xenbus_dev_open(struct file *fp, void **fdata);
+extern const struct fileops xenbus_dev_fileops;
+extern void xenevt_dev_init(void);
+extern int xenevt_dev_open(struct file *fp, void **fdata);
+extern const struct fileops xenevt_dev_fileops;
+extern void xenprivcmd_init(void);
+
+static inline void*
+xbd_malloc(size_t sz)
+{
+ return malloc(sz, M_DEVBUF, M_WAITOK);
+}
+
+static inline void
+xbd_free(void *p)
+{
+ if (p) /* free(9) is not like free(3)! */
+ free(p, M_DEVBUF);
+}
+
+char *xbd_strdup(const char *s);
+
+#endif /*RUMP_DEV_XEN_H*/
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- /dev/null
+/*
+ * rump_dev_xen.c
+ *
+ * Machinery for setting up the contents of /dev/xen* in a rump kernel.
+ *
+ * Copyright (c) 2014 Citrix
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: $");
+
+#include "rumpxen_xendev.h"
+
+#include "rump_private.h"
+#include "rump_vfs_private.h"
+
+#include <sys/vfs_syscalls.h>
+#include <sys/dirent.h>
+#include <miscfs/kernfs/kernfs.h>
+
+char *xbd_strdup(const char *s)
+{
+ char *r;
+ size_t l = strlen(s) + 1;
+ r = xbd_malloc(l);
+ if (!r)
+ return r;
+ memcpy(r, s, l);
+ return r;
+}
+
+#define DEV_XEN "/dev/xen"
+
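+/*
+ * Table of /dev/xen* device nodes: the minor number is the index into
+ * devs[], and each entry carries an optional init hook, the open routine
+ * and the fileops installed by fd_clone().
+ */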
+static const struct xen_dev_info {
+ const char *path;
+ void (*xd_init)(void);
+ int (*xd_open)(struct file *fp, void **fdata_r);
+ const struct fileops *fo;
+} devs[] = {
+#define XDEV(cmin, path, init_fn, component) \
+ [cmin] = { path, init_fn, component##_dev_open, \
+ &component##_dev_fileops }
+ XDEV(0, DEV_XEN "/xenbus", NULL, xenbus),
+ XDEV(1, "/dev/xenevt", xenevt_dev_init, xenevt),
+#undef XDEV
+};
+
+#define NUM_DEV_INFOS (sizeof(devs)/sizeof(devs[0]))
+
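+/*
+ * cdevsw open routine shared by all minors: look up the devs[] entry,
+ * allocate a file descriptor, call the per-device open and hand the
+ * result to fd_clone() so further operations go through that device's
+ * fileops.
+ */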
+static int
+xen_dev_open(dev_t dev, int flags, int mode, struct lwp *l)
+{
+ const struct xen_dev_info *xdinfo;
+ int fd, err;
+ struct file *fp;
+ void *fdata;
+
+ DPRINTF(("xen devsw: opening minor=%lu\n", (unsigned long)minor(dev)));
+
+ if (minor(dev) < 0 || minor(dev) >= NUM_DEV_INFOS)
+ return ENODEV;
+
+ xdinfo = &devs[minor(dev)];
+
+ if (!xdinfo->xd_open)
+ return ENODEV;
+
+ err = fd_allocfile(&fp, &fd);
+ if (err)
+ return err;
+
+ DPRINTF(("%s: opening...\n", xdinfo->path));
+
+ err = xdinfo->xd_open(fp, &fdata);
+ if (err) {
+ fd_abort(curproc, fp, fd);
+ return err;
+ }
+
+ DPRINTF(("%s: opened, fd_clone\n", xdinfo->path));
+
+ return fd_clone(fp, fd, flags, xdinfo->fo, fdata);
+}
+
+static const struct cdevsw xen_dev_cdevsw = {
+ .d_open = xen_dev_open,
+ .d_close = nullclose,
+ .d_read = noread,
+ .d_write = nowrite,
+ .d_ioctl = noioctl,
+ .d_stop = nostop,
+ .d_tty = notty,
+ .d_poll = nopoll,
+ .d_mmap = nommap,
+ .d_kqfilter = nokqfilter,
+ .d_flag = D_OTHER
+};
+
+#define DIR_MODE (S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
+kernfs_parentdir_t *kernxen_pkt;
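+/* Create the kernfs "xen" subdirectory that privcmd is registered under. */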
+static void xenkernfs_init(void)
+{
+ kernfs_entry_t *dkt;
+ KERNFS_ALLOCENTRY(dkt, M_TEMP, M_WAITOK);
+ KERNFS_INITENTRY(dkt, DT_DIR, "xen", NULL, KFSsubdir, VDIR, DIR_MODE);
+ kernfs_addentry(NULL, dkt);
+ kernxen_pkt = KERNFS_ENTOPARENTDIR(dkt);
+}
+
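+/*
+ * Rump component constructor: create /dev/xen, attach the character
+ * device switch, make the device nodes listed in devs[] (running their
+ * init hooks), then set up the kernfs pieces for privcmd.
+ */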
+RUMP_COMPONENT(RUMP_COMPONENT_DEV)
+{
+ devmajor_t bmaj, cmaj;
+ devminor_t cmin;
+ int err;
+ const struct xen_dev_info *xdinfo;
+
+ DPRINTF(("xen devsw: attaching\n"));
+
+ err = do_sys_mkdir(DEV_XEN, 0755, UIO_SYSSPACE);
+ if (err && err != EEXIST)
+ panic("xen devsw: mkdir " DEV_XEN " failed: %d", err);
+
+ bmaj = cmaj = NODEVMAJOR;
+ err = devsw_attach("xen", NULL, &bmaj, &xen_dev_cdevsw, &cmaj);
+ if (err)
+ panic("xen devsw: attach failed: %d", err);
+
+ for (cmin = 0; cmin < NUM_DEV_INFOS; cmin++) {
+ xdinfo = &devs[cmin];
+ err = rump_vfs_makeonedevnode(S_IFCHR, xdinfo->path, cmaj, cmin);
+ if (err)
+ panic("%s: cannot create device node: %d",
+ xdinfo->path, err);
+ if (xdinfo->xd_init)
+ xdinfo->xd_init();
+ DPRINTF(("%s: created, %lu.%lu\n",
+ xdinfo->path, (unsigned long)cmaj, (unsigned long)cmin));
+ }
+
+ xenkernfs_init();
+ xenprivcmd_init();
+}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
+
--- /dev/null
+/* $NetBSD: xenio.h,v 1.9 2011/01/10 11:13:03 cegger Exp $ */
+
+/******************************************************************************
+ * privcmd.h
+ *
+ * Copyright (c) 2003-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_XENIO_H__
+#define __XEN_XENIO_H__
+
+/* Interface to /proc/xen/privcmd */
+
+typedef struct privcmd_hypercall
+{
+ unsigned long op;
+ unsigned long arg[5];
+ long retval;
+} privcmd_hypercall_t;
+
+typedef struct privcmd_mmap_entry {
+ unsigned long va;
+ unsigned long mfn;
+ unsigned long npages;
+} privcmd_mmap_entry_t;
+
+typedef struct privcmd_mmap {
+ int num;
+ domid_t dom; /* target domain */
+ privcmd_mmap_entry_t *entry;
+} privcmd_mmap_t;
+
+typedef struct privcmd_mmapbatch {
+ int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+ unsigned long addr; /* virtual address */
+ unsigned long *arr; /* array of mfns - top nibble set on err */
+} privcmd_mmapbatch_t;
+
+typedef struct privcmd_mmapbatch_v2 {
+ int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+ uint64_t addr; /* virtual address */
+ const xen_pfn_t *arr; /* array of mfns */
+ int *err; /* array of error codes */
+} privcmd_mmapbatch_v2_t;
+
+typedef struct privcmd_blkmsg
+{
+ unsigned long op;
+ void *buf;
+ int buf_size;
+} privcmd_blkmsg_t;
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: &privcmd_hypercall_t
+ * Return: Value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL \
+ _IOWR('P', 0, privcmd_hypercall_t)
+
+#if defined(_KERNEL)
+/* compat */
+#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN_OLD \
+ _IO('P', 1)
+
+typedef struct oprivcmd_hypercall
+{
+ unsigned long op;
+ unsigned long arg[5];
+} oprivcmd_hypercall_t;
+
+#define IOCTL_PRIVCMD_HYPERCALL_OLD \
+ _IOWR('P', 0, oprivcmd_hypercall_t)
+#endif /* defined(_KERNEL) */
+
+#define IOCTL_PRIVCMD_MMAP \
+ _IOW('P', 2, privcmd_mmap_t)
+#define IOCTL_PRIVCMD_MMAPBATCH \
+ _IOW('P', 3, privcmd_mmapbatch_t)
+#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
+ _IOR('P', 4, unsigned long)
+
+/*
+ * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
+ * @arg: n/a
+ * Return: Port associated with domain-controller end of control event channel
+ * for the initial domain.
+ */
+#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
+ _IOR('P', 5, int)
+#define IOCTL_PRIVCMD_MMAPBATCH_V2 \
+ _IOW('P', 6, privcmd_mmapbatch_v2_t)
+
+/* Interface to /dev/xenevt */
+/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
+#define EVTCHN_RESET _IO('E', 1)
+/* EVTCHN_BIND: Bind to the specified event-channel port. */
+#define EVTCHN_BIND _IOW('E', 2, unsigned long)
+/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
+#define EVTCHN_UNBIND _IOW('E', 3, unsigned long)
+
+#endif /* __XEN_XENIO_H__ */
--- /dev/null
+/* $NetBSD: xenio3.h,v 1.3 2010/09/03 06:07:24 cegger Exp $ */
+/******************************************************************************
+ * evtchn.h
+ *
+ * Interface to /dev/xen/evtchn.
+ *
+ * Copyright (c) 2003-2005, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_XENIO3_H__
+#define __XEN_XENIO3_H__
+
+/*
+ * Bind a fresh port to VIRQ @virq.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_VIRQ \
+ _IOWR('E', 4, struct ioctl_evtchn_bind_virq)
+struct ioctl_evtchn_bind_virq {
+ unsigned int virq;
+ unsigned int port;
+};
+
+/*
+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
+ _IOWR('E', 5, struct ioctl_evtchn_bind_interdomain)
+struct ioctl_evtchn_bind_interdomain {
+ unsigned int remote_domain, remote_port;
+ unsigned int port;
+};
+
+/*
+ * Allocate a fresh port for binding to @remote_domain.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
+ _IOWR('E', 6, struct ioctl_evtchn_bind_unbound_port)
+struct ioctl_evtchn_bind_unbound_port {
+ unsigned int remote_domain;
+ unsigned int port;
+};
+
+/*
+ * Unbind previously allocated @port.
+ */
+#define IOCTL_EVTCHN_UNBIND \
+ _IOW('E', 7, struct ioctl_evtchn_unbind)
+struct ioctl_evtchn_unbind {
+ unsigned int port;
+};
+
+/*
+ * Send event to previously allocated @port.
+ */
+#define IOCTL_EVTCHN_NOTIFY \
+ _IOW('E', 8, struct ioctl_evtchn_notify)
+struct ioctl_evtchn_notify {
+ unsigned int port;
+};
+
+/* Clear and reinitialise the event buffer. Clear error condition. */
+#define IOCTL_EVTCHN_RESET \
+ _IO('E', 9)
+
+#endif /* __XEN_XENIO3_H__ */
+++ /dev/null
-LIB= rumpxen_xendev
-
-SRCS= xendev_component.c
-SRCS+= busdev.c
-SRCS+= evtdev.c
-SRCS+= privcmd.c
-
-RUMPTOP= ${TOPRUMP}
-
-CPPFLAGS+= -I${RUMPTOP}/librump/rumpkern -I${RUMPTOP}/librump
-CPPFLAGS+= -I${RUMPTOP}/librump/rumpvfs
-CPPFLAGS+= -I${.CURDIR}
-CPPFLAGS+= -I${.CURDIR}/../xen/include -D__RUMP_KERNEL__ -I${.CURDIR}/..
-CPPFLAGS+= -I${.CURDIR}/../../../include
-
-.if ${BUILDRR:Uno} == "true"
-.include "${RUMPRUN_MKCONF}"
-CPPFLAGS+= -I${OBJDIR}/dest.stage/include
-.endif
-
-RUMP_SYM_NORENAME=xenbus_|HYPERVISOR_|minios_|bmk_
-
-.include "${RUMPTOP}/Makefile.rump"
-.include <bsd.lib.mk>
-.include <bsd.klinks.mk>
+++ /dev/null
-/*
- * xenbus_dev.c
- *
- * Driver giving user-space access to the kernel's xenbus connection
- * to xenstore. Adapted heavily from NetBSD's xenbus_dev.c, so much
- * so that practically none of the original remains.
- *
- * Copyright (c) 2014 Citrix
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * (From original xenbus_dev.c:)
- * Copyright (c) 2005, Christian Limpach
- * Copyright (c) 2005, Rusty Russell, IBM Corporation
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: $");
-
-#include "rumpxen_xendev.h"
-
-#include <bmk-rumpuser/rumpuser.h>
-
-#define BUFFER_SIZE (XENSTORE_PAYLOAD_MAX+sizeof(struct xsd_sockmsg))
-
-#include <xen/io/xs_wire.h>
-
-#include <mini-os/xenbus.h>
-#include <mini-os/wait.h>
-
-/*----- data structures -----*/
-
-struct xenbus_dev_request {
- struct xenbus_event xb;
- uint32_t xb_id, user_id;
- uint32_t req_type;
- union {
- struct xenbus_dev_transaction *trans;
- struct xenbus_dev_watch *watch;
- } u;
-};
-
-struct xenbus_dev_transaction {
- LIST_ENTRY(xenbus_dev_transaction) entry;
- xenbus_transaction_t tx_id;
- struct xenbus_dev_request destroy;
-};
-
-struct xenbus_dev_watch {
- struct xenbus_watch xb;
- LIST_ENTRY(xenbus_dev_watch) entry;
- struct xsd_sockmsg *wmsg;
- char *path, *user_token;
- _Bool visible_to_user;
- struct xenbus_dev_request destroy;
-};
-
-struct xenbus_dev_data {
- kmutex_t lock;
- int outstanding_requests;
- LIST_HEAD(, xenbus_dev_transaction) transactions;
- LIST_HEAD(, xenbus_dev_watch) watches;
- struct xenbus_event_queue replies; /* Entirely unread by user. */
-
- _Bool queued_enomem, want_restart;
-
- /* Partially written request(s). */
- unsigned int wbuf_used;
- union {
- struct xsd_sockmsg msg;
- unsigned char buffer[BUFFER_SIZE];
- } wbuf;
-
- /* Partially read response. */
- struct xsd_sockmsg *rmsg; /* .id==user_id; data follows */
- int rmsg_done;
- void (*rmsg_free)(void*);
-
- struct selinfo selinfo;
- /* The lock used for the purposes described in select(9)
- * is xenbus_req_lock, not d->lock. */
-};
-
-/*----- helpers -----*/
-
-static void
-free_watch(struct xenbus_dev_watch *watch)
-{
- xbd_free(watch->path);
- xbd_free(watch->user_token);
- xbd_free(watch);
-}
-
-static struct xenbus_dev_transaction*
-find_transaction(struct xenbus_dev_data *d, xenbus_transaction_t id)
-{
- struct xenbus_dev_transaction *trans;
-
- LIST_FOREACH(trans, &d->transactions, entry)
- if (trans->tx_id == d->wbuf.msg.tx_id)
- return trans;
- /* not found */
- return 0;
-}
-
-static struct xenbus_dev_watch*
-find_visible_watch(struct xenbus_dev_data *d,
- const char *path, const char *token)
-{
- struct xenbus_dev_watch *watch;
-
- LIST_FOREACH(watch, &d->watches, entry)
- if (watch->visible_to_user &&
- !strcmp(path, watch->path) &&
- !strcmp(token, watch->user_token))
- return watch;
- /* not found */
- return 0;
-}
-
-/*----- request handling (writes to the device) -----*/
-
-static void
-make_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
- uint32_t tx_id, const struct write_req *wreqs, int num_wreqs)
-/* Caller should have filled in req->req_id, ->u, and (if needed)
- * ->user_id. We deal with ->xb and ->xb_id. */
-{
- req->xb.watch = 0;
- req->xb_id = xenbus_id_allocate(&d->replies, &req->xb);
-
- KASSERT(d->outstanding_requests < INT_MAX);
- d->outstanding_requests++;
-
- xenbus_xb_write(req->req_type, req->xb_id, tx_id,
- wreqs, num_wreqs);
-}
-
-static void
-watch_write_req_string(struct write_req **wreqp, const char *string)
-{
- struct write_req *wreq = (*wreqp)++;
- int l = strlen(string);
- wreq->len = l+1;
- wreq->data = string;
-}
-
-static void
-make_watch_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
- uint32_t tx_id, struct xenbus_dev_watch *watch)
-{
- struct write_req wreqs[2], *wreq = wreqs;
- watch_write_req_string(&wreq, watch->path);
- watch_write_req_string(&wreq, watch->xb.token);
- KASSERT((char*)wreq == (char*)wreqs + sizeof(wreqs));
-
- req->u.watch = watch;
- make_request(d, req, tx_id, wreqs, 2);
-}
-
-static void
-write_trouble(struct xenbus_dev_data *d, const char *what)
-{
- printf("xenbus dev: bad write: %s\n", what);
-
-#ifdef RUMP_DEV_XEN_DEBUG
- {
- unsigned int i;
- printf(" %d bytes:", d->wbuf_used);
- for (i=0; i<d->wbuf_used; i++) {
- if (!(i & 3)) printf(" ");
- printf("%02x", d->wbuf.buffer[i]);
- }
- printf(".\n");
- }
-#endif /*RUMP_DEV_XEN_DEBUG*/
-
- d->wbuf_used = 0; /* discard everything buffered */
-}
-
-/* void __NORETURN__ WTROUBLE(const char *details_without_newline);
- * assumes: struct xenbus_dev_data *d;
- * int err;
- * end: */
-#define WTROUBLE(s) do{ write_trouble(d,s); err = EINVAL; goto end; }while(0)
-
-static void
-forward_request(struct xenbus_dev_data *d, struct xenbus_dev_request *req)
-{
- struct write_req wreq = {
- d->wbuf.buffer + sizeof(d->wbuf.msg),
- d->wbuf_used - sizeof(d->wbuf.msg),
- };
-
- make_request(d, req, d->wbuf.msg.tx_id, &wreq, 1);
-}
-
-static _Bool
-watch_message_parse_string(const char **p, const char *end,
- const char **string_r)
-{
- const char *nul = memchr(*p, 0, end - *p);
- if (!nul)
- return 0;
-
- *string_r = *p;
- *p = nul+1;
-
- return 1;
-}
-
-static _Bool
-watch_message_parse(const struct xsd_sockmsg *msg,
- const char **path_r, const char **token_r)
-{
- const char *begin = (const char*)msg;
- const char *p = begin + sizeof(*msg);
- const char *end = p + msg->len;
- KASSERT(p <= end);
-
- return
- watch_message_parse_string(&p, end, path_r) &&
- watch_message_parse_string(&p, end, token_r);
-}
-
-static int
-process_request(struct xenbus_dev_data *d)
-{
- struct xenbus_dev_request *req;
- struct xenbus_dev_transaction *trans;
- struct xenbus_dev_watch *watch_free = 0, *watch;
- const char *wpath, *wtoken;
- int err;
-
- DPRINTF(("/dev/xen/xenbus: request, type=%d\n",
- d->wbuf.msg.type));
-
- req = xbd_malloc(sizeof(*req));
- if (!req) {
- err = ENOMEM;
- goto end;
- }
- req->user_id = d->wbuf.msg.req_id;
- req->req_type = d->wbuf.msg.type;
-
- switch (d->wbuf.msg.type) {
- case XS_DIRECTORY:
- case XS_READ:
- case XS_GET_PERMS:
- case XS_GET_DOMAIN_PATH:
- case XS_IS_DOMAIN_INTRODUCED:
- case XS_WRITE:
- case XS_MKDIR:
- case XS_RM:
- case XS_SET_PERMS:
- if (d->wbuf.msg.tx_id) {
- if (!find_transaction(d, d->wbuf.msg.tx_id))
- WTROUBLE("unknown transaction");
- }
- forward_request(d, req);
- break;
-
- case XS_TRANSACTION_START:
- if (d->wbuf.msg.tx_id)
- WTROUBLE("nested transaction");
- req->u.trans = xbd_malloc(sizeof(*req->u.trans));
- if (!req->u.trans) {
- err = ENOMEM;
- goto end;
- }
- forward_request(d, req);
- break;
-
- case XS_TRANSACTION_END:
- if (!d->wbuf.msg.tx_id)
- WTROUBLE("ending zero transaction");
- req->u.trans = trans = find_transaction(d, d->wbuf.msg.tx_id);
- if (!trans)
- WTROUBLE("ending unknown transaction");
- LIST_REMOVE(trans, entry); /* prevent more reqs using it */
- forward_request(d, req);
- break;
-
- case XS_WATCH:
- if (d->wbuf.msg.tx_id)
- WTROUBLE("XS_WATCH with transaction");
- if (!watch_message_parse(&d->wbuf.msg, &wpath, &wtoken))
- WTROUBLE("bad XS_WATCH message");
-
- watch = watch_free = xbd_malloc(sizeof(*watch));
- if (!watch) {
- err = ENOMEM;
- goto end;
- }
-
- watch->path = xbd_strdup(wpath);
- watch->user_token = xbd_strdup(wtoken);
- if (!watch->path || !watch->user_token) {
- err = ENOMEM;
- goto end;
- }
-
- watch->xb.events = &d->replies;
- xenbus_watch_prepare(&watch->xb);
-
- watch_free = 0; /* we are committed */
- watch->visible_to_user = 0;
- LIST_INSERT_HEAD(&d->watches, watch, entry);
- make_watch_request(d, req, d->wbuf.msg.tx_id, watch);
- break;
-
- case XS_UNWATCH:
- if (d->wbuf.msg.tx_id)
- WTROUBLE("XS_UNWATCH with transaction");
- if (!watch_message_parse(&d->wbuf.msg, &wpath, &wtoken))
- WTROUBLE("bad XS_WATCH message");
-
- watch = find_visible_watch(d, wpath, wtoken);
- if (!watch)
- WTROUBLE("unwatch nonexistent watch");
-
- watch->visible_to_user = 0;
- make_watch_request(d, req, d->wbuf.msg.tx_id, watch);
- break;
-
- default:
- WTROUBLE("unknown request message type");
- }
-
- err = 0;
-end:
- if (watch_free)
- free_watch(watch_free);
- return err;
-}
-
-static int
-xenbus_dev_write(struct file *fp, off_t *offset, struct uio *uio,
- kauth_cred_t cred, int flags)
-{
- struct xenbus_dev_data *d = fp->f_data;
- int err;
-
- DPRINTF(("/dev/xen/xenbus: write...\n"));
-
- if (uio->uio_offset < 0)
- return EINVAL;
-
- mutex_enter(&d->lock);
-
- for (;;) { /* keep reading more until we're done */
-
- if (!uio->uio_resid)
- break;
-
- uio->uio_offset = d->wbuf_used;
- err = uiomove(d->wbuf.buffer + d->wbuf_used,
- sizeof(d->wbuf.buffer) - d->wbuf_used,
- uio);
- d->wbuf_used = uio->uio_offset;
- if (err)
- goto end;
-
- for (;;) { /* process message(s) in the buffer */
-
- if (d->wbuf_used < sizeof(d->wbuf.msg))
- break;
-
- if (d->wbuf.msg.len > XENSTORE_PAYLOAD_MAX)
- WTROUBLE("too much payload in packet");
-
- uint32_t packetlen =
- d->wbuf.msg.len + sizeof(d->wbuf.msg);
-
- KASSERT(packetlen <= sizeof(d->wbuf.buffer));
-
- if (d->wbuf_used < packetlen)
- break;
-
- err = process_request(d);
-
- if (d->wbuf_used) {
- /* Remove from the buffer before checking
- * for errors - but some errors may have
- * emptied the buffer already. */
- d->wbuf_used -= packetlen;
- memmove(d->wbuf.buffer,
- d->wbuf.buffer + packetlen,
- d->wbuf_used);
- }
-
- if (err)
- goto end;
- }
- }
-
- err = 0;
-end:
- mutex_exit(&d->lock);
-
- DPRINTF(("/dev/xen/xenbus: write done, err=%d\n", err));
- return err;
-}
-
-/*----- response and watch event handling (reads from the device) -----*/
-
-static struct xsd_sockmsg*
-process_watch_event(struct xenbus_dev_data *d, struct xenbus_event *event,
- struct xenbus_dev_watch *watch,
- void (**mfree_r)(void*))
-{
-
- /* We need to make a new XS_WATCH_EVENT message because the
- * one from xenstored (a) isn't visible to us here and (b)
- * anyway has the wrong token in it. */
-
- DPRINTF(("/dev/xen/xenbus: watch event,"
- " wpath=%s user_token=%s epath=%s xb.token=%s\n",
- watch->path, watch->user_token,
- event->path, watch->xb.token));
-
- /* Define the parts of the message */
-
-#define WATCH_MESSAGE_PART_STRING(PART,x) \
- PART(strlen((x)) + 1, memcpy(p, (x), sz))
-
-#define WATCH_MESSAGE_PARTS(PART) \
- PART(sizeof(struct xsd_sockmsg), (void)0) \
- WATCH_MESSAGE_PART_STRING(PART,event->path) \
- WATCH_MESSAGE_PART_STRING(PART,watch->user_token)
-
- /* Compute the size */
-
- size_t totalsz = 0;
- size_t sz = 0;
-
-#define WATCH_MESSAGE_PART_ADD_SIZE(calcpartsz, fill) \
- totalsz += (calcpartsz);
-
- WATCH_MESSAGE_PARTS(WATCH_MESSAGE_PART_ADD_SIZE);
-
- DPRINTF(("/dev/xen/xenbus: watch event allocating %lu\n",
- (unsigned long)totalsz));
-
- /* Allocate it and fill in the header */
-
- struct xsd_sockmsg *reply = xbd_malloc(totalsz);
- if (!reply) {
- printf("xenbus dev: out of memory for watch event"
- " wpath=`%s' epath=`%s'\n",
- watch->path, event->path);
- d->queued_enomem = 1;
- goto end;
- }
-
- bzero(reply, sizeof(*reply));
- reply->type = XS_WATCH_EVENT;
- reply->len = totalsz - sizeof(*reply);
-
- char *p = (void*)reply;
-
- /* Fill in the rest of the message */
-
-#define WATCH_MESSAGE_PART_ADD(calcpartsz, fill) \
- sz = (calcpartsz); \
- fill; \
- p += sz;
-
- WATCH_MESSAGE_PARTS(WATCH_MESSAGE_PART_ADD);
-
- KASSERT(p == (const char*)reply + totalsz);
-
- /* Now we are done */
-
-end:
- xenbus_free(event);
- *mfree_r = xbd_free;
- return reply;
-}
-
-/* Returned value is from malloc() */
-static struct xsd_sockmsg*
-process_response(struct xenbus_dev_data *d, struct xenbus_dev_request *req,
- void (**mfree_r)(void*))
-{
- struct xenbus_dev_watch *watch;
- struct xsd_sockmsg *msg = req->xb.reply;
-
- msg->req_id = req->user_id;
-
- _Bool error = msg->type == XS_ERROR;
- KASSERT(error || msg->type == req->req_type);
-
- DPRINTF(("/dev/xen/xenbus: response, req_type=%d msg->type=%d\n",
- req->req_type, msg->type));
-
- switch (req->req_type) {
-
- case XS_TRANSACTION_START:
- if (error)
- break;
- KASSERT(msg->len >= 2);
- KASSERT(!((uint8_t*)(msg+1))[msg->len-1]);
- req->u.trans->tx_id =
- strtoul((char*)&msg + sizeof(*msg),
- 0, 0);
- LIST_INSERT_HEAD(&d->transactions, req->u.trans,
- entry);
- break;
-
- case XS_TRANSACTION_END:
- xbd_free(req->u.trans);
- break;
-
- case XS_WATCH:
- watch = req->u.watch;
- if (error)
- goto do_unwatch;
- watch->visible_to_user = 1;
- break;
-
- case XS_UNWATCH:
- KASSERT(!error);
- watch = req->u.watch;
- do_unwatch:
- KASSERT(!watch->visible_to_user);
- LIST_REMOVE(watch, entry);
- xenbus_watch_release(&watch->xb);
- free_watch(watch);
- break;
-
- }
-
- xenbus_id_release(req->xb_id);
- xbd_free(req);
- KASSERT(d->outstanding_requests > 0);
- d->outstanding_requests--;
-
- *mfree_r = xenbus_free;
- return msg;
-}
-
-static struct xsd_sockmsg*
-process_event(struct xenbus_dev_data *d, struct xenbus_event *event,
- void (**mfree_r)(void*))
-{
- if (event->watch) {
- struct xenbus_dev_watch *watch =
- container_of(event->watch, struct xenbus_dev_watch, xb);
-
- return process_watch_event(d, event, watch, mfree_r);
-
- } else {
- struct xenbus_dev_request *req =
- container_of(event, struct xenbus_dev_request, xb);
-
- return process_response(d, req, mfree_r);
- }
-
-}
-
-static struct xsd_sockmsg*
-next_event_msg(struct xenbus_dev_data *d, struct file *fp, int *err_r,
- void (**mfree_r)(void*))
-/* If !err_r, always blocks and always returns successfully.
- * If !!err_r, will block iff user process read should block:
- * will either return successfully, or set *err_r and return 0.
- *
- * Must be called with d->lock held; may temporarily release it. */
-{
- int nlocks;
- DEFINE_WAIT(w);
- spin_lock(&xenbus_req_lock);
-
- while (STAILQ_EMPTY(&d->replies.events)) {
- if (err_r) {
- if (d->want_restart) {
- *err_r = ERESTART;
- goto fail;
- }
- if (fp->f_flag & FNONBLOCK) {
- *err_r = EAGAIN;
- goto fail;
- }
- }
-
- DPRINTF(("/dev/xen/xenbus: about to block err_r=%p\n", err_r));
-
- minios_add_waiter(w, d->replies.waitq);
- spin_unlock(&xenbus_req_lock);
- mutex_exit(&d->lock);
- rumpkern_unsched(&nlocks, 0);
-
- minios_wait(w);
-
- rumpkern_sched(nlocks, 0);
- mutex_enter(&d->lock);
- spin_lock(&xenbus_req_lock);
- minios_remove_waiter(w, d->replies.waitq);
- }
- struct xenbus_event *event = STAILQ_FIRST(&d->replies.events);
- STAILQ_REMOVE_HEAD(&d->replies.events, entry);
-
- spin_unlock(&xenbus_req_lock);
-
- DPRINTF(("/dev/xen/xenbus: next_event_msg found an event %p\n",event));
- return process_event(d, event, mfree_r);
-
-fail:
- DPRINTF(("/dev/xen/xenbus: not blocking, returning %d\n", *err_r));
- spin_unlock(&xenbus_req_lock);
- return 0;
-}
-
-static int
-xenbus_dev_read(struct file *fp, off_t *offset, struct uio *uio,
- kauth_cred_t cred, int flags)
-{
- struct xenbus_dev_data *d = fp->f_data;
- size_t org_resid = uio->uio_resid;
- int err;
-
- DPRINTF(("/dev/xen/xenbus: read...\n"));
- mutex_enter(&d->lock);
-
- for (;;) {
- DPRINTF(("/dev/xen/xenbus: read... uio_resid=%lu (org=%lu)"
- " q.enomem=%d\n",
- (unsigned long)uio->uio_resid,
- (unsigned long)org_resid,
- d->queued_enomem));
- if (d->queued_enomem) {
- if (org_resid != uio->uio_resid)
- /* return early now; report it next time */
- break;
- err = ENOMEM;
- d->queued_enomem = 0;
- goto end;
- }
-
- if (!uio->uio_resid)
- /* done what we have been asked to do */
- break;
-
- if (!d->rmsg) {
- d->rmsg = next_event_msg(d, fp, &err, &d->rmsg_free);
- if (!d->rmsg) {
- if (uio->uio_resid != org_resid)
- /* Done something, claim success. */
- break;
- goto end;
- }
- }
-
- uint32_t avail = sizeof(*d->rmsg) + d->rmsg->len;
- KASSERT(avail < BUFFER_SIZE*2); /* sanity check */
- KASSERT(avail > 0);
- KASSERT(d->rmsg_done <= avail);
-
- DPRINTF(("/dev/xen/xenbus: read... rmsg->len=%lu"
- " msg_done=%lu avail=%lu\n",
- (unsigned long)d->rmsg->len,
- (unsigned long)d->rmsg_done,
- (unsigned long)avail));
-
- uio->uio_offset = d->rmsg_done;
- err = uiomove((char*)d->rmsg + d->rmsg_done,
- avail - d->rmsg_done,
- uio);
- d->rmsg_done = uio->uio_offset;
- if (err)
- goto end;
-
- if (d->rmsg_done == avail) {
- DPRINTF(("/dev/xen/xenbus: read... msg complete\n"));
- d->rmsg_free(d->rmsg);
- d->rmsg = 0;
- d->rmsg_done = 0;
- }
- }
-
- err = 0;
-
-end:
- mutex_exit(&d->lock);
- DPRINTF(("/dev/xen/xenbus: read done, err=%d\n", err));
- return err;
-}
-
-/*----- more exciting reading -----*/
-
-#define RBITS (POLLIN | POLLRDNORM)
-#define WBITS (POLLOUT | POLLWRNORM)
-
-static void
-xenbus_dev_xb_wakeup(struct xenbus_event_queue *queue)
-{
- /* called with req_lock held */
- DPRINTF(("/dev/xen/xenbus: wakeup\n"));
- struct xenbus_dev_data *d =
- container_of(queue, struct xenbus_dev_data, replies);
- minios_wake_up(&d->replies.waitq);
- selnotify(&d->selinfo, RBITS, NOTE_SUBMIT);
-}
-
-static void
-xenbus_dev_restart(file_t *fp)
-{
- struct xenbus_dev_data *d = fp->f_data;
-
- DPRINTF(("/dev/xen/xenbus: restart!\n"));
-
- mutex_enter(&d->lock);
- spin_lock(&xenbus_req_lock);
-
- d->want_restart |= 1;
- minios_wake_up(&d->replies.waitq);
-
- spin_unlock(&xenbus_req_lock);
- mutex_exit(&d->lock);
-}
-
-static int
-xenbus_dev_poll(struct file *fp, int events)
-{
- struct xenbus_dev_data *d = fp->f_data;
- int revents = 0;
-
- DPRINTF(("/dev/xen/xenbus: poll events=0%o...\n", events));
-
- mutex_enter(&d->lock);
- spin_lock(&xenbus_req_lock);
-
- /* always writeable - we don't do proper blocking for writing
- * since this can only wait at most until other requests have
- * been handled by xenstored */
- revents |= events & WBITS;
-
- if (events & RBITS)
- if (d->rmsg || d->queued_enomem || d->want_restart)
- revents |= events & RBITS;
-
- if (!revents) {
- if (events & RBITS)
- selrecord(curlwp, &d->selinfo);
- }
-
- spin_unlock(&xenbus_req_lock);
- mutex_exit(&d->lock);
-
- DPRINTF(("/dev/xen/xenbus: poll events=0%o done, revents=0%o\n",
- events, revents));
- return revents;
-}
-
-/*----- setup etc. -----*/
-
-static int
-xenbus_dev_close(struct file *fp)
-{
- struct xenbus_dev_data *d = fp->f_data;
-
- DPRINTF(("/dev/xen/xenbus: close...\n"));
-
- /* Not neeeded against concurrent access (we assume!)
- * but next_event_msg will want to unlock and relock it */
- mutex_enter(&d->lock);
-
- xbd_free(d->rmsg);
- d->rmsg = 0;
-
- for (;;) {
- DPRINTF(("/dev/xen/xenbus: close loop\n"));
- /* We need to go round this again and again because
- * there might be requests in flight. Eg if the
- * user has an XS_WATCH in flight we have to wait for it
- * to be done and then unwatch it again. */
-
- struct xenbus_dev_watch *watch, *watch_tmp;
- LIST_FOREACH_SAFE(watch, &d->watches, entry, watch_tmp) {
- DPRINTF(("/dev/xen/xenbus: close watch %p %d\n",
- watch, watch->visible_to_user));
- if (watch->visible_to_user) {
- /* mirrors process_request XS_UNWATCH */
- watch->destroy.req_type = XS_UNWATCH;
- watch->visible_to_user = 0;
- make_watch_request(d, &watch->destroy, 0,
- watch);
- }
- }
-
- struct xenbus_dev_transaction *trans, *trans_tmp;
- const struct write_req trans_end_data = { "F", 2 };
- LIST_FOREACH_SAFE(trans, &d->transactions, entry, trans_tmp) {
- DPRINTF(("/dev/xen/xenbus: close transaction"
- " %p %"PRIx32"\n",
- trans, (unsigned int)trans->tx_id));
- /* mirrors process_request XS_TRANSACTION_END */
- trans->destroy.req_type = XS_TRANSACTION_END;
- trans->destroy.u.trans = trans;
- LIST_REMOVE(trans, entry);
- make_request(d, &trans->destroy, trans->tx_id,
- &trans_end_data, 1);
- }
-
- DPRINTF(("/dev/xen/xenbus: close outstanding=%d\n",
- d->outstanding_requests));
- KASSERT(d->outstanding_requests >= 0);
- if (!d->outstanding_requests)
- break;
-
- void (*dfree)(void*);
- struct xsd_sockmsg *discard = next_event_msg(d, fp, 0, &dfree);
- KASSERT(discard);
- dfree(discard);
- }
-
- KASSERT(!d->outstanding_requests);
- KASSERT(!d->rmsg);
- KASSERT(LIST_EMPTY(&d->transactions));
- KASSERT(LIST_EMPTY(&d->watches));
-
- DPRINTF(("/dev/xen/xenbus: close seldestroy outstanding=%d\n",
- d->outstanding_requests));
- seldestroy(&d->selinfo);
- xbd_free(d);
-
- DPRINTF(("/dev/xen/xenbus: close done.\n"));
- return 0;
-}
-
-const struct fileops xenbus_dev_fileops = {
- .fo_read = xenbus_dev_read,
- .fo_write = xenbus_dev_write,
- .fo_ioctl = fbadop_ioctl,
- .fo_fcntl = fnullop_fcntl,
- .fo_poll = xenbus_dev_poll,
- .fo_stat = fbadop_stat,
- .fo_close = xenbus_dev_close,
- .fo_kqfilter = fnullop_kqfilter,
- .fo_restart = xenbus_dev_restart,
-};
-
-int
-xenbus_dev_open(struct file *fp, void **fdata_r)
-{
- struct xenbus_dev_data *d;
-
- d = xbd_malloc(sizeof(*d));
- if (!d)
- return ENOMEM;
-
- mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
- d->outstanding_requests = 0;
- LIST_INIT(&d->transactions);
- LIST_INIT(&d->watches);
- xenbus_event_queue_init(&d->replies);
- d->replies.wakeup = xenbus_dev_xb_wakeup;
- d->queued_enomem = 0;
- d->want_restart = 0;
- d->wbuf_used = 0;
- d->rmsg = 0;
- d->rmsg_done = 0;
- selinit(&d->selinfo);
-
- *fdata_r = d;
- return 0;
-}
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
+++ /dev/null
-/*
- * evtdev.c
- *
- * Driver giving user-space access to the kernel's event channel.
- *
- * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-#include <sys/atomic.h>
-#include <sys/kmem.h>
-#include <sys/resource.h>
-#include <sys/vnode.h>
-
-#include "rumpxen_xendev.h"
-
-#include <bmk-rumpuser/rumpuser.h>
-#include <bmk-core/memalloc.h>
-
-#include <mini-os/events.h>
-#include <mini-os/wait.h>
-
-/* For ioctl interface. */
-#include "xenio3.h"
-
-/*----- data structures -----*/
-#define EVTDEV_RING_SIZE 2048
-#define EVTDEV_RING_MASK 2047
-#define BYTES_PER_PORT 4
-
-/* See the rump_evtdev_callback for locking information */
-u_int xenevt_ring[EVTDEV_RING_SIZE];
-u_int xenevt_ring_prod, xenevt_ring_cons;
-
-struct xenevt_dev_data {
- u_int ring[EVTDEV_RING_SIZE];
- u_int ring_cons;
- u_int ring_prod;
-#define EVTDEV_F_OVERFLOW 0x1 /* ring overflow */
- u_int flags;
-
- kmutex_t lock;
- kcondvar_t cv;
- struct selinfo selinfo; /* used by poll, see select(9) */
-};
-
-/* Kernel event -> device instance mapping */
-static kmutex_t devevent_lock;
-static struct xenevt_dev_data *devevents[NR_EVENT_CHANNELS];
-
-/*----- helpers -----*/
-#define WBITS (POLLOUT | POLLWRNORM)
-#define RBITS (POLLIN | POLLRDNORM)
-
-/* call with d->lock held */
-static void queue(struct xenevt_dev_data *d, u_int port)
-{
- KASSERT(mutex_owned(&d->lock));
-
- if (d->ring_cons == ((d->ring_prod + 1) & EVTDEV_RING_MASK)) {
- d->flags |= EVTDEV_F_OVERFLOW;
- printf("evtdev: ring overflow port %d\n", port);
- } else {
- d->ring[d->ring_prod] = port;
- membar_producer();
- d->ring_prod = (d->ring_prod + 1) & EVTDEV_RING_MASK;
- }
- /* notify */
- cv_signal(&d->cv);
- selnotify(&d->selinfo, RBITS, NOTE_SUBMIT);
-}
-
-/* This callback is serialised by mini-os */
-static void rump_evtdev_callback(u_int port)
-{
- if (xenevt_ring_cons == ((xenevt_ring_prod + 1) & EVTDEV_RING_MASK)) {
- printf("xenevt driver ring overflowed!\n");
- } else {
- xenevt_ring[xenevt_ring_prod] = port;
- membar_producer();
- xenevt_ring_prod = (xenevt_ring_prod + 1) & EVTDEV_RING_MASK;
- }
-
- minios_wake_up(&minios_events_waitq);
-}
-
-static void xenevt_thread_func(void *ign)
-{
- u_int prod = xenevt_ring_prod;
- u_int cons;
-
- /* give us a rump kernel context */
- rumpuser__hyp.hyp_schedule();
- rumpuser__hyp.hyp_lwproc_newlwp(0);
- rumpuser__hyp.hyp_unschedule();
-
- for (;;) {
- minios_wait_event(minios_events_waitq, xenevt_ring_prod != prod);
- prod = xenevt_ring_prod;
- cons = xenevt_ring_cons;
-
- membar_sync();
-
- while (cons != prod) {
- u_int port = xenevt_ring[cons];
- struct xenevt_dev_data *d;
-
- KASSERT(port < NR_EVENT_CHANNELS);
-
- mutex_enter(&devevent_lock);
-
- d = devevents[port];
-
- KASSERT(d);
-
- mutex_enter(&d->lock);
-
- queue(d, port);
-
- mutex_exit(&d->lock);
- mutex_exit(&devevent_lock);
-
- cons++;
- }
-
- membar_sync();
-
- xenevt_ring_cons = cons;
- }
-}
-
-/*----- request handling (writes to the device) -----*/
-static int
-xenevt_dev_write(struct file *fp, off_t *offset, struct uio *uio,
- kauth_cred_t cred, int flags)
-{
- struct xenevt_dev_data *d = fp->f_data;
- uint16_t *chans = NULL;
- int i, nentries, err;
- size_t size = 0;
-
- DPRINTF(("/dev/xenevt: write...\n"));
-
- if (uio->uio_resid == 0) {
- err = 0;
- goto out;
- }
-
- nentries = uio->uio_resid / sizeof(uint16_t);
- if (nentries > NR_EVENT_CHANNELS) {
- err = EMSGSIZE;
- goto out;
- }
-
- size = nentries * sizeof(uint16_t);
- chans = kmem_alloc(size, KM_SLEEP);
-
- err = uiomove(chans, uio->uio_resid, uio);
- if (err) goto out;
-
- mutex_enter(&devevent_lock);
- for (i = 0; i < nentries; i++) {
- if (chans[i] < NR_EVENT_CHANNELS &&
- devevents[chans[i]] == d)
- minios_unmask_evtchn(chans[i]);
- }
- mutex_exit(&devevent_lock);
-
- KASSERT(err == 0);
-out:
- DPRINTF(("/dev/xenevt: write done, err=%d\n", err));
- if (size) kmem_free(chans, size);
- return err;
-}
-
-static int
-xenevt_dev_read(struct file *fp, off_t *offset, struct uio *uio,
- kauth_cred_t cred, int read_flags)
-{
- struct xenevt_dev_data *d = fp->f_data;
- u_int cons, prod, len, uio_len;
- int err;
-
- DPRINTF(("/dev/xenevt: read...\n"));
-
- mutex_enter(&d->lock);
-
- err = 0;
- while (err == 0) {
- cons = d->ring_cons;
- prod = d->ring_prod;
-
- if (cons != prod) break; /* data available */
-
- if (d->flags & EVTDEV_F_OVERFLOW) break;
-
- /* nothing to read */
- if ((fp->f_flag & FNONBLOCK) == 0)
- err = cv_wait_sig(&d->cv, &d->lock);
- else
- err = EAGAIN;
- }
-
- if (err == 0 && (d->flags & EVTDEV_F_OVERFLOW))
- err = EFBIG;
-
- if (err) goto out;
-
- uio_len = uio->uio_resid / BYTES_PER_PORT;
- if (cons <= prod)
- len = prod - cons;
- else
- len = EVTDEV_RING_SIZE - cons;
- if (len > uio_len)
- len = uio_len;
- err = uiomove(&d->ring[cons], len * BYTES_PER_PORT, uio);
- if (err) goto out;
-
- cons = (cons + len) & EVTDEV_RING_MASK;
- uio_len = uio->uio_resid / BYTES_PER_PORT;
- if (uio_len == 0) goto done;
-
- /* ring wrapped */
- len = prod - cons;
- if (len > uio_len)
- len = uio_len;
- err = uiomove(&d->ring[cons], len * BYTES_PER_PORT, uio);
- if (err) goto out;
- cons = (cons + len) & EVTDEV_RING_MASK;
-
-done:
- d->ring_cons = cons;
-out:
- mutex_exit(&d->lock);
- DPRINTF(("/dev/xenevt: read done, err=%d\n", err));
- return err;
-}
-
-/*----- more exciting reading -----*/
-static int
-xenevt_dev_poll(struct file *fp, int events)
-{
- struct xenevt_dev_data *d = fp->f_data;
- int revents = 0;
-
- DPRINTF(("/dev/xenevt: poll events=0x%x...\n", events));
-
- mutex_enter(&d->lock);
-
- /* always writable because write is used to unmask event
- * channel */
- revents |= events & WBITS;
-
- if ((events & RBITS) && (d->ring_prod != d->ring_cons))
- revents |= events & RBITS;
-
- /* in the case caller only interests in read but no data
- * available to read */
- if (!revents && (events & RBITS))
- selrecord(curlwp, &d->selinfo);
-
- mutex_exit(&d->lock);
- DPRINTF(("/dev/xenevt: poll events=0x%x done, revents=0x%x\n",
- events, revents));
- return revents;
-}
-
-static int
-xenevt_dev_ioctl(struct file *fp, ulong cmd, void *data)
-{
- struct xenevt_dev_data *d = fp->f_data;
- int err;
-
- switch (cmd) {
- case IOCTL_EVTCHN_RESET:
- {
- mutex_enter(&d->lock);
- d->ring_cons = d->ring_prod = 0;
- d->flags = 0;
- mutex_exit(&d->lock);
- break;
- }
- case IOCTL_EVTCHN_BIND_VIRQ:
- {
- struct ioctl_evtchn_bind_virq *bind_virq = data;
- evtchn_bind_virq_t op;
-
- op.virq = bind_virq->virq;
- op.vcpu = 0;
- if ((err = minios_event_channel_op(EVTCHNOP_bind_virq, &op))) {
- printf("IOCTL_EVTCHN_BIND_VIRQ failed: virq %d error %d\n",
- bind_virq->virq, err);
- return -err;
- }
- bind_virq->port = op.port;
- mutex_enter(&devevent_lock);
- KASSERT(devevents[bind_virq->port] == NULL);
- devevents[bind_virq->port] = d;
- mutex_exit(&devevent_lock);
- minios_bind_evtchn(bind_virq->port, minios_evtdev_handler, d);
- minios_unmask_evtchn(bind_virq->port);
-
- break;
- }
- case IOCTL_EVTCHN_BIND_INTERDOMAIN:
- {
- struct ioctl_evtchn_bind_interdomain *bind_intd = data;
- evtchn_bind_interdomain_t op;
-
- op.remote_dom = bind_intd->remote_domain;
- op.remote_port = bind_intd->remote_port;
- if ((err = minios_event_channel_op(EVTCHNOP_bind_interdomain, &op))) {
- printf("IOCTL_EVTCHN_BIND_INTERDOMAIN failed: "
- "remote domain %d port %d error %d\n",
- bind_intd->remote_domain, bind_intd->remote_port, err);
- return -err;
- }
- bind_intd->port = op.local_port;
- mutex_enter(&devevent_lock);
- KASSERT(devevents[bind_intd->port] == NULL);
- devevents[bind_intd->port] = d;
- mutex_exit(&devevent_lock);
- minios_bind_evtchn(bind_intd->port, minios_evtdev_handler, d);
- minios_unmask_evtchn(bind_intd->port);
-
- break;
- }
- case IOCTL_EVTCHN_BIND_UNBOUND_PORT:
- {
- struct ioctl_evtchn_bind_unbound_port *bind_unbound = data;
- evtchn_alloc_unbound_t op;
-
- op.dom = DOMID_SELF;
- op.remote_dom = bind_unbound->remote_domain;
- if ((err = minios_event_channel_op(EVTCHNOP_alloc_unbound, &op))) {
- printf("IOCTL_EVTCHN_BIND_UNBOUND_PORT failed: "
- "remote domain %d error %d\n",
- bind_unbound->remote_domain, err);
- return -err;
- }
- bind_unbound->port = op.port;
- mutex_enter(&devevent_lock);
- KASSERT(devevents[bind_unbound->port] == NULL);
- devevents[bind_unbound->port] = d;
- mutex_exit(&devevent_lock);
- minios_bind_evtchn(bind_unbound->port, minios_evtdev_handler, d);
- minios_unmask_evtchn(bind_unbound->port);
-
- break;
- }
- case IOCTL_EVTCHN_UNBIND:
- {
- struct ioctl_evtchn_unbind *unbind = data;
-
- if (unbind->port >= NR_EVENT_CHANNELS)
- return EINVAL;
- mutex_enter(&devevent_lock);
- if (devevents[unbind->port] != d) {
- mutex_exit(&devevent_lock);
- return ENOTCONN;
- }
- devevents[unbind->port] = NULL;
- mutex_exit(&devevent_lock);
- minios_mask_evtchn(unbind->port);
- minios_unbind_evtchn(unbind->port);
-
- break;
- }
- case IOCTL_EVTCHN_NOTIFY:
- {
- struct ioctl_evtchn_notify *notify = data;
-
- if (notify->port >= NR_EVENT_CHANNELS)
- return EINVAL;
- mutex_enter(&devevent_lock);
- if (devevents[notify->port] != d) {
- mutex_exit(&devevent_lock);
- return ENOTCONN;
- }
- minios_notify_remote_via_evtchn(notify->port);
- mutex_exit(&devevent_lock);
-
- break;
- }
- default:
- return EINVAL;
- }
-
- return 0;
-}
-
-/*----- setup etc. -----*/
-
-static int
-xenevt_dev_close(struct file *fp)
-{
- struct xenevt_dev_data *d = fp->f_data;
- int i;
-
- DPRINTF(("/dev/xenevt: close...\n"));
-
- mutex_enter(&devevent_lock);
- mutex_enter(&d->lock);
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (devevents[i] == d) {
- minios_unbind_evtchn(i);
- devevents[i] = NULL;
- }
- }
- mutex_exit(&d->lock);
- mutex_exit(&devevent_lock);
-
- seldestroy(&d->selinfo);
- mutex_destroy(&d->lock);
- kmem_free(d, sizeof(*d));
- cv_destroy(&d->cv);
-
- DPRINTF(("/dev/xenevt: close done.\n"));
-
- fp->f_data = NULL;
-
- return 0;
-}
-
-const struct fileops xenevt_dev_fileops = {
- .fo_read = xenevt_dev_read,
- .fo_write = xenevt_dev_write,
- .fo_ioctl = xenevt_dev_ioctl,
- .fo_fcntl = fnullop_fcntl,
- .fo_poll = xenevt_dev_poll,
- .fo_stat = fbadop_stat,
- .fo_close = xenevt_dev_close,
- .fo_kqfilter = fnullop_kqfilter,
- .fo_restart = fnullop_restart,
-};
-
-int
-xenevt_dev_open(struct file *fp, void **fdata_r)
-{
- struct xenevt_dev_data *d;
-
- d = kmem_zalloc(sizeof(*d), KM_SLEEP);
-
- mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
- selinit(&d->selinfo);
- cv_init(&d->cv, "xenevt");
-
- *fdata_r = d;
- return 0;
-}
-
-void xenevt_dev_init(void)
-{
- mutex_init(&devevent_lock, MUTEX_DEFAULT, IPL_NONE);
- minios_events_register_rump_callback(rump_evtdev_callback);
- bmk_sched_create("xenevt", NULL, 0, xenevt_thread_func, NULL,
- NULL, 0);
-}
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
+++ /dev/null
-/* $NetBSD: privcmd.c,v 1.49 2014/10/17 16:37:02 christos Exp $ */
-
-/*-
- * Copyright (c) 2004 Christian Limpach.
- * Copyright (c) 2015 Wei Liu.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: privcmd.c,v 1.49 2014/10/17 16:37:02 christos Exp $");
-
-#include <sys/errno.h>
-#include <sys/mount.h>
-#include <sys/dirent.h>
-#include <uvm/uvm_prot.h>
-#include <sys/vnode_if.h>
-#include <sys/vnode.h>
-#include <miscfs/kernfs/kernfs.h>
-
-#include "rumpxen_xendev.h"
-
-#include <mini-os/mm.h>
-
-#include "xenio.h"
-
-static int
-xenprivcmd_xen2bsd_errno(int error)
-{
- /*
- * Xen uses System V error codes.
- * In order to keep bloat as minimal as possible,
- * only convert what really impact us.
- */
-
- switch(-error) {
- case 0:
- return 0;
- case 1:
- return EPERM;
- case 2:
- return ENOENT;
- case 3:
- return ESRCH;
- case 4:
- return EINTR;
- case 5:
- return EIO;
- case 6:
- return ENXIO;
- case 7:
- return E2BIG;
- case 8:
- return ENOEXEC;
- case 9:
- return EBADF;
- case 10:
- return ECHILD;
- case 11:
- return EAGAIN;
- case 12:
- return ENOMEM;
- case 13:
- return EACCES;
- case 14:
- return EFAULT;
- case 15:
- return ENOTBLK;
- case 16:
- return EBUSY;
- case 17:
- return EEXIST;
- case 18:
- return EXDEV;
- case 19:
- return ENODEV;
- case 20:
- return ENOTDIR;
- case 21:
- return EISDIR;
- case 22:
- return EINVAL;
- case 23:
- return ENFILE;
- case 24:
- return EMFILE;
- case 25:
- return ENOTTY;
- case 26:
- return ETXTBSY;
- case 27:
- return EFBIG;
- case 28:
- return ENOSPC;
- case 29:
- return ESPIPE;
- case 30:
- return EROFS;
- case 31:
- return EMLINK;
- case 32:
- return EPIPE;
- case 33:
- return EDOM;
- case 34:
- return ERANGE;
- case 35:
- return EDEADLK;
- case 36:
- return ENAMETOOLONG;
- case 37:
- return ENOLCK;
- case 38:
- return ENOSYS;
- case 39:
- return ENOTEMPTY;
- case 40:
- return ELOOP;
- case 42:
- return ENOMSG;
- case 43:
- return EIDRM;
- case 60:
- return ENOSTR;
- case 61:
- return ENODATA;
- case 62:
- return ETIME;
- case 63:
- return ENOSR;
- case 66:
- return EREMOTE;
- case 74:
- return EBADMSG;
- case 75:
- return EOVERFLOW;
- case 84:
- return EILSEQ;
- case 87:
- return EUSERS;
- case 88:
- return ENOTSOCK;
- case 89:
- return EDESTADDRREQ;
- case 90:
- return EMSGSIZE;
- case 91:
- return EPROTOTYPE;
- case 92:
- return ENOPROTOOPT;
- case 93:
- return EPROTONOSUPPORT;
- case 94:
- return ESOCKTNOSUPPORT;
- case 95:
- return EOPNOTSUPP;
- case 96:
- return EPFNOSUPPORT;
- case 97:
- return EAFNOSUPPORT;
- case 98:
- return EADDRINUSE;
- case 99:
- return EADDRNOTAVAIL;
- case 100:
- return ENETDOWN;
- case 101:
- return ENETUNREACH;
- case 102:
- return ENETRESET;
- case 103:
- return ECONNABORTED;
- case 104:
- return ECONNRESET;
- case 105:
- return ENOBUFS;
- case 106:
- return EISCONN;
- case 107:
- return ENOTCONN;
- case 108:
- return ESHUTDOWN;
- case 109:
- return ETOOMANYREFS;
- case 110:
- return ETIMEDOUT;
- case 111:
- return ECONNREFUSED;
- case 112:
- return EHOSTDOWN;
- case 113:
- return EHOSTUNREACH;
- case 114:
- return EALREADY;
- case 115:
- return EINPROGRESS;
- case 116:
- return ESTALE;
- case 122:
- return EDQUOT;
- default:
- printf("unknown xen error code %d\n", -error);
- return -error;
- }
-}
-
-static int
-xenprivcmd_ioctl(void *v)
-{
- int err;
- struct vop_ioctl_args *ap = v;
-
- switch (ap->a_command) {
- case IOCTL_PRIVCMD_HYPERCALL:
- {
- privcmd_hypercall_t *hc = (privcmd_hypercall_t *)ap->a_data;
-
- err = minios_hypercall(hc->op, hc->arg[0], hc->arg[1],
- hc->arg[2], hc->arg[3], hc->arg[4]);
- if (err >= 0) {
- hc->retval = err;
- err = 0;
- } else {
- err = xenprivcmd_xen2bsd_errno(err);
- hc->retval = 0;
- }
-
- break;
- }
- case IOCTL_PRIVCMD_MMAP:
- {
- int i;
- privcmd_mmap_t *mcmd = ap->a_data;
- privcmd_mmap_entry_t mentry;
-
- for (i = 0; i < mcmd->num; i++) {
- err = copyin(&mcmd->entry[i], &mentry, sizeof(mentry));
- if (err)
- return err;
-
- if (mentry.npages == 0 || mentry.va & PAGE_MASK)
- return EINVAL;
-
- /* Call with err argument == NULL will just crash
- * the domain.
- */
- minios_map_frames(mentry.va, &mentry.mfn, mentry.npages,
- 0, 0, mcmd->dom, NULL,
- minios_get_l1prot());
- }
-
- err = 0;
- break;
- }
- case IOCTL_PRIVCMD_MMAPBATCH:
- {
- privcmd_mmapbatch_t *pmb = ap->a_data;
-
- if (pmb->num == 0 || pmb->addr & PAGE_MASK)
- return EINVAL;
-
- /* Call with err argument == NULL will just crash the
- * domain.
- */
- minios_map_frames(pmb->addr, pmb->arr, pmb->num, 1, 0,
- pmb->dom, NULL, minios_get_l1prot());
- err = 0;
- break;
- }
- default:
- err = EINVAL;
- }
-
- return err;
-}
-
-static const struct kernfs_fileop xenprivcmd_fileops[] = {
- { .kf_fileop = KERNFS_FILEOP_IOCTL, .kf_vop = xenprivcmd_ioctl },
-};
-
-#define XENPRIVCMD_MODE (S_IRUSR)
-extern kernfs_parentdir_t *kernxen_pkt;
-void xenprivcmd_init(void)
-{
- kernfs_entry_t *dkt;
- kfstype kfst;
-
- kfst = KERNFS_ALLOCTYPE(xenprivcmd_fileops);
-
- KERNFS_ALLOCENTRY(dkt, M_TEMP, M_WAITOK);
- KERNFS_INITENTRY(dkt, DT_REG, "privcmd", NULL, kfst, VREG,
- XENPRIVCMD_MODE);
- kernfs_addentry(kernxen_pkt, dkt);
-}
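The hypercall path above follows the usual privcmd convention: minios_hypercall() returns a non-negative result on success and a negated Linux-style errno on failure, which xenprivcmd_xen2bsd_errno() translates into the NetBSD value before it reaches the caller. Below is a minimal caller-side sketch of driving the kernfs node this file registers; the /kern/xen/privcmd path assumes kernfs is mounted at /kern, the Xen public headers that xenio.h needs are assumed to be on the include path, and the code is illustrative only, not part of the patch:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

#include "xenio.h"

/* Issue an arbitrary hypercall through the privcmd node; on failure the
 * errno seen here is already the converted NetBSD value. */
static long
do_hypercall(unsigned long op, unsigned long a0, unsigned long a1)
{
	privcmd_hypercall_t hc = { .op = op, .arg = { a0, a1 } };
	int fd = open("/kern/xen/privcmd", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc) == -1) {
		close(fd);
		return -1;
	}
	close(fd);
	return hc.retval;
}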
+++ /dev/null
-/*
- * Copyright (c) 2014 Citrix
- *
- * Header for /dev/xen* in a rumpkernel.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef RUMP_DEV_XEN_H
-#define RUMP_DEV_XEN_H
-
-#include <sys/filedesc.h>
-#include <sys/malloc.h>
-#include <sys/lwp.h>
-#include <sys/device.h>
-#include <sys/conf.h>
-#include <sys/stat.h>
-#include <sys/select.h>
-#include <sys/file.h>
-#include <sys/poll.h>
-
-
-/* nicked from NetBSD sys/dev/pci/cxgb/cxgb_adapter.h */
-#ifndef container_of
-#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
-#endif
-
-//#define RUMP_DEV_XEN_DEBUG 1
-
-#ifdef RUMP_DEV_XEN_DEBUG
-#define DPRINTF(a) (printf a)
-#else
-#define DPRINTF(a) /* nothing */
-#endif
-
-
-/* Device operations, for devs table in rump_dev_xen.c */
-
-extern int xenbus_dev_open(struct file *fp, void **fdata);
-extern const struct fileops xenbus_dev_fileops;
-extern void xenevt_dev_init(void);
-extern int xenevt_dev_open(struct file *fp, void **fdata);
-extern const struct fileops xenevt_dev_fileops;
-extern void xenprivcmd_init(void);
-
-static inline void*
-xbd_malloc(size_t sz)
-{
- return malloc(sz, M_DEVBUF, M_WAITOK);
-}
-
-static inline void
-xbd_free(void *p)
-{
- if (p) /* free(9) is not like free(3)! */
- free(p, M_DEVBUF);
-}
-
-char *xbd_strdup(const char *s);
-
-#endif /*RUMP_DEV_XEN_H*/
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
+++ /dev/null
-/*
- * rump_dev_xen.c
- *
- * Machinery for setting up the contents of /dev/xen* in a rumpkernel.
- *
- * Copyright (c) 2014 Citrix
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: $");
-
-#include "rumpxen_xendev.h"
-
-#include "rump_private.h"
-#include "rump_vfs_private.h"
-
-#include <sys/vfs_syscalls.h>
-#include <sys/dirent.h>
-#include <miscfs/kernfs/kernfs.h>
-
-char *xbd_strdup(const char *s)
-{
- char *r;
- size_t l = strlen(s) + 1;
- r = xbd_malloc(l);
- if (!r)
- return r;
- memcpy(r, s, l);
- return r;
-}
-
-#define DEV_XEN "/dev/xen"
-
-static const struct xen_dev_info {
- const char *path;
- void (*xd_init)(void);
- int (*xd_open)(struct file *fp, void **fdata_r);
- const struct fileops *fo;
-} devs[] = {
-#define XDEV(cmin, path, init_fn, component) \
- [cmin] = { path, init_fn, component##_dev_open, \
- &component##_dev_fileops }
- XDEV(0, DEV_XEN "/xenbus", NULL, xenbus),
- XDEV(1, "/dev/xenevt", xenevt_dev_init, xenevt),
-#undef XDEV
-};
-
-#define NUM_DEV_INFOS (sizeof(devs)/sizeof(devs[0]))
-
-static int
-xen_dev_open(dev_t dev, int flags, int mode, struct lwp *l)
-{
- const struct xen_dev_info *xdinfo;
- int fd, err;
- struct file *fp;
- void *fdata;
-
- DPRINTF(("xen devsw: opening minor=%lu\n", (unsigned long)minor(dev)));
-
- if (minor(dev) < 0 || minor(dev) >= NUM_DEV_INFOS)
- return ENODEV;
-
- xdinfo = &devs[minor(dev)];
-
- if (!xdinfo->xd_open)
- return ENODEV;
-
- err = fd_allocfile(&fp, &fd);
- if (err)
- return err;
-
- DPRINTF(("%s: opening...\n", xdinfo->path));
-
- err = xdinfo->xd_open(fp, &fdata);
- if (err) {
- fd_abort(curproc, fp, fd);
- return err;
- }
-
- DPRINTF(("%s: opened, fd_clone\n", xdinfo->path));
-
- return fd_clone(fp, fd, flags, xdinfo->fo, fdata);
-}
-
-static const struct cdevsw xen_dev_cdevsw = {
- .d_open = xen_dev_open,
- .d_close = nullclose,
- .d_read = noread,
- .d_write = nowrite,
- .d_ioctl = noioctl,
- .d_stop = nostop,
- .d_tty = notty,
- .d_poll = nopoll,
- .d_mmap = nommap,
- .d_kqfilter = nokqfilter,
- .d_flag = D_OTHER
-};
-
-#define DIR_MODE (S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
-kernfs_parentdir_t *kernxen_pkt;
-static void xenkernfs_init(void)
-{
- kernfs_entry_t *dkt;
- KERNFS_ALLOCENTRY(dkt, M_TEMP, M_WAITOK);
- KERNFS_INITENTRY(dkt, DT_DIR, "xen", NULL, KFSsubdir, VDIR, DIR_MODE);
- kernfs_addentry(NULL, dkt);
- kernxen_pkt = KERNFS_ENTOPARENTDIR(dkt);
-}
-
-RUMP_COMPONENT(RUMP_COMPONENT_DEV)
-{
- devmajor_t bmaj, cmaj;
- devminor_t cmin;
- int err;
- const struct xen_dev_info *xdinfo;
-
- DPRINTF(("xen devsw: attaching\n"));
-
- err = do_sys_mkdir(DEV_XEN, 0755, UIO_SYSSPACE);
- if (err && err != EEXIST)
- panic("xen devsw: mkdir " DEV_XEN " failed: %d", err);
-
- bmaj = cmaj = NODEVMAJOR;
- err = devsw_attach("xen", NULL, &bmaj, &xen_dev_cdevsw, &cmaj);
- if (err)
- panic("xen devsw: attach failed: %d", err);
-
- for (cmin = 0; cmin < NUM_DEV_INFOS; cmin++) {
- xdinfo = &devs[cmin];
- err = rump_vfs_makeonedevnode(S_IFCHR, xdinfo->path, cmaj, cmin);
- if (err)
- panic("%s: cannot create device node: %d",
- xdinfo->path, err);
- if (xdinfo->xd_init)
- xdinfo->xd_init();
- DPRINTF(("%s: created, %lu.%lu\n",
- xdinfo->path, (unsigned long)cmaj, (unsigned long)cmin));
- }
-
- xenkernfs_init();
- xenprivcmd_init();
-}
-
-/*
- * Local variables:
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
-
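For readability, the two XDEV() entries in the devs[] table above expand to the following; this is just the macro written out, not additional source:

/* Expansion of the XDEV() entries in devs[] (illustrative):
 *   [0] = { "/dev/xen/xenbus", NULL,            xenbus_dev_open, &xenbus_dev_fileops }
 *   [1] = { "/dev/xenevt",     xenevt_dev_init, xenevt_dev_open, &xenevt_dev_fileops }
 *
 * xen_dev_open() then selects the entry by minor number, calls its
 * xd_open() hook and hands the resulting per-open state to fd_clone()
 * together with the entry's fileops.
 */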
+++ /dev/null
-/* $NetBSD: xenio.h,v 1.9 2011/01/10 11:13:03 cegger Exp $ */
-
-/******************************************************************************
- * privcmd.h
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __XEN_XENIO_H__
-#define __XEN_XENIO_H__
-
-/* Interface to /proc/xen/privcmd */
-
-typedef struct privcmd_hypercall
-{
- unsigned long op;
- unsigned long arg[5];
- long retval;
-} privcmd_hypercall_t;
-
-typedef struct privcmd_mmap_entry {
- unsigned long va;
- unsigned long mfn;
- unsigned long npages;
-} privcmd_mmap_entry_t;
-
-typedef struct privcmd_mmap {
- int num;
- domid_t dom; /* target domain */
- privcmd_mmap_entry_t *entry;
-} privcmd_mmap_t;
-
-typedef struct privcmd_mmapbatch {
- int num; /* number of pages to populate */
- domid_t dom; /* target domain */
- unsigned long addr; /* virtual address */
- unsigned long *arr; /* array of mfns - top nibble set on err */
-} privcmd_mmapbatch_t;
-
-typedef struct privcmd_mmapbatch_v2 {
- int num; /* number of pages to populate */
- domid_t dom; /* target domain */
- uint64_t addr; /* virtual address */
- const xen_pfn_t *arr; /* array of mfns */
- int *err; /* array of error codes */
-} privcmd_mmapbatch_v2_t;
-
-typedef struct privcmd_blkmsg
-{
- unsigned long op;
- void *buf;
- int buf_size;
-} privcmd_blkmsg_t;
-
-/*
- * @cmd: IOCTL_PRIVCMD_HYPERCALL
- * @arg: &privcmd_hypercall_t
- * Return: Value returned from execution of the specified hypercall.
- */
-#define IOCTL_PRIVCMD_HYPERCALL \
- _IOWR('P', 0, privcmd_hypercall_t)
-
-#if defined(_KERNEL)
-/* compat */
-#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN_OLD \
- _IO('P', 1)
-
-typedef struct oprivcmd_hypercall
-{
- unsigned long op;
- unsigned long arg[5];
-} oprivcmd_hypercall_t;
-
-#define IOCTL_PRIVCMD_HYPERCALL_OLD \
- _IOWR('P', 0, oprivcmd_hypercall_t)
-#endif /* defined(_KERNEL) */
-
-#define IOCTL_PRIVCMD_MMAP \
- _IOW('P', 2, privcmd_mmap_t)
-#define IOCTL_PRIVCMD_MMAPBATCH \
- _IOW('P', 3, privcmd_mmapbatch_t)
-#define IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN \
- _IOR('P', 4, unsigned long)
-
-/*
- * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
- * @arg: n/a
- * Return: Port associated with domain-controller end of control event channel
- * for the initial domain.
- */
-#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
- _IOR('P', 5, int)
-#define IOCTL_PRIVCMD_MMAPBATCH_V2 \
- _IOW('P', 6, privcmd_mmapbatch_v2_t)
-
-/* Interface to /dev/xenevt */
-/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
-#define EVTCHN_RESET _IO('E', 1)
-/* EVTCHN_BIND: Bind to the specified event-channel port. */
-#define EVTCHN_BIND _IOW('E', 2, unsigned long)
-/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
-#define EVTCHN_UNBIND _IOW('E', 3, unsigned long)
-
-#endif /* __XEN_XENIO_H__ */
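A hedged sketch of how the /dev/xenevt ioctls defined above would be used from a client; error handling and the exact layout of the data returned by read() (produced by xenevt_dev_read() earlier in this patch) are elided, and the function is illustrative rather than part of the patch:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

#include "xenio.h"

/* Bind to an event channel port, block until an event is delivered,
 * then unbind again. */
static int
wait_for_event(unsigned long port)
{
	char buf[64];
	int fd = open("/dev/xenevt", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, EVTCHN_BIND, &port) == -1 ||
	    read(fd, buf, sizeof(buf)) == -1) {
		close(fd);
		return -1;
	}
	(void)ioctl(fd, EVTCHN_UNBIND, &port);
	close(fd);
	return 0;
}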
+++ /dev/null
-/* $NetBSD: xenio3.h,v 1.3 2010/09/03 06:07:24 cegger Exp $ */
-/******************************************************************************
- * evtchn.h
- *
- * Interface to /dev/xen/evtchn.
- *
- * Copyright (c) 2003-2005, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __XEN_XENIO3_H__
-#define __XEN_XENIO3_H__
-
-/*
- * Bind a fresh port to VIRQ @virq.
- * Return allocated port.
- */
-#define IOCTL_EVTCHN_BIND_VIRQ \
- _IOWR('E', 4, struct ioctl_evtchn_bind_virq)
-struct ioctl_evtchn_bind_virq {
- unsigned int virq;
- unsigned int port;
-};
-
-/*
- * Bind a fresh port to remote <@remote_domain, @remote_port>.
- * Return allocated port.
- */
-#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
- _IOWR('E', 5, struct ioctl_evtchn_bind_interdomain)
-struct ioctl_evtchn_bind_interdomain {
- unsigned int remote_domain, remote_port;
- unsigned int port;
-};
-
-/*
- * Allocate a fresh port for binding to @remote_domain.
- * Return allocated port.
- */
-#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
- _IOWR('E', 6, struct ioctl_evtchn_bind_unbound_port)
-struct ioctl_evtchn_bind_unbound_port {
- unsigned int remote_domain;
- unsigned int port;
-};
-
-/*
- * Unbind previously allocated @port.
- */
-#define IOCTL_EVTCHN_UNBIND \
- _IOW('E', 7, struct ioctl_evtchn_unbind)
-struct ioctl_evtchn_unbind {
- unsigned int port;
-};
-
-/*
- * Send event to previously allocated @port.
- */
-#define IOCTL_EVTCHN_NOTIFY \
- _IOW('E', 8, struct ioctl_evtchn_notify)
-struct ioctl_evtchn_notify {
- unsigned int port;
-};
-
-/* Clear and reinitialise the event buffer. Clear error condition. */
-#define IOCTL_EVTCHN_RESET \
- _IO('E', 9)
-
-#endif /* __XEN_XENIO3_H__ */
+++ /dev/null
-.include <bsd.own.mk>
-
-LIB= rumpnet_xenif
-
-SRCS= if_virt.c
-SRCS+= xenif_component.c
-
-RUMPTOP= ${TOPRUMP}
-
-IFBASE= -DVIRTIF_BASE=xenif
-
-CPPFLAGS+= -I${RUMPTOP}/librump/rumpkern -I${RUMPTOP}/librump/rumpnet
-CPPFLAGS+= -I${.CURDIR}
-CPPFLAGS+= ${IFBASE}
-
-RUMPCOMP_USER_SRCS= xenif_user.c
-RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/..
-RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/../xen/include
-RUMPCOMP_USER_CPPFLAGS+= -I${.CURDIR}/../../../include
-RUMPCOMP_USER_CPPFLAGS+= ${IFBASE}
-
-# XXX
-.undef RUMPKERN_ONLY
-
-.include "${RUMPTOP}/Makefile.rump"
-.include <bsd.lib.mk>
-.include <bsd.klinks.mk>
+++ /dev/null
-/* $NetBSD: if_virt.c,v 1.36 2013/07/04 11:46:51 pooka Exp $ */
-
-/*
- * Copyright (c) 2008, 2013 Antti Kantee. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_virt.c,v 1.36 2013/07/04 11:46:51 pooka Exp $");
-
-#include <sys/param.h>
-#include <sys/condvar.h>
-#include <sys/fcntl.h>
-#include <sys/kernel.h>
-#include <sys/kmem.h>
-#include <sys/kthread.h>
-#include <sys/mutex.h>
-#include <sys/poll.h>
-#include <sys/sockio.h>
-#include <sys/socketvar.h>
-#include <sys/cprng.h>
-
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_ether.h>
-#include <net/if_tap.h>
-
-#include <netinet/in.h>
-#include <netinet/in_var.h>
-
-#include <rump/rump.h>
-
-#include "rump_private.h"
-#include "rump_net_private.h"
-
-#include "if_virt.h"
-#include "if_virt_user.h"
-
-/*
- * Virtual interface. Uses hypercalls to shovel packets back
- * and forth. The exact method for shoveling depends on the
- * hypercall implementation.
- */
-
-static int virtif_init(struct ifnet *);
-static int virtif_ioctl(struct ifnet *, u_long, void *);
-static void virtif_start(struct ifnet *);
-static void virtif_stop(struct ifnet *, int);
-
-struct virtif_sc {
- struct ethercom sc_ec;
- struct virtif_user *sc_viu;
-};
-
-static int virtif_clone(struct if_clone *, int);
-static int virtif_unclone(struct ifnet *);
-
-struct if_clone VIF_CLONER =
- IF_CLONE_INITIALIZER(VIF_NAME, virtif_clone, virtif_unclone);
-
-static int
-virtif_clone(struct if_clone *ifc, int num)
-{
- struct virtif_sc *sc;
- struct virtif_user *viu;
- struct ifnet *ifp;
- uint8_t enaddr[ETHER_ADDR_LEN] = { 0xb2, 0x0a, 0x00, 0x0b, 0x0e, 0x01 };
- char enaddrstr[3*ETHER_ADDR_LEN];
- int error = 0;
-
- if (num >= 0x100)
- return E2BIG;
-
- enaddr[2] = cprng_fast32() & 0xff;
- enaddr[5] = num;
-
- sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
-
- if ((error = VIFHYPER_CREATE(num, sc, enaddr, &viu)) != 0) {
- kmem_free(sc, sizeof(*sc));
- return error;
- }
- sc->sc_viu = viu;
-
- ifp = &sc->sc_ec.ec_if;
- snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", VIF_NAME, num);
- ifp->if_softc = sc;
-
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = virtif_init;
- ifp->if_ioctl = virtif_ioctl;
- ifp->if_start = virtif_start;
- ifp->if_stop = virtif_stop;
- IFQ_SET_READY(&ifp->if_snd);
-
- if_attach(ifp);
- ether_ifattach(ifp, enaddr);
-
- ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr);
- aprint_normal_ifnet(ifp, "Ethernet address %s\n", enaddrstr);
-
- if (error) {
- virtif_unclone(ifp);
- }
-
- return error;
-}
-
-static int
-virtif_unclone(struct ifnet *ifp)
-{
- struct virtif_sc *sc = ifp->if_softc;
-
- VIFHYPER_DYING(sc->sc_viu);
-
- virtif_stop(ifp, 1);
- if_down(ifp);
-
- VIFHYPER_DESTROY(sc->sc_viu);
-
- kmem_free(sc, sizeof(*sc));
-
- ether_ifdetach(ifp);
- if_detach(ifp);
-
- return 0;
-}
-
-static int
-virtif_init(struct ifnet *ifp)
-{
-
- ifp->if_flags |= IFF_RUNNING;
- return 0;
-}
-
-static int
-virtif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
-{
- int s, rv;
-
- s = splnet();
- rv = ether_ioctl(ifp, cmd, data);
- if (rv == ENETRESET)
- rv = 0;
- splx(s);
-
- return rv;
-}
-
-/*
- * Output packets in-context until outgoing queue is empty.
- * Assume that VIFHYPER_SEND() is fast enough to not make it
- * necessary to drop kernel_lock.
- */
-#define LB_SH 32
-static void
-virtif_start(struct ifnet *ifp)
-{
- struct virtif_sc *sc = ifp->if_softc;
- struct mbuf *m, *m0;
- struct iovec io[LB_SH];
- int i;
-
- ifp->if_flags |= IFF_OACTIVE;
-
- for (;;) {
- IF_DEQUEUE(&ifp->if_snd, m0);
- if (!m0) {
- break;
- }
-
- m = m0;
- for (i = 0; i < LB_SH && m; i++) {
- io[i].iov_base = mtod(m, void *);
- io[i].iov_len = m->m_len;
- m = m->m_next;
- }
- if (i == LB_SH)
- panic("lazy bum");
- bpf_mtap(ifp, m0);
-
- VIFHYPER_SEND(sc->sc_viu, io, i);
-
- m_freem(m0);
- }
-
- ifp->if_flags &= ~IFF_OACTIVE;
-}
-
-static void
-virtif_stop(struct ifnet *ifp, int disable)
-{
-
- ifp->if_flags &= ~IFF_RUNNING;
-}
-
-void
-rump_virtif_pktdeliver(struct virtif_sc *sc, struct iovec *iov, size_t iovlen)
-{
- struct ifnet *ifp = &sc->sc_ec.ec_if;
- struct mbuf *m;
- size_t i;
- int off, olen;
-
- if ((ifp->if_flags & IFF_RUNNING) == 0)
- return;
-
- m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL)
- return; /* drop packet */
- m->m_len = m->m_pkthdr.len = 0;
-
- for (i = 0, off = 0; i < iovlen; i++) {
- olen = m->m_pkthdr.len;
- m_copyback(m, off, iov[i].iov_len, iov[i].iov_base);
- off += iov[i].iov_len;
- if (olen + off != m->m_pkthdr.len) {
- aprint_verbose_ifnet(ifp, "m_copyback failed\n");
- m_freem(m);
- return;
- }
- }
-
- m->m_pkthdr.rcvif = ifp;
- KERNEL_LOCK(1, NULL);
- bpf_mtap(ifp, m);
- ether_input(ifp, m);
- KERNEL_UNLOCK_LAST(NULL);
-}
+++ /dev/null
-/* $NetBSD: if_virt.h,v 1.2 2013/07/04 11:58:11 pooka Exp $ */
-
-/*
- * NOTE! This file is supposed to work on !NetBSD platforms.
- */
-
-#ifndef VIRTIF_BASE
-#error Define VIRTIF_BASE
-#endif
-
-#define VIF_STRING(x) #x
-#define VIF_STRINGIFY(x) VIF_STRING(x)
-#define VIF_CONCAT(x,y) x##y
-#define VIF_CONCAT3(x,y,z) x##y##z
-#define VIF_BASENAME(x,y) VIF_CONCAT(x,y)
-#define VIF_BASENAME3(x,y,z) VIF_CONCAT3(x,y,z)
-
-#define VIF_CLONER VIF_BASENAME(VIRTIF_BASE,_cloner)
-#define VIF_NAME VIF_STRINGIFY(VIRTIF_BASE)
-
-#define VIFHYPER_CREATE VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_create)
-#define VIFHYPER_DYING VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_dying)
-#define VIFHYPER_DESTROY VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_destroy)
-#define VIFHYPER_SEND VIF_BASENAME3(rumpcomp_,VIRTIF_BASE,_send)
-
-struct virtif_sc;
-void rump_virtif_pktdeliver(struct virtif_sc *, struct iovec *, size_t);
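Since if_virt.h is built here with -DVIRTIF_BASE=xenif (see the Makefile in this patch), the name-mangling macros above resolve as follows; the list is derived from the VIF_* definitions, not additional source:

/* With VIRTIF_BASE=xenif:
 *   VIF_NAME         -> "xenif"
 *   VIF_CLONER       -> xenif_cloner
 *   VIFHYPER_CREATE  -> rumpcomp_xenif_create
 *   VIFHYPER_DYING   -> rumpcomp_xenif_dying
 *   VIFHYPER_DESTROY -> rumpcomp_xenif_destroy
 *   VIFHYPER_SEND    -> rumpcomp_xenif_send
 */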
+++ /dev/null
-/* $NetBSD: rumpcomp_user.h,v 1.4 2013/07/04 11:46:51 pooka Exp $ */
-
-/*
- * Copyright (c) 2013 Antti Kantee. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-struct virtif_user;
-
-int VIFHYPER_CREATE(int, struct virtif_sc *, uint8_t *,
- struct virtif_user **);
-void VIFHYPER_DYING(struct virtif_user *);
-void VIFHYPER_DESTROY(struct virtif_user *);
-
-void VIFHYPER_SEND(struct virtif_user *, struct iovec *, size_t);
+++ /dev/null
-/* $NetBSD: component.c,v 1.4 2013/07/04 11:46:51 pooka Exp $ */
-
-/*
- * Copyright (c) 2009 Antti Kantee. All Rights Reserved.
- *
- * Development of this software was supported by The Nokia Foundation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: component.c,v 1.4 2013/07/04 11:46:51 pooka Exp $");
-
-#include <sys/param.h>
-#include <sys/domain.h>
-#include <sys/protosw.h>
-
-#include <net/if.h>
-
-#include "rump_private.h"
-#include "rump_net_private.h"
-#include "if_virt.h"
-
-RUMP_COMPONENT(RUMP_COMPONENT_NET_IF)
-{
- extern struct if_clone VIF_CLONER; /* XXX */
-
- if_clone_attach(&VIF_CLONER);
-}
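Once the component above has attached the cloner, creating an interface goes through the standard cloner machinery. A hedged sketch, from a rump local client's point of view, of the request that ends up calling virtif_clone(); the function name here is illustrative and not part of the patch:

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

#include <string.h>

#include <rump/rump.h>
#include <rump/rump_syscalls.h>

/* SIOCIFCREATE on "xenif0" looks up the "xenif" cloner registered by the
 * component above and invokes virtif_clone(ifc, 0). */
static int
create_xenif0(void)
{
	struct ifreq ifr;
	int s, rv;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "xenif0", sizeof(ifr.ifr_name));

	s = rump_sys_socket(PF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		return -1;
	rv = rump_sys_ioctl(s, SIOCIFCREATE, &ifr);
	rump_sys_close(s);
	return rv;
}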
+++ /dev/null
-/*
- * Copyright (c) 2013 Antti Kantee. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/* XXX */
-struct iovec {
- void *iov_base;
- unsigned long iov_len;
-};
-
-#include <mini-os/os.h>
-#include <mini-os/netfront.h>
-
-#include <bmk-core/errno.h>
-#include <bmk-core/memalloc.h>
-#include <bmk-core/string.h>
-#include <bmk-core/sched.h>
-
-#include <bmk-rumpuser/core_types.h>
-#include <bmk-rumpuser/rumpuser.h>
-
-#include "if_virt.h"
-#include "if_virt_user.h"
-
-/*
- * For now, shovel the packets from the interrupt to a
- * thread context via an intermediate set of buffers. Need
- * to fix this a bit down the road.
- */
-#define MAXPKT 2000
-struct onepkt {
- unsigned char pkt_data[MAXPKT];
- int pkt_dlen;
-};
-
-#define NBUF 64
-struct virtif_user {
- struct netfront_dev *viu_dev;
- struct bmk_thread *viu_rcvr;
- struct bmk_thread *viu_thr;
- struct virtif_sc *viu_vifsc;
-
- int viu_read;
- int viu_write;
- int viu_dying;
- struct onepkt viu_pkts[NBUF];
-};
-
-/*
- * Ok, based on how (the unmodified) netfront works, we need to
- * consume the data here. So store it locally (and revisit some day).
- */
-static void
-myrecv(struct netfront_dev *dev, unsigned char *data, int dlen)
-{
- struct virtif_user *viu = netfront_get_private(dev);
- int nextw;
-
- /* TODO: we should be at the correct spl already, assert how? */
-
- nextw = (viu->viu_write+1) % NBUF;
- /* queue full? drop packet */
- if (nextw == viu->viu_read) {
- return;
- }
-
- if (dlen > MAXPKT) {
- minios_printk("myrecv: pkt len %d too big\n", dlen);
- return;
- }
-
- bmk_memcpy(viu->viu_pkts[viu->viu_write].pkt_data, data, dlen);
- viu->viu_pkts[viu->viu_write].pkt_dlen = dlen;
- viu->viu_write = nextw;
-
- if (viu->viu_rcvr)
- bmk_sched_wake(viu->viu_rcvr);
-}
-
-static void
-pusher(void *arg)
-{
- struct virtif_user *viu = arg;
- struct iovec iov;
- struct onepkt *mypkt;
- int flags;
-
- /* give us a rump kernel context */
- rumpuser__hyp.hyp_schedule();
- rumpuser__hyp.hyp_lwproc_newlwp(0);
- rumpuser__hyp.hyp_unschedule();
-
- local_irq_save(flags);
- again:
- while (!viu->viu_dying) {
- while (viu->viu_read == viu->viu_write) {
- viu->viu_rcvr = bmk_current;
- bmk_sched_blockprepare();
- local_irq_restore(flags);
- bmk_sched_block();
- local_irq_save(flags);
- viu->viu_rcvr = NULL;
- goto again;
- }
- mypkt = &viu->viu_pkts[viu->viu_read];
- local_irq_restore(flags);
-
- iov.iov_base = mypkt->pkt_data;
- iov.iov_len = mypkt->pkt_dlen;
-
- rumpuser__hyp.hyp_schedule();
- rump_virtif_pktdeliver(viu->viu_vifsc, &iov, 1);
- rumpuser__hyp.hyp_unschedule();
-
- local_irq_save(flags);
- viu->viu_read = (viu->viu_read+1) % NBUF;
- }
- local_irq_restore(flags);
-}
-
-int
-VIFHYPER_CREATE(int devnum, struct virtif_sc *vif_sc, uint8_t *enaddr,
- struct virtif_user **viup)
-{
- struct virtif_user *viu = NULL;
- int rv, nlocks;
-
- rumpkern_unsched(&nlocks, NULL);
-
- viu = bmk_memalloc(sizeof(*viu), 0, BMK_MEMWHO_RUMPKERN);
- if (viu == NULL) {
- rv = BMK_ENOMEM;
- goto out;
- }
- bmk_memset(viu, 0, sizeof(*viu));
- viu->viu_vifsc = vif_sc;
-
- viu->viu_dev = netfront_init(NULL, myrecv, enaddr, NULL, viu);
- if (!viu->viu_dev) {
- rv = BMK_EINVAL; /* ? */
- bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
- goto out;
- }
-
- viu->viu_thr = bmk_sched_create("xenifp",
- NULL, 1, pusher, viu, NULL, 0);
- if (viu->viu_thr == NULL) {
- minios_printk("fatal thread creation failure\n"); /* XXX */
- minios_do_exit();
- }
-
- rv = 0;
-
- out:
- rumpkern_sched(nlocks, NULL);
-
- *viup = viu;
- return rv;
-}
-
-void
-VIFHYPER_SEND(struct virtif_user *viu,
- struct iovec *iov, size_t iovlen)
-{
- size_t tlen, i;
- int nlocks;
- void *d;
- char *d0;
-
- rumpkern_unsched(&nlocks, NULL);
- /*
- * netfront doesn't do scatter-gather, so just simply
- * copy the data into one lump here. drop packet if we
- * can't allocate temp memory space.
- */
- if (iovlen == 1) {
- d = iov->iov_base;
- tlen = iov->iov_len;
- } else {
- for (i = 0, tlen = 0; i < iovlen; i++) {
- tlen += iov[i].iov_len;
- }
-
- /*
- * allocate the temp space from RUMPKERN instead of BMK
- * since there are no huge repercussions if we fail or
- * succeed.
- */
- d = d0 = bmk_memalloc(tlen, 0, BMK_MEMWHO_RUMPKERN);
- if (d == NULL)
- goto out;
-
- for (i = 0; i < iovlen; i++) {
- bmk_memcpy(d0, iov[i].iov_base, iov[i].iov_len);
- d0 += iov[i].iov_len;
- }
- }
-
- netfront_xmit(viu->viu_dev, d, tlen);
-
- if (iovlen != 1)
- bmk_memfree(d, BMK_MEMWHO_RUMPKERN);
-
- out:
- rumpkern_sched(nlocks, NULL);
-}
-
-void
-VIFHYPER_DYING(struct virtif_user *viu)
-{
-
- viu->viu_dying = 1;
- if (viu->viu_rcvr)
- bmk_sched_wake(viu->viu_rcvr);
-}
-
-void
-VIFHYPER_DESTROY(struct virtif_user *viu)
-{
-
- ASSERT(viu->viu_dying == 1);
-
- bmk_sched_join(viu->viu_thr);
- netfront_shutdown(viu->viu_dev);
- bmk_memfree(viu, BMK_MEMWHO_RUMPKERN);
-}
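Two notes on the hypercall side above, both restating what the code already does. First, the viu_pkts[] ring is a single-producer/single-consumer buffer: myrecv() (interrupt context) is the only writer of viu_write and pusher() the only writer of viu_read, so with NBUF == 64 at most 63 packets are queued; myrecv() drops on a full ring and pusher() blocks on an empty one. Second, the overall data path through this patch, with VIRTIF_BASE=xenif so the VIFHYPER_* names resolve to rumpcomp_xenif_*:

/* transmit: virtif_start() -> VIFHYPER_SEND()/rumpcomp_xenif_send()
 *           -> netfront_xmit()
 * receive:  netfront interrupt -> myrecv() -> viu_pkts[] ring
 *           -> pusher() thread -> rump_virtif_pktdeliver()
 *           -> bpf_mtap()/ether_input()
 */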