--- /dev/null
+.include <bsd.own.mk>
+
+LIB= rumpkern_mman
+
+SRCS+= sys_mman.c mman_component.c
+
+RUMPTOP= ${TOPRUMP}
+
+CPPFLAGS+= -I${RUMPTOP}/librump/rumpkern
+
+.include "${RUMPTOP}/Makefile.rump"
+.include <bsd.lib.mk>
+.include <bsd.klinks.mk>
--- /dev/null
+/*-
+ * Copyright (c) 2016 Antti Kantee. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/syscall.h>
+
+#include "rump_private.h"
+
+extern sy_call_t sys_mmap;
+extern sy_call_t sys_munmap;
+extern sy_call_t sys___msync13;
+extern sy_call_t sys_mincore;
+extern sy_call_t sys_madvise;
+extern sy_call_t sys_mprotect;
+extern sy_call_t sys_mlock;
+extern sy_call_t sys_mlockall;
+extern sy_call_t sys_munlock;
+extern sy_call_t sys_munlockall;
+
+#define ENTRY(name) { SYS_##name, sys_##name },
+/*
+ * Syscalls handled by this component. Note: sys_madvise is declared
+ * above and the libc-level wrappers assume the kernel provides stubs
+ * for all of these, so madvise must be registered here as well.
+ */
+static const struct rump_onesyscall mysys[] = {
+	ENTRY(mmap)
+	ENTRY(munmap)
+	ENTRY(__msync13)
+	ENTRY(mincore)
+	ENTRY(madvise)
+	ENTRY(mprotect)
+	ENTRY(mlock)
+	ENTRY(mlockall)
+	ENTRY(munlock)
+	ENTRY(munlockall)
+};
+#undef ENTRY
+
+RUMP_COMPONENT(RUMP_COMPONENT_SYSCALL)
+{
+
+ rump_syscall_boot_establish(mysys, __arraycount(mysys));
+}
--- /dev/null
+/*-
+ * Copyright (c) 2013, 2015, 2016 Antti Kantee. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Memory management syscall implementations. These are mostly ~nops,
+ * apart from mmap, which we sort of attempt to emulate since many
+ * programs reserve memory using mmap instead of malloc.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/kmem.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/syscall.h>
+#include <sys/syscallargs.h>
+
+#include "rump_private.h"
+
+#ifdef RUMPRUN_MMAP_DEBUG
+#define MMAP_PRINTF(x) printf x
+#else
+#define MMAP_PRINTF(x)
+#endif
+
+struct mmapchunk {
+ void *mm_start;
+ size_t mm_size;
+ size_t mm_pgsleft;
+
+ LIST_ENTRY(mmapchunk) mm_chunks;
+};
+/*
+ * XXX: use a tree? we don't know how many entries we get,
+ * someone might mmap pages individually ...
+ */
+static LIST_HEAD(, mmapchunk) mmc_list = LIST_HEAD_INITIALIZER(&mmc_list);
+
+static void *
+mmapmem_alloc(size_t roundedlen)
+{
+ struct mmapchunk *mc;
+ void *v;
+
+ mc = kmem_alloc(sizeof(*mc), KM_SLEEP);
+ if (mc == NULL)
+ return NULL;
+
+ v = rump_hypermalloc(roundedlen, PAGE_SIZE, true, "mmapmem");
+
+ mc->mm_start = v;
+ mc->mm_size = roundedlen;
+ mc->mm_pgsleft = roundedlen / PAGE_SIZE;
+
+ LIST_INSERT_HEAD(&mmc_list, mc, mm_chunks);
+
+ return v;
+}
+
+/*
+ * Release pages previously handed out by mmapmem_alloc(). The chunk's
+ * backing memory is freed only once every page of the chunk has been
+ * unmapped.  Returns 0 on success, EINVAL if no chunk fully contains
+ * [addr, addr+roundedlen).
+ */
+static int
+mmapmem_free(void *addr, size_t roundedlen)
+{
+	struct mmapchunk *mc;
+	size_t npgs;
+
+	LIST_FOREACH(mc, &mmc_list, mm_chunks) {
+		if (mc->mm_start <= addr &&
+		    ((uint8_t *)mc->mm_start + mc->mm_size
+		      >= (uint8_t *)addr + roundedlen))
+			break;
+	}
+	if (!mc) {
+		return EINVAL;
+	}
+
+	npgs = roundedlen / PAGE_SIZE;
+	KASSERT(npgs <= mc->mm_pgsleft);
+	mc->mm_pgsleft -= npgs;
+	if (mc->mm_pgsleft)
+		return 0;
+
+	/* no pages left => free bookkeeping chunk */
+	LIST_REMOVE(mc, mm_chunks);
+	/*
+	 * mm_start came from rump_hypermalloc() in mmapmem_alloc(), so
+	 * it must be released with rump_hyperfree(), not kmem_free().
+	 */
+	rump_hyperfree(mc->mm_start, mc->mm_size);
+	kmem_free(mc, sizeof(*mc));
+
+	return 0;
+}
+
+int
+sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
+{
+ size_t len = SCARG(uap, len);
+ int prot = SCARG(uap, prot);
+ int flags = SCARG(uap, flags);
+ int fd = SCARG(uap, fd);
+ off_t pos = SCARG(uap, pos);
+ struct file *fp;
+ register_t cnt;
+ void *v;
+ size_t roundedlen;
+ int error = 0;
+
+ MMAP_PRINTF(("-> mmap: %p %zu, 0x%x, 0x%x, %d, %" PRId64 "\n",
+ SCARG(uap, addr), len, prot, flags, fd, pos));
+
+ if (fd != -1 && prot != PROT_READ) {
+ MMAP_PRINTF(("mmap: trying to r/w map a file. failing!\n"));
+ return EOPNOTSUPP;
+ }
+
+ /* we're not going to even try */
+ if (flags & MAP_FIXED) {
+ return ENOMEM;
+ }
+
+ /* offset should be aligned to page size */
+ if ((pos & (PAGE_SIZE-1)) != 0) {
+ return EINVAL;
+ }
+
+ /* allocate full whatever-we-lie-to-be-pages */
+ roundedlen = roundup2(len, PAGE_SIZE);
+ if ((v = mmapmem_alloc(roundedlen)) == NULL) {
+ return ENOMEM;
+ }
+
+ *retval = (register_t)v;
+
+ if (flags & MAP_ANON) {
+ memset(v, 0, roundedlen);
+ return 0;
+ }
+
+ /*
+ * Ok, so we have a file-backed mapping case.
+ */
+
+ if ((fp = fd_getfile(fd)) == NULL)
+ return EBADF;
+
+ if ((fp->f_flag & FREAD) == 0) {
+ fd_putfile(fd);
+ return EBADF;
+ }
+ if (fp->f_type != DTYPE_VNODE) {
+ fd_putfile(fd);
+ return ENODEV;
+ }
+
+ error = dofileread(fd, fp, v, roundedlen, &pos, 0, &cnt);
+ if (error) {
+ mmapmem_free(v, roundedlen);
+ return error;
+ }
+
+ /*
+ * Memory after the end of the object until the end of the page
+ * should be 0-filled. We don't really know when the object
+ * stops (we could do a fstat(), but that's racy), so just assume
+ * that the caller knows what he or she is doing.
+ */
+ if ((size_t)cnt != roundedlen) {
+ KASSERT(cnt < roundedlen);
+ memset((uint8_t *)v+cnt, 0, roundedlen-cnt);
+ }
+
+ MMAP_PRINTF(("<- mmap: %p %d\n", v, error));
+ return error;
+}
+
+int
+sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
+ register_t *retval)
+{
+ void *addr = SCARG(uap, addr);
+ int flags = SCARG(uap, flags);
+
+ /* catch a few easy errors */
+ if (((uintptr_t)addr & (PAGE_SIZE-1)) != 0)
+ return EINVAL;
+ if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
+ return EINVAL;
+
+ /* otherwise just pretend that we are the champions my friend */
+ return 0;
+}
+
+int
+sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
+{
+ void *addr = SCARG(uap, addr);
+ size_t len = SCARG(uap, len);
+ int rv;
+
+ MMAP_PRINTF(("-> munmap: %p, %zu\n", addr, len));
+
+ /* addr must be page-aligned */
+ if (((uintptr_t)addr & (PAGE_SIZE-1)) != 0) {
+ rv = EINVAL;
+ goto out;
+ }
+
+ rv = mmapmem_free(addr, roundup2(len, PAGE_SIZE));
+
+ out:
+ MMAP_PRINTF(("<- munmap: %d\n", rv));
+ return rv;
+}
+
+int
+sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
+ register_t *retval)
+{
+ size_t len = SCARG(uap, len);
+ char *vec = SCARG(uap, vec);
+
+ /*
+ * Questionable if we should allocate vec + copyout().
+ * Guess that's the problem of the person who copypastes
+ * this code into the wrong place.
+ */
+ memset(vec, 0x01, (len + PAGE_SIZE - 1) / PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * Rest are stubs.
+ */
+
+int
+sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
+ register_t *retval)
+{
+
+ return 0;
+}
+
+__strong_alias(sys_mprotect,sys_madvise);
+__strong_alias(sys_minherit,sys_madvise);
+__strong_alias(sys_mlock,sys_madvise);
+__strong_alias(sys_mlockall,sys_madvise);
+__strong_alias(sys_munlock,sys_madvise);
+__strong_alias(sys_munlockall,sys_madvise);
/*-
- * Copyright (c) 2013, 2015 Antti Kantee. All Rights Reserved.
+ * Copyright (c) 2016 Antti Kantee. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
*/
/*
- * Emulate a bit of memory management syscalls. Most are nops due
- * to the fact that there is virtual memory. Things like mprotect()
- * don't work 100% correctly, but we can't really do anything about it,
- * so we lie to the caller and cross our fingers.
+ * Syscall wrappers for memory management routines (not provided by
+ * standard rump kernels). These are handwritten libc-level wrappers.
+ * We should maybe try to autogenerate them some fine day ...
*/
-/* for libc namespace */
#define mmap _mmap
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mman.h>
-#include <sys/queue.h>
+#include <sys/syscall.h>
+#include <sys/syscallargs.h>
-#include <assert.h>
#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#include <bmk-core/pgalloc.h>
+/* XXX */
+int rump_syscall(int, void *, size_t, register_t *);
-#ifdef RUMPRUN_MMAP_DEBUG
-#define MMAP_PRINTF(x) printf x
-#else
-#define MMAP_PRINTF(x)
+#if BYTE_ORDER == BIG_ENDIAN
+#define SPARG(p,k) ((p)->k.be.datum)
+#else /* LITTLE_ENDIAN, I hope dearly */
+#define SPARG(p,k) ((p)->k.le.datum)
#endif
-struct mmapchunk {
- void *mm_start;
- size_t mm_size;
- size_t mm_pgsleft;
-
- LIST_ENTRY(mmapchunk) mm_chunks;
-};
-/*
- * XXX: use a tree? we don't know how many entries we get,
- * someone might mmap page individually ...
- */
-static LIST_HEAD(, mmapchunk) mmc_list = LIST_HEAD_INITIALIZER(&mmc_list);
-
-/* XXX: need actual const macro */
-static inline long __constfunc
-pagesize(void)
-{
-
- return sysconf(_SC_PAGESIZE);
-}
-
-/*
- * calculate the order we need for pgalloc()
- */
-static int
-size2order(size_t wantedsize)
-{
- int npgs = wantedsize / pagesize();
- int powtwo;
-
- powtwo = 8*sizeof(npgs) - __builtin_clz(npgs);
- if ((npgs & (npgs-1)) == 0)
- powtwo--;
-
- return powtwo;
-}
-
-static void *
-mmapmem_alloc(size_t roundedlen)
-{
- struct mmapchunk *mc;
- void *v;
- int order;
-
- mc = malloc(sizeof(*mc));
- if (mc == NULL)
- return NULL;
-
- order = size2order(roundedlen);
- v = bmk_pgalloc(order);
- if (v == NULL) {
- free(mc);
- return NULL;
- }
- memset(v, 0, (1UL<<order) * pagesize());
-
- mc->mm_start = v;
- mc->mm_size = roundedlen;
- mc->mm_pgsleft = roundedlen / pagesize();
-
- LIST_INSERT_HEAD(&mmc_list, mc, mm_chunks);
-
- return v;
-}
-
-static int
-mmapmem_free(void *addr, size_t roundedlen)
-{
- struct mmapchunk *mc;
- size_t npgs;
- int order;
-
- LIST_FOREACH(mc, &mmc_list, mm_chunks) {
- if (mc->mm_start <= addr &&
- ((uint8_t *)mc->mm_start + mc->mm_size
- >= (uint8_t *)addr + roundedlen))
- break;
- }
- if (!mc) {
- return EINVAL;
- }
-
- npgs = roundedlen / pagesize();
- assert(npgs <= mc->mm_pgsleft);
- mc->mm_pgsleft -= npgs;
- if (mc->mm_pgsleft)
- return 0;
-
- /* no pages left => free bookkeeping chunk */
- LIST_REMOVE(mc, mm_chunks);
- order = size2order(mc->mm_size);
- bmk_pgfree(mc->mm_start, order);
- free(mc);
-
- return 0;
-}
-
void *
-mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
+mmap(void *addr, size_t len, int prot, int flags, int fd, off_t pos)
{
- void *v;
- ssize_t nn;
- size_t roundedlen, nnu;
- int error = 0;
-
- MMAP_PRINTF(("-> mmap: %p %zu, 0x%x, 0x%x, %d, %" PRId64 "\n",
- addr, len, prot, flags, fd, off));
-
- if (fd != -1 && prot != PROT_READ) {
- MMAP_PRINTF(("mmap: trying to r/w map a file. failing!\n"));
- error = ENOTSUP;
- goto out;
- }
-
- /* we're not going to even try */
- if (flags & MAP_FIXED) {
- error = ENOMEM;
- goto out;
+ struct sys_mmap_args callarg;
+ register_t retval[2];
+ int error;
+
+ memset(&callarg, 0, sizeof(callarg));
+ SPARG(&callarg, addr) = addr;
+ SPARG(&callarg, len) = len;
+ SPARG(&callarg, prot) = prot;
+ SPARG(&callarg, flags) = flags;
+ SPARG(&callarg, fd) = fd;
+ SPARG(&callarg, pos) = pos;
+
+ error = rump_syscall(SYS_mmap, &callarg, sizeof(callarg), retval);
+ errno = error;
+ if (error == 0) {
+ return (void *)retval[0];
}
-
- /* offset should be aligned to page size */
- if ((off & (pagesize()-1)) != 0) {
- error = EINVAL;
- goto out;
- }
-
- /* allocate full whatever-we-lie-to-be-pages */
- roundedlen = roundup2(len, pagesize());
- if ((v = mmapmem_alloc(roundedlen)) == NULL) {
- error = ENOMEM;
- goto out;
- }
-
- if (flags & MAP_ANON)
- goto out;
-
- if ((nn = pread(fd, v, roundedlen, off)) == -1) {
- MMAP_PRINTF(("mmap: failed to populate r/o file mapping!\n"));
- error = errno;
- assert(error != 0);
- mmapmem_free(v, roundedlen);
- goto out;
- }
- nnu = (size_t)nn;
-
- /*
- * Memory after the end of the object until the end of the page
- * should be 0-filled. We don't really know when the object
- * stops (we could do a fstat(), but that's racy), so just assume
- * that the caller knows what her or she is doing.
- */
- if (nnu != roundedlen) {
- assert(nnu < roundedlen);
- memset((uint8_t *)v+nnu, 0, roundedlen-nnu);
- }
-
- out:
- if (error) {
- errno = error;
- v = MAP_FAILED;
- }
- MMAP_PRINTF(("<- mmap: %p %d\n", v, error));
- return v;
+ return MAP_FAILED;
}
#undef mmap
__weak_alias(mmap,_mmap);
-int _sys___msync13(void *, size_t, int);
-int
-_sys___msync13(void *addr, size_t len, int flags)
-{
-
- /* catch a few easy errors */
- if (((uintptr_t)addr & (pagesize()-1)) != 0)
- return EINVAL;
- if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
- return EINVAL;
-
- /* otherwise just pretend that we are the champions my friend */
- return 0;
-}
-
int
munmap(void *addr, size_t len)
{
- int rv;
-
- MMAP_PRINTF(("-> munmap: %p, %zu\n", addr, len));
-
- /* addr must be page-aligned */
- if (((uintptr_t)addr & (pagesize()-1)) != 0) {
- rv = EINVAL;
- goto out;
+ struct sys_munmap_args callarg;
+ register_t retval[2];
+ int error;
+
+ memset(&callarg, 0, sizeof(callarg));
+ SPARG(&callarg, addr) = addr;
+ SPARG(&callarg, len) = len;
+
+ error = rump_syscall(SYS_munmap, &callarg, sizeof(callarg), retval);
+ errno = error;
+ if (error == 0) {
+ return (int)retval[0];
}
-
- rv = mmapmem_free(addr, roundup2(len, pagesize()));
-
- out:
- MMAP_PRINTF(("<- munmap: %d\n", rv));
- return rv;
-}
-
-int
-madvise(void *addr, size_t len, int adv)
-{
-
- return 0;
-}
-
-int
-mprotect(void *addr, size_t len, int prot)
-{
-
- return 0;
-}
-
-int
-minherit(void *addr, size_t len, int inherit)
-{
-
- return 0;
+ return -1;
}
+int _sys___msync13(void *, size_t, int);
int
-mlockall(int flags)
+_sys___msync13(void *addr, size_t len, int flags)
{
+ struct sys___msync13_args callarg;
+ register_t retval[2];
+ int error;
- return 0;
-}
-
-int
-munlockall(void)
-{
+ memset(&callarg, 0, sizeof(callarg));
+ SPARG(&callarg, addr) = addr;
+ SPARG(&callarg, len) = len;
- return 0;
+ error = rump_syscall(SYS___msync13, &callarg, sizeof(callarg), retval);
+ errno = error;
+ if (error == 0) {
+ return 0;
+ }
+ return -1;
}
+__weak_alias(___msync13,_sys___msync13);
+__weak_alias(__msync13,_sys___msync13);
int
-mlock(const void *addr, size_t len)
+mincore(void *addr, size_t len, char *vec)
{
-
- return 0;
+ struct sys_mincore_args callarg;
+ register_t retval[2];
+ int error;
+
+ memset(&callarg, 0, sizeof(callarg));
+ SPARG(&callarg, addr) = addr;
+ SPARG(&callarg, len) = len;
+ SPARG(&callarg, vec) = vec;
+
+ error = rump_syscall(SYS_mincore, &callarg, sizeof(callarg), retval);
+ errno = error;
+ if (error == 0) {
+ return 0;
+ }
+ return -1;
}
-int
-munlock(const void *addr, size_t len)
-{
-
- return 0;
-}
+/*
+ * We "know" that the following are stubs also in the kernel. Risk of
+ * them going out-of-sync is quite minimal ...
+ */
int
-mincore(void *addr, size_t length, char *vec)
+madvise(void *addr, size_t len, int adv)
{
- long page_size = sysconf(_SC_PAGESIZE);
- memset(vec, 0x01, (length + page_size - 1) / page_size);
return 0;
}
+__strong_alias(mprotect,madvise);
+__strong_alias(minherit,madvise);
+__strong_alias(mlock,madvise);
+__strong_alias(mlockall,madvise);
+__strong_alias(munlock,madvise);
+__strong_alias(munlockall,madvise);
# but building it always makes testing kernonly easier
TARGETS+= compiler_rt
INSTALLTGTS= librumpkern_bmktc_install
+INSTALLTGTS+= librumpkern_mman_install
ifneq (${KERNONLY},true)
TARGETS+= userlibs
$(eval $(call BUILDLIB_target,libbmk_core))
$(eval $(call BUILDLIB_target,libbmk_rumpuser))
$(eval $(call BUILDLIB_target,librumpkern_bmktc))
+$(eval $(call BUILDLIB_target,librumpkern_mman))
$(eval $(call BUILDLIB_target,librumprun_base))
$(eval $(call BUILDLIB_target,librumprun_tester))
$(eval $(call BUILDLIB_target,libcompiler_rt,RUMPSRC=${RUMPSRC}))
commonlibs: platformlibs userlibs
userlibs: ${PSEUDOSTUBS}.o ${RROBJLIB}/librumprun_base/librumprun_base.a ${RROBJLIB}/librumprun_tester/librumprun_tester.a ${LIBUNWIND}
platformlibs: ${RROBJLIB}/libbmk_core/libbmk_core.a ${RROBJLIB}/libbmk_rumpuser/libbmk_rumpuser.a ${RROBJ}/bmk.ldscript
-rumpkernlibs: ${RROBJLIB}/librumpkern_bmktc/librumpkern_bmktc.a
+rumpkernlibs: ${RROBJLIB}/librumpkern_bmktc/librumpkern_bmktc.a ${RROBJLIB}/librumpkern_mman/librumpkern_mman.a
compiler_rt: ${RROBJLIB}/libcompiler_rt/libcompiler_rt.a
.PHONY: buildtest