$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/uklibparam))
$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/uklock))
$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/ukmmap))
+$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/ukmmio))
$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/ukmpi))
$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/uknetdev))
$(eval $(call _import_lib,$(CONFIG_UK_BASE)/lib/uknofault))
#include <uk/config.h>
#include <stddef.h>
+#include <string.h>
#include <stdio.h>
#include <errno.h>
#if CONFIG_LIBUKBOOT_INITSCHEDCOOP
#include <uk/schedcoop.h>
#endif /* CONFIG_LIBUKBOOT_INITSCHEDCOOP */
+#if CONFIG_LIBUKMMIO
+#include <uk/mmio.h>
+#endif
#include <uk/arch/lcpu.h>
#include <uk/plat/bootstrap.h>
#include <uk/plat/memory.h>
uk_pr_info("Initialize platform time...\n");
ukplat_time_init();
+#if CONFIG_LIBUKMMIO
+ uk_pr_info("Searching for MMIO devices\n");
+ for (int i = 1; i < argc; i++) {
+ if (!strncmp(argv[i], "virtio_mmio.device=", 19)) {
+ uk_mmio_add_dev(argv[i]);
+ }
+ }
+#endif
+
#if !CONFIG_LIBUKBOOT_NOSCHED
uk_pr_info("Initialize scheduling...\n");
#if CONFIG_LIBUKBOOT_INITSCHEDCOOP
--- /dev/null
+config LIBUKMMIO
+ bool "ukmmio: mmio support"
+ default n
+ select LIBNOLIBC if !HAVE_LIBC
+ select LIBUKDEBUG
--- /dev/null
+$(eval $(call addlib_s,libukmmio,$(CONFIG_LIBUKMMIO)))
+
+# Register to uklibparam, sets "virtio_mmio" as parameter prefix (virtio_mmio.*)
+# $(eval $(call addlib_paramprefix,libukmmio,virtio_mmio))
+
+CINCLUDES-y += -I$(LIBUKMMIO_BASE)/include
+
+LIBUKMMIO_CFLAGS-$(call gcc_version_ge,8,0) += -Wno-cast-function-type
+
+LIBUKMMIO_SRCS-y += $(LIBUKMMIO_BASE)/mmio.c
--- /dev/null
+uk_mmio_dev_count
+uk_mmio_dev_get
+uk_mmio_add_dev
--- /dev/null
+#ifndef __UK_MMIODEV__
+#define __UK_MMIODEV__
+
+#include <sys/types.h>
+#include <uk/list.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct uk_mmio_device {
+ unsigned int id;
+ UK_TAILQ_ENTRY(struct uk_mmio_device) _list;
+
+ __u64 size;
+ __u64 base_addr;
+ unsigned long irq;
+ unsigned long dev_id;
+};
+
+/* List of MMIO devices */
+UK_TAILQ_HEAD(uk_mmio_device_list, struct uk_mmio_device);
+
+/**
+ * Get number of mmio devices.
+ *
+ * @return
+ * - int: total number of mmio devices
+ */
+unsigned int uk_mmio_dev_count(void);
+
+/**
+ * Get a reference to a Unikraft MMIO Device, based on its ID.
+ *
+ * @param id
+ * The identifier of the Unikraft MMIO device.
+ * @return
+ * - NULL: device not found in list
+ * - (struct uk_mmio_device *): reference to an Unikraft MMIO Device
+ */
+struct uk_mmio_device * uk_mmio_dev_get(unsigned int id);
+
+/**
+ * Add a Unikraft MMIO device
+ *
+ * @param device
+ *	Cmdline argument representing a virtio_mmio device.
+ * @return
+ *	- 0: successfully registered the device
+ * - != 0: error on registering the device
+ */
+int uk_mmio_add_dev(char *device);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UK_MMIODEV__ */
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <uk/alloc.h>
+#include <uk/mmio.h>
+
+#define MAX_DEV_STR 255
+const char virtio_mmio_identifier[] = "virtio_mmio.device=";
+
+struct uk_alloc *a;
+struct uk_mmio_device_list uk_mmio_device_list =
+UK_TAILQ_HEAD_INITIALIZER(uk_mmio_device_list);
+int uk_mmio_device_count = 0;
+
+/* Return the number of MMIO devices registered so far via uk_mmio_add_dev(). */
+unsigned int uk_mmio_dev_count(void)
+{
+	return (unsigned int) uk_mmio_device_count;
+}
+
+/* Look up a registered MMIO device by its identifier; NULL if not found. */
+struct uk_mmio_device * uk_mmio_dev_get(unsigned int id)
+{
+	struct uk_mmio_device *mdev;
+
+	/* Linear scan; the list holds one entry per cmdline device, so it
+	 * is expected to be short.
+	 */
+	UK_TAILQ_FOREACH(mdev, &uk_mmio_device_list, _list)
+		if (mdev->id == id)
+			return mdev;
+
+	return NULL;
+}
+
+/*
+ * Parse one numeric token from 'str', stopping at delimiter 'c' or at the
+ * end of the string. Mutates 'str' in place:
+ *  - a 'K' character inside the token is blanked and applies a 1024x
+ *    multiplier to the parsed value;
+ *  - the delimiter itself is blanked, so a subsequent call that resumes
+ *    from *pEnd skips it as leading whitespace (strtol() ignores it).
+ * *pEnd is set by strtol() to the first unparsed character.
+ *
+ * NOTE(review): strtol() returns a signed long, so token values above
+ * LONG_MAX will not round-trip into the __u64 result — confirm the
+ * intended value ranges for size/base address.
+ */
+__u64 get_token_until(char *str, char c, int base, char **pEnd)
+{
+	__u64 multiplier = 1;
+	char *p;
+
+	for (p = str; *p && *p != c; p++) {
+		if (*p == 'K') {
+			multiplier = 1024;
+			*p = ' ';
+		}
+	}
+	if (*p) {
+		*p = ' ';
+	}
+
+	return strtol(str, pEnd, base) * multiplier;
+}
+
+/**
+ * Register a Unikraft MMIO device from a cmdline argument of the form
+ * "virtio_mmio.device=<size>@<base>:<irq>[:<plat_dev_id>]".
+ *
+ * @param device
+ *	Cmdline argument representing a virtio_mmio device.
+ * @return
+ *	- 0: successfully registered the device
+ *	- -1: parse error, argument too long, or allocation failure
+ */
+int uk_mmio_add_dev(char *device)
+{
+	__u64 size, base_addr;
+	unsigned long irq, plat_dev_id = 0;
+	char devStr[MAX_DEV_STR];
+	char *pEnd;
+	struct uk_mmio_device *dev;
+
+	if (!(a = uk_alloc_get_default())) {
+		uk_pr_err("No allocator\n");
+		return -1;
+	}
+
+	if (strncmp(device, virtio_mmio_identifier, sizeof(virtio_mmio_identifier) - 1)) {
+		uk_pr_err("Invalid mmio device cmdline argument\n");
+		return -1;
+	}
+
+	/* Bounded copy: a plain strcpy() here could overflow devStr when the
+	 * cmdline argument exceeds MAX_DEV_STR bytes. snprintf() always
+	 * NUL-terminates and reports truncation via its return value.
+	 */
+	if (snprintf(devStr, sizeof(devStr), "%s",
+		     device + sizeof(virtio_mmio_identifier) - 1)
+	    >= (int)sizeof(devStr)) {
+		uk_pr_err("mmio device cmdline argument too long\n");
+		return -1;
+	}
+
+	/* Parse "<size>@<base>:<irq>[:<plat_dev_id>]"; each call blanks the
+	 * delimiter and leaves pEnd positioned for the next token.
+	 */
+	size = get_token_until(devStr, '@', 0, &pEnd);
+	if (!size) {
+		uk_pr_err("Couldn't parse mmio device size\n");
+		return -1;
+	}
+
+	base_addr = get_token_until(pEnd, ':', 0, &pEnd);
+	if (!base_addr) {
+		uk_pr_err("Couldn't parse mmio device base addr\n");
+		return -1;
+	}
+
+	irq = get_token_until(pEnd, ':', 10, &pEnd);
+	if (!irq) {
+		uk_pr_err("Couldn't parse mmio device base irq\n");
+		return -1;
+	}
+
+	/* Optional trailing platform device id */
+	if (*pEnd) {
+		plat_dev_id = get_token_until(pEnd, 0, 10, NULL);
+	}
+
+	dev = uk_calloc(a, 1, sizeof(*dev));
+	if (!dev) {
+		return -1;
+	}
+
+	dev->id = uk_mmio_device_count++;
+	dev->base_addr = base_addr;
+	dev->size = size;
+	dev->irq = irq;
+	dev->dev_id = plat_dev_id;
+	UK_TAILQ_INSERT_TAIL(&uk_mmio_device_list, dev, _list);
+
+	uk_pr_info("New mmio device at %#lx of size %#lx and irq %lu\n", base_addr, size, irq);
+	return 0;
+}
return (h << 32) | l;
}
+/* accessing devices via memory */
+/* 8-bit MMIO load; "=q" restricts output to a byte-addressable register. */
+static inline __u8 readb(__u8 *addr)
+{
+	__u8 v;
+
+	__asm__ __volatile__("movb %1, %0" : "=q"(v) : "m"(*addr));
+	return v;
+}
+
+/* 16-bit MMIO load from the given address. */
+static inline __u16 readw(__u16 *addr)
+{
+	__u16 v;
+
+	__asm__ __volatile__("movw %1, %0" : "=r"(v) : "m"(*addr));
+	return v;
+}
+
+/* 32-bit MMIO load from the given address. */
+static inline __u32 readl(__u32 *addr)
+{
+	__u32 v;
+
+	__asm__ __volatile__("movl %1, %0" : "=r"(v) : "m"(*addr));
+	return v;
+}
+
+/* 64-bit MMIO load from the given address. */
+static inline __u64 readq(__u64 *addr)
+{
+	__u64 v;
+
+	__asm__ __volatile__("movq %1, %0" : "=r"(v) : "m"(*addr));
+	return v;
+}
+
+/* 8-bit MMIO store; "q" restricts the source to a byte-addressable register. */
+static inline void writeb(__u8 *addr, __u8 v)
+{
+	__asm__ __volatile__("movb %0, %1" : : "q"(v), "m"(*addr));
+}
+
+/* 16-bit MMIO store to the given address. */
+static inline void writew(__u16 *addr, __u16 v)
+{
+	__asm__ __volatile__("movw %0, %1" : : "r"(v), "m"(*addr));
+}
+
+/* 32-bit MMIO store to the given address.
+ * NOTE(review): no 64-bit writeq counterpart is defined here although readq
+ * exists — confirm no caller needs 64-bit MMIO writes.
+ */
+static inline void writel(__u32 *addr, __u32 v)
+{
+	__asm__ __volatile__("movl %0, %1" : : "r"(v), "m"(*addr));
+}
/* accessing devices via port space */
static inline __u8 inb(__u16 port)
#include <uk/plat/memory.h>
#include <uk/plat/common/cpu.h>
#include <platform_bus.h>
+#ifdef CONFIG_ARCH_ARM_64
#include <libfdt.h>
+#endif
+#ifdef CONFIG_ARCH_ARM_64
#include <gic/gic-v2.h>
#include <ofw/fdt.h>
+#endif
#include <uk/plat/common/bootinfo.h>
static void *dtb;
static const char *pf_device_compatible_list[] = {
"virtio,mmio",
+#ifdef CONFIG_ARCH_ARM_64
"pci-host-ecam-generic",
+#endif
NULL
};
/* Search all the platform bus devices provided by fdt */
do {
+ #ifdef CONFIG_ARCH_ARM_64
fdt_pf = fdt_node_offset_idx_by_compatible_list(dtb,
fdt_pf, pf_device_compatible_list, &idx);
if (fdt_pf < 0) {
uk_pr_info("End of searching platform devices\n");
break;
}
+ #endif
/* Alloc dev */
dev = (struct pf_device *) uk_calloc(pfh.a, 1, sizeof(*dev));
return -ENOMEM;
}
+ #ifdef CONFIG_ARCH_ARM_64
dev->fdt_offset = fdt_pf;
+ #endif
/* Find drv with compatible-id match table */
drv = pf_find_driver(pf_device_compatible_list[idx]);
ret = pf_driver_add_device(drv, dev);
if (ret < 0)
uk_free(pfh.a, dev);
+ return 0;
} while (1);
return ret;
static inline void virtio_dev_drv_up(struct virtio_dev *vdev)
{
- virtio_dev_status_update(vdev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+ virtio_dev_status_update(vdev, (VIRTIO_CONFIG_STATUS_ACK | VIRTIO_CONFIG_STATUS_DRIVER | VIRTIO_CONFIG_STATUS_FEATURES_OK | VIRTIO_CONFIG_STATUS_DRIVER_OK));
}
#define VIRTIO_BUS_REGISTER_DRIVER(b) \
#define VIRTIO_CONFIG_STATUS_ACK 0x1 /* recognize device as virtio */
#define VIRTIO_CONFIG_STATUS_DRIVER 0x2 /* driver for the device found*/
#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x4 /* initialization is complete */
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x8
#define VIRTIO_CONFIG_STATUS_NEEDS_RESET 0x40 /* device needs reset */
#define VIRTIO_CONFIG_STATUS_FAIL 0x80 /* device something's wrong*/
}
}
}
+
+/**
+ * Write 'len' bytes from 'buf' to device memory at addr+offset, in chunks
+ * of 'type_len' bytes (1, 2 or 4; other sizes crash).
+ */
+static inline void _virtio_mem_cwrite_bytes(const void *addr, const __u8 offset,
+					    const void *buf, int len, int type_len)
+{
+	int i = 0;
+	__u64 io_addr;
+	int count;
+
+	count = len / type_len;
+	for (i = 0; i < count; i++) {
+		io_addr = ((unsigned long)addr) + offset + (i * type_len);
+		/* Index 'buf' in elements, not bytes: the cast already scales
+		 * pointer arithmetic by the element size, so [i * type_len]
+		 * would read past the buffer whenever type_len > 1.
+		 */
+		switch (type_len) {
+		case 1:
+			writeb((__u8 *) io_addr, ((__u8 *)buf)[i]);
+			break;
+		case 2:
+			writew((__u16 *) io_addr, ((__u16 *)buf)[i]);
+			break;
+		case 4:
+			writel((__u32 *) io_addr, ((__u32 *)buf)[i]);
+			break;
+		default:
+			UK_CRASH("Unsupported virtio write operation\n");
+		}
+	}
+}
+
+/**
+ * Read 'len' bytes of device memory at addr+offset into 'buf', in chunks
+ * of 'type_len' bytes (1, 2, 4 or 8; other sizes crash).
+ */
+static inline void _virtio_mem_cread_bytes(const void *addr, const __u8 offset,
+					   void *buf, int len, int type_len)
+{
+	int i = 0;
+	__u64 io_addr;
+	int count;
+
+	count = len / type_len;
+	for (i = 0; i < count; i++) {
+		io_addr = ((unsigned long)addr) + offset + (i * type_len);
+		/* Index 'buf' in elements, not bytes: the cast already scales
+		 * pointer arithmetic by the element size, so [i * type_len]
+		 * would write past the buffer whenever type_len > 1.
+		 */
+		switch (type_len) {
+		case 1:
+			((__u8 *)buf)[i] = readb((__u8 *) io_addr);
+			break;
+		case 2:
+			((__u16 *)buf)[i] = readw((__u16 *) io_addr);
+			break;
+		case 4:
+			((__u32 *)buf)[i] = readl((__u32 *) io_addr);
+			break;
+		case 8:
+			((__u64 *)buf)[i] = readq((__u64 *) io_addr);
+			break;
+		default:
+			UK_CRASH("Unsupported virtio read operation\n");
+		}
+	}
+}
#else /* __X86_64__ */
/* IO barriers */
}
}
+/* Non-x86_64 branch: memory-mapped config write forwards to the generic
+ * accessor.
+ */
+static inline void _virtio_mem_cwrite_bytes(const void *addr, const __u8 offset,
+					const void *buf, int len, int type_len)
+{
+	_virtio_cwrite_bytes(addr, offset, buf, len, type_len);
+}
+
+/* Non-x86_64 branch: memory-mapped config read forwards to the generic
+ * accessor.
+ */
+static inline void _virtio_mem_cread_bytes(const void *addr, const __u8 offset,
+					void *buf, int len, int type_len)
+{
+	_virtio_cread_bytes(addr, offset, buf, len, type_len);
+}
+
#endif /* __X86_64__ */
/**
_virtio_cwrite_bytes(addr, offset, &data, sizeof(data), sizeof(data));
}
+/**
+ * Read a device configuration area of 'len' bytes, retrying until two
+ * consecutive byte-wise snapshots agree (i.e. the device did not change
+ * the configuration mid-read), or MAX_TRY_COUNT attempts were made.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @param buf
+ *	The destination buffer to which the value has to be copied.
+ * @param len
+ *	The length of the destination buffer.
+ * @return
+ *	len when a consistent snapshot was read; -1 if the configuration
+ *	kept changing for MAX_TRY_COUNT attempts.
+ */
+static inline int virtio_mem_cread_bytes_many(const void *addr, const __u8 offset,
+					__u8 *buf, __u32 len)
+{
+	/* NOTE(review): VLA sized by caller-controlled 'len' — assumes only
+	 * small config lengths are passed; confirm against callers.
+	 */
+	__u8 old_buf[len];
+	int check = -1;
+	int cnt = 0;
+	__u32 i = 0;
+
+	do {
+		check = len;
+		/* Two back-to-back byte-wise reads of the same region */
+		_virtio_mem_cread_bytes(addr, offset, &old_buf[0], len, 1);
+		_virtio_mem_cread_bytes(addr, offset, buf, len, 1);
+
+		for (i = 0; i < len; i++) {
+			if (unlikely(buf[i] != old_buf[i])) {
+				check = -1; /* Need to retry configuration */
+				break;
+			}
+		}
+		cnt++;
+	} while (check == -1 && cnt < MAX_TRY_COUNT);
+
+	return check;
+}
+
+/**
+ * Read one byte of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @return __u8
+ *	The value of the configuration register.
+ */
+static inline __u8 virtio_mem_cread8(const void *addr, const __u8 offset)
+{
+	__u8 val = 0;
+
+	_virtio_mem_cread_bytes(addr, offset, &val, sizeof(val), sizeof(val));
+	return val;
+}
+
+/**
+ * Read one 16-bit word of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @return __u16
+ *	The value of the configuration register.
+ */
+static inline __u16 virtio_mem_cread16(const void *addr, const __u8 offset)
+{
+	__u16 val = 0;
+
+	_virtio_mem_cread_bytes(addr, offset, &val, sizeof(val), sizeof(val));
+	return val;
+}
+
+/**
+ * Read one 32-bit word of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @return __u32
+ *	The value of the configuration register.
+ */
+static inline __u32 virtio_mem_cread32(const void *addr, const __u8 offset)
+{
+	__u32 val = 0;
+
+	_virtio_mem_cread_bytes(addr, offset, &val, sizeof(val), sizeof(val));
+	return val;
+}
+
+/**
+ * Write one byte of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @param data
+ *	The value to write into the configuration register.
+ */
+static inline void virtio_mem_cwrite8(const void *addr, const __u8 offset,
+				      const __u8 data)
+{
+	_virtio_mem_cwrite_bytes(addr, offset, &data, sizeof(data), sizeof(data));
+}
+
+/**
+ * Write one 16-bit word of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @param data
+ *	The value to write into the configuration register.
+ */
+static inline void virtio_mem_cwrite16(const void *addr, const __u8 offset,
+				       const __u16 data)
+{
+	_virtio_mem_cwrite_bytes(addr, offset, &data, sizeof(data), sizeof(data));
+}
+
+/**
+ * Write one 32-bit word of device configuration.
+ *
+ * @param addr
+ *	The base address of the device.
+ * @param offset
+ *	The offset with the device address space.
+ * @param data
+ *	The value to write into the configuration register.
+ */
+static inline void virtio_mem_cwrite32(const void *addr, const __u8 offset,
+				       const __u32 data)
+{
+	_virtio_mem_cwrite_bytes(addr, offset, &data, sizeof(data), sizeof(data));
+}
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
}
/* Acknowledge the virtio driver */
- rc = virtio_dev_status_update(vdev, VIRTIO_CONFIG_STATUS_DRIVER);
+ rc = virtio_dev_status_update(vdev, (VIRTIO_CONFIG_STATUS_ACK | VIRTIO_CONFIG_STATUS_DRIVER));
if (rc != 0) {
uk_pr_err("Failed to acknowledge the virtio driver %p: %d\n",
vdev, rc);
#include <uk/plat/irq.h>
#include <uk/bus.h>
#include <uk/bitops.h>
+#ifdef CONFIG_ARCH_ARM_64
#include <libfdt.h>
#include <ofw/fdt.h>
+#endif
#include <uk/plat/common/bootinfo.h>
#include <platform_bus.h>
#include <virtio/virtio_bus.h>
#include <virtio/virtqueue.h>
#include <virtio/virtio_mmio.h>
+#ifdef CONFIG_ARCH_ARM_64
#include <gic/gic-v2.h>
+#endif
+
+#if CONFIG_LIBUKMMIO
+#include <uk/mmio.h>
+#endif
/*
* The alignment to use between consumer and producer parts of vring.
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
__u64 features = 0;
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
- features = virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
+ features = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
features <<= 32;
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
- features |= virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
+ features |= virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_FEATURES);
return features;
}
return;
}
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
(__u32)(vdev->features >> 32));
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_DRIVER_FEATURES,
(__u32)vdev->features);
}
__u16 w;
__u32 l;
- if (vm_dev->version == 1) {
+ if (vm_dev->version <= 2) {
__u8 *ptr = buf;
- unsigned int i;
+ __u8 i;
for (i = 0; i < len; i++)
- ptr[i] = virtio_cread8(base, offset + i);
+ ptr[i] = virtio_mem_cread8(base, offset + i);
return len;
}
switch (len) {
case 1:
- b = virtio_cread8(base, offset);
+ b = virtio_mem_cread8(base, offset);
memcpy(buf, &b, sizeof(b));
break;
case 2:
- w = (virtio_cread16(base, offset));
+ w = (virtio_mem_cread16(base, offset));
memcpy(buf, &w, sizeof(w));
break;
case 4:
- l = (virtio_cread32(base, offset));
+ l = (virtio_mem_cread32(base, offset));
memcpy(buf, &l, sizeof(l));
break;
case 8:
- l = (virtio_cread32(base, offset));
+ l = (virtio_mem_cread32(base, offset));
memcpy(buf, &l, sizeof(l));
- l = (virtio_cread32(base, offset + sizeof(l)));
+ l = (virtio_mem_cread32(base, offset + sizeof(l)));
memcpy(buf + sizeof(l), &l, sizeof(l));
break;
default:
__u32 i;
for (i = 0; i < len; i++)
- virtio_cwrite8(base, offset + i, ptr[i]);
+ virtio_mem_cwrite8(base, offset + i, ptr[i]);
return 0;
}
switch (len) {
case 1:
memcpy(&b, buf, sizeof(b));
- virtio_cwrite8(base, offset, b);
+ virtio_mem_cwrite8(base, offset, b);
break;
case 2:
memcpy(&w, buf, sizeof(w));
- virtio_cwrite16(base, offset, w);
+ virtio_mem_cwrite16(base, offset, w);
break;
case 4:
memcpy(&l, buf, sizeof(l));
- virtio_cwrite32(base, offset, l);
+ virtio_mem_cwrite32(base, offset, l);
break;
case 8:
memcpy(&l, buf, sizeof(l));
- virtio_cwrite32(base, offset, l);
+ virtio_mem_cwrite32(base, offset, l);
memcpy(&l, buf + sizeof(l), sizeof(l));
- virtio_cwrite32(base, offset + sizeof(l), l);
+ virtio_mem_cwrite32(base, offset + sizeof(l), l);
break;
default:
_virtio_cwrite_bytes(base, offset, buf, len, 1);
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- return virtio_cread32(vm_dev->base, VIRTIO_MMIO_STATUS) & 0xff;
+ return virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_STATUS) & 0xff;
}
static void vm_set_status(struct virtio_dev *vdev, __u8 status)
/* We should never be setting status to 0. */
UK_BUGON(status == 0);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, status);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, status);
}
static void vm_reset(struct virtio_dev *vdev)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
/* 0 status means a reset. */
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, 0);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_STATUS, 0);
}
/* Transport interface */
* We write the queue's selector into the notification register to
* signal the other end
*/
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NOTIFY, queue_id);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NOTIFY, queue_id);
+
return 1;
}
struct virtqueue *vq;
/* Read and acknowledge interrupts */
- status = virtio_cread32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_STATUS);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_ACK, status);
+ status = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_STATUS);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_INTERRUPT_ACK, status);
if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
uk_pr_warn("Unsupported config change interrupt received on virtio-mmio device %p\n",
}
/* Select the queue we're interested in */
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, queue_id);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, queue_id);
/* Activate the queue */
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM, num_desc);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM, num_desc);
if (vm_dev->version == 1) {
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_ALIGN, __PAGE_SIZE);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_PFN,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_ALIGN, __PAGE_SIZE);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_PFN,
virtqueue_physaddr(vq) >> __PAGE_SHIFT);
} else {
__u64 addr;
addr = virtqueue_physaddr(vq);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_LOW, (__u32)addr);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_HIGH,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_LOW, (__u32)addr);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_DESC_HIGH,
(__u32)(addr >> 32));
addr = virtqueue_get_avail_addr(vq);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_LOW, (__u32)addr);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_LOW, (__u32)addr);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
(__u32)(addr >> 32));
addr = virtqueue_get_used_addr(vq);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_LOW, (__u32)addr);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_HIGH,
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_LOW, (__u32)addr);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_USED_HIGH,
(__u32)(addr >> 32));
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_READY, 1);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_READY, 1);
}
flags = ukplat_lcpu_save_irqf();
unsigned int irq = vm_dev->pfdev->irq;
int i, err;
int vq_cnt = 0;
-
+
err = ukplat_irq_register(irq, vm_interrupt, vm_dev);
if (err)
return err;
for (i = 0; i < num_vqs; ++i) {
/* Select the queue we're interested in */
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, i);
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_QUEUE_SEL, i);
/* Queue shouldn't already be set up. */
- if (virtio_cread32(vm_dev->base, (vm_dev->version == 1 ?
+ if (virtio_mem_cread32(vm_dev->base, (vm_dev->version == 1 ?
VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
uk_pr_err("vm_find_vqs error mmio queue not ready\n");
err = -ENOENT;
goto error_exit;
}
- qdesc_size[i] = virtio_cread32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM_MAX);
+ qdesc_size[i] = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_QUEUE_NUM_MAX);
if (qdesc_size[i] == 0) {
err = -ENOENT;
goto error_exit;
static int virtio_mmio_probe(struct pf_device *pfdev)
{
+ #ifdef CONFIG_LIBUKMMIO
+ #ifdef CONFIG_ARCH_ARM_64
const fdt32_t *prop;
int type, hwirq, prop_len;
int fdt_vm = pfdev->fdt_offset;
__u64 reg_base;
__u64 reg_size;
+ #else
+ struct uk_mmio_device *mmio_dev;
+ #endif
void *dtb;
+ #ifdef CONFIG_ARCH_ARM_64
dtb = (void *)ukplat_bootinfo_get()->dtb;
if (fdt_vm == -FDT_ERR_NOTFOUND) {
uk_pr_info("device not found in fdt\n");
pfdev->base = reg_base;
pfdev->irq = gic_irq_translate(type, hwirq);
+ #else
+
+ mmio_dev = uk_mmio_dev_get(0);
+
+ if (!mmio_dev) {
+ uk_pr_err("mmio device not found\n");
+ goto error_exit;
+ }
+ pfdev->base = mmio_dev->base_addr;
+ pfdev->irq = mmio_dev->irq;
+
uk_pr_info("virtio mmio probe base(0x%lx) irq(%ld)\n",
pfdev->base, pfdev->irq);
+ #endif
+ #endif
+
return 0;
-
+
+#ifdef CONFIG_LIBUKMMIO
error_exit:
return -EFAULT;
+#endif
}
static int virtio_mmio_add_dev(struct pf_device *pfdev)
goto free_vmdev;
}
- magic = virtio_cread32(vm_dev->base, VIRTIO_MMIO_MAGIC_VALUE);
+ magic = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
uk_pr_err("Wrong magic value 0x%x!\n", magic);
rc = -ENODEV;
}
/* Check device version */
- vm_dev->version = virtio_cread32(vm_dev->base, VIRTIO_MMIO_VERSION);
+ vm_dev->version = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_VERSION);
if (vm_dev->version < 1 || vm_dev->version > 2) {
uk_pr_err("Version %ld not supported!\n", vm_dev->version);
rc = -ENXIO;
goto free_vmdev;
}
- vm_dev->vdev.id.virtio_device_id = virtio_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_ID);
+ vm_dev->vdev.id.virtio_device_id = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_DEVICE_ID);
if (vm_dev->vdev.id.virtio_device_id == 0) {
/*
* virtio-mmio device with an ID 0 is a (dummy) placeholder
rc = -ENODEV;
goto free_vmdev;
}
- vm_dev->id.vendor = virtio_cread32(vm_dev->base, VIRTIO_MMIO_VENDOR_ID);
+ vm_dev->id.vendor = virtio_mem_cread32(vm_dev->base, VIRTIO_MMIO_VENDOR_ID);
- virtio_cwrite32(vm_dev->base, VIRTIO_MMIO_GUEST_PAGE_SIZE, __PAGE_SIZE);
+ if (vm_dev->version <= 1) {
+ virtio_mem_cwrite32(vm_dev->base, VIRTIO_MMIO_GUEST_PAGE_SIZE, __PAGE_SIZE);
+ }
rc = virtio_bus_register_device(&vm_dev->vdev);
if (rc != 0) {
#include <virtio/virtio_bus.h>
#include <virtio/virtqueue.h>
#include <virtio/virtio_net.h>
+#include <uk/hexdump.h>
/**
* VIRTIO_PKT_BUFFER_LEN = VIRTIO_NET_HDR + ETH_HDR + ETH_PKT_PAYLOAD_LEN
__u8 state;
/* RX promiscuous mode. */
__u8 promisc : 1;
+ /* VirtIO modern network standard. */
+ __u8 modern;
};
/**
static int virtio_netdev_rxq_dequeue(struct uk_netdev_rx_queue *rxq,
struct uk_netbuf **netbuf);
static int virtio_netdev_rxq_enqueue(struct uk_netdev_rx_queue *rxq,
- struct uk_netbuf *netbuf);
+ struct uk_netbuf *netbuf,
+ __u8 is_modern);
static int virtio_netdev_recv_done(struct virtqueue *vq, void *priv);
static int virtio_netdev_rx_fillup(struct uk_netdev_rx_queue *rxq,
- __u16 num, int notify);
+ __u16 num, int notify,
+ __u8 is_modern);
/**
* Static global constants
static int virtio_netdev_rx_fillup(struct uk_netdev_rx_queue *rxq,
__u16 nb_desc,
- int notify)
+ int notify,
+ __u8 is_modern)
{
struct uk_netbuf *netbuf[RX_FILLUP_BATCHLEN];
int rc = 0;
for (i = 0; i < cnt; i++) {
uk_pr_debug("Enqueue netbuf %"PRIu16"/%"PRIu16" (%p) to virtqueue %p...\n",
i + 1, cnt, netbuf[i], rxq);
- rc = virtio_netdev_rxq_enqueue(rxq, netbuf[i]);
+ rc = virtio_netdev_rxq_enqueue(rxq, netbuf[i], is_modern);
if (unlikely(rc < 0)) {
uk_pr_err("Failed to add a buffer to receive virtqueue %p: %d\n",
rxq, rc);
struct uk_netdev_tx_queue *queue,
struct uk_netbuf *pkt)
{
- struct virtio_net_device *vndev __unused;
+ struct virtio_net_device *vndev;
struct virtio_net_hdr *vhdr;
struct virtio_net_hdr_padded *padded_hdr;
int16_t header_sz = sizeof(*padded_hdr);
/**
* Use the preallocated header space for the virtio header.
*/
- rc = uk_netbuf_header(pkt, header_sz);
+ rc = uk_netbuf_header(pkt, VIRTIO_HDR_LEN);
if (unlikely(rc != 1)) {
uk_pr_err("Failed to prepend virtio header\n");
rc = -ENOSPC;
* to `uk_sglist_append_netbuf()`. However, a netbuf
* chain can only once have set the PARTIAL_CSUM flag.
*/
- memset(vhdr, 0, sizeof(*vhdr));
+ memset(vhdr, 0, VIRTIO_HDR_LEN);
if (pkt->flags & UK_NETBUF_F_PARTIAL_CSUM) {
vhdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
/* `csum_start` is without header size */
* 1 for the virtio header and the other for the actual network packet.
*/
/* Appending the data to the list. */
- rc = uk_sglist_append(&queue->sg, vhdr, sizeof(*vhdr));
+
+ rc = uk_sglist_append(&queue->sg, vhdr, vndev->modern ? VIRTIO_HDR_LEN : sizeof(struct virtio_net_hdr));
if (unlikely(rc != 0)) {
uk_pr_err("Failed to append to the sg list\n");
goto err_remove_vhdr;
}
static int virtio_netdev_rxq_enqueue(struct uk_netdev_rx_queue *rxq,
- struct uk_netbuf *netbuf)
+ struct uk_netbuf *netbuf,
+ __u8 is_modern)
{
int rc = 0;
struct virtio_net_hdr_padded *rxhdr;
- int16_t header_sz = sizeof(*rxhdr);
__u8 *buf_start;
size_t buf_len = 0;
struct uk_sglist *sg;
/**
* Retrieve the buffer header length.
*/
- rc = uk_netbuf_header(netbuf, header_sz);
+ rc = uk_netbuf_header(netbuf, sizeof(*rxhdr));
if (unlikely(rc != 1)) {
uk_pr_err("Failed to allocate space to prepend virtio header\n");
return -EINVAL;
uk_sglist_reset(sg);
/* Appending the header buffer to the sglist */
- uk_sglist_append(sg, rxhdr, sizeof(struct virtio_net_hdr));
+ // uk_sglist_append(sg, rxhdr, is_modern ? VIRTIO_HDR_LEN : sizeof(struct virtio_net_hdr));
+ uk_sglist_append(sg, rxhdr, is_modern ? VIRTIO_HDR_LEN : sizeof(struct virtio_net_hdr));
/* Appending the data buffer to the sglist */
uk_sglist_append(sg, buf_start, buf_len);
*/
buf->len = len + VTNET_RX_HEADER_PAD;
rc = uk_netbuf_header(buf,
- -((int16_t)sizeof(struct virtio_net_hdr_padded)));
+ -((uint16_t)sizeof(struct virtio_net_hdr_padded)));
UK_ASSERT(rc == 1);
*netbuf = buf;
return ret;
}
-static int virtio_netdev_recv(struct uk_netdev *dev __unused,
+static int virtio_netdev_recv(struct uk_netdev *dev,
struct uk_netdev_rx_queue *queue,
struct uk_netbuf **pkt)
{
+ struct virtio_net_device *vndev;
int status = 0x0;
int rc = 0;
/* Queue interrupts have to be off when calling receive */
UK_ASSERT(!(queue->intr_enabled & VTNET_INTR_EN));
+ vndev = to_virtionetdev(dev);
rc = virtio_netdev_rxq_dequeue(queue, pkt);
if (unlikely(rc < 0)) {
uk_pr_err("Failed to dequeue the packet: %d\n", rc);
goto err_exit;
}
status |= (*pkt) ? UK_NETDEV_STATUS_SUCCESS : 0x0;
- status |= virtio_netdev_rx_fillup(queue, (queue->nb_desc - rc), 1);
+ status |= virtio_netdev_rx_fillup(queue, (queue->nb_desc - rc), 1, vndev->modern);
/* Enable interrupt only when user had previously enabled it */
if (queue->intr_enabled & VTNET_INTR_USR_EN_MASK) {
*/
status |= virtio_netdev_rx_fillup(queue,
(queue->nb_desc - rc),
- 1);
+ 1,
+ vndev->modern);
/* Need to enable the interrupt on the last packet */
rc = virtqueue_intr_enable(queue->vq);
rxq->alloc_rxpkts_argp = conf->alloc_rxpkts_argp;
/* Allocate receive buffers for this queue */
- virtio_netdev_rx_fillup(rxq, rxq->nb_desc, 0);
+ virtio_netdev_rx_fillup(rxq, rxq->nb_desc, 0, vndev->modern);
exit:
return rxq;
else
VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_MTU);
+ if (VIRTIO_FEATURE_HAS(host_features, VIRTIO_NET_F_STATUS)) {
+ VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_STATUS);
+ }
+
/**
* Gratuitous ARP
* NOTE: We tell that we will do gratuitous ARPs ourselves.
*/
- VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_GUEST_ANNOUNCE);
+ if (VIRTIO_FEATURE_HAS(host_features, VIRTIO_NET_F_CTRL_VQ)) {
+ VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_GUEST_ANNOUNCE);
+ }
/**
* Partial checksumming
VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_GUEST_CSUM);
}
+ // VirtIO modern standard.
+ if (VIRTIO_FEATURE_HAS(host_features, VIRTIO_F_VERSION_1)) {
+ VIRTIO_FEATURE_SET(drv_features, VIRTIO_F_VERSION_1);
+ vndev->modern = 1;
+ }
+
/**
* Use index based event supression when it's available.
* This allows a more fine-grained control when the hypervisor should
vndev->max_mtu = vndev->mtu = UK_ETH_PAYLOAD_MAXLEN;
}
+ virtio_dev_status_update(vndev->vdev, (VIRTIO_CONFIG_STATUS_ACK | VIRTIO_CONFIG_STATUS_DRIVER | VIRTIO_CONFIG_STATUS_FEATURES_OK));
+
return 0;
err_negotiate_feature:
vndev->uid = rc;
rc = 0;
vndev->promisc = 0;
+ vndev->modern = 0;
/**
* TODO:
config KVM_PF
bool "Platform Bus Driver"
default y
- depends on (ARCH_ARM_64)
select LIBUKBUS
help
Platform bus driver for probing and operating platform devices
help
Support virtio devices on PCI bus
+config VIRTIO_MMIO
+ bool "Virtio MMIO device support"
+ default n
+ depends on KVM_PF
+ select VIRTIO_BUS
+ select LIBUKMMIO if ARCH_X86_64
+ help
+ Support virtio devices on MMIO bus
+
config VIRTIO_NET
bool "Virtio Net device"
default y if LIBUKNETDEV
LIBKVMPF_CINCLUDES-$(CONFIG_ARCH_ARM_64) += -I$(LIBKVMPLAT_BASE)/include
LIBKVMPF_CINCLUDES-$(CONFIG_ARCH_ARM_64) += -I$(UK_PLAT_DRIVERS_BASE)/include
LIBKVMPF_SRCS-$(CONFIG_KVM_PF) += $(UK_PLAT_COMMON_BASE)/platform_bus.c|common
+LIBKVMPF_ASINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include
+LIBKVMPF_CINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include
+LIBKVMPF_CINCLUDES-y += -I$(LIBKVMPLAT_BASE)/include
+LIBKVMPF_CINCLUDES-y += -I$(UK_PLAT_DRIVERS_BASE)/include
##
## Virtio library definitions
$(UK_PLAT_DRIVERS_BASE)/virtio/virtio_pci.c
LIBKVMVIRTIO_SRCS-$(CONFIG_ARCH_ARM_64) +=\
$(UK_PLAT_DRIVERS_BASE)/virtio/virtio_mmio.c
+LIBKVMVIRTIO_SRCS-$(CONFIG_KVM_PF) +=\
+ $(UK_PLAT_DRIVERS_BASE)/virtio/virtio_mmio.c
##
## Virtio Net library definition
##