xenbits.xensource.com Git - unikraft/unikraft.git/commitdiff
plat/virtio: Implement support for event index notification suppression
authorMarco Schlumpp <marco@unikraft.io>
Mon, 20 Feb 2023 13:57:00 +0000 (14:57 +0100)
committerMichalis Pappas <michalis@unikraft.io>
Tue, 22 Aug 2023 09:04:44 +0000 (11:04 +0200)
This allows the driver/device to specify when the other side should
send a notification. This is required for firecracker, which does not
support the original notification suppression flag.

Signed-off-by: Marco Schlumpp <marco@unikraft.io>
plat/drivers/include/virtio/virtio_ring.h
plat/drivers/include/virtio/virtqueue.h
plat/drivers/virtio/virtio_net.c
plat/drivers/virtio/virtio_ring.c

index 9f65594a71e6a4723d0841504e6187d1c4071a06..40fa38db77da28f2a8b13dc6929325127c9b8300 100644 (file)
@@ -157,7 +157,7 @@ struct vring {
  * versa. They are at the end for backwards compatibility.
  */
 #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
-#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__virtio_le16 *)&(vr)->used->ring[(vr)->num])
 
 static inline void vring_init(struct vring *vr, unsigned int num, uint8_t *p,
                              unsigned long align)
@@ -167,7 +167,8 @@ static inline void vring_init(struct vring *vr, unsigned int num, uint8_t *p,
        vr->avail = (struct vring_avail *) (p +
                        num * sizeof(struct vring_desc));
        vr->used = (void *)
-       (((unsigned long) &vr->avail->ring[num] + align - 1) & ~(align - 1));
+               (((unsigned long) &vr->avail->ring[num] + sizeof(uint16_t) +
+                       align - 1) & ~(align - 1));
 }
 
 static inline unsigned int vring_size(unsigned int num, unsigned long align)
@@ -186,7 +187,8 @@ static inline unsigned int vring_size(unsigned int num, unsigned long align)
 static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
                                   __u16 old_idx)
 {
-       return (new_idx - event_idx - 1) < (new_idx - old_idx);
+       return (uint16_t)(new_idx - event_idx - 1) <
+               (uint16_t)(new_idx - old_idx);
 }
 
 #ifdef __cplusplus
index 1daf659f6aef628768605eea493752c3dd13b407..4220d8b4b101693d7d07c8bb5a0958331857fba2 100644 (file)
@@ -66,6 +66,8 @@ struct virtqueue {
        virtqueue_callback_t vq_callback;
        /* Next entry of the queue */
        UK_TAILQ_ENTRY(struct virtqueue) next;
+       /* EVENT_IDX notification suppression is used */
+       __u8 uses_event_idx;
        /* Private data structure used by the driver of the queue */
        void *priv;
 };
index ab93f79a1f2b06db33ad503b6432aa5f1ee91221..3bc3f1c301f5817beed11ee69d4bbad4f25fe25d 100644 (file)
@@ -928,6 +928,16 @@ static int virtio_netdev_feature_negotiate(struct uk_netdev *n)
                VIRTIO_FEATURE_SET(drv_features, VIRTIO_NET_F_GUEST_CSUM);
        }
 
+       /**
+        * Use index-based event suppression when it is available.
+        * This allows finer-grained control over when the hypervisor should
+        * notify the guest. Some hypervisors, such as firecracker, also do
+        * not support the original flag.
+        */
+       if (VIRTIO_FEATURE_HAS(host_features, VIRTIO_F_EVENT_IDX)) {
+               VIRTIO_FEATURE_SET(drv_features, VIRTIO_F_EVENT_IDX);
+       }
+
        /**
         * TCP Segmentation Offload
         * NOTE: This enables sending and receiving of packets marked with
index d0883b2163e2e5a955722076aa180ff5403aa65c..5f748b54285a1a3d103dcff155ac996de8ef6243 100644 (file)
@@ -43,6 +43,7 @@
 #include <uk/plat/io.h>
 #include <virtio/virtio_ring.h>
 #include <virtio/virtqueue.h>
+#include <virtio/virtio_bus.h>
 #ifdef CONFIG_LIBUKVMEM
 #include <uk/arch/paging.h>
 #include <uk/plat/paging.h>
@@ -101,6 +102,12 @@ void virtqueue_intr_disable(struct virtqueue *vq)
        UK_ASSERT(vq);
 
        vrq = to_virtqueue_vring(vq);
+
+       if (vq->uses_event_idx) {
+               vring_used_event(&vrq->vring) =
+                       vrq->last_used_desc_idx - vrq->vring.num - 1;
+               return;
+       }
        vrq->vring.avail->flags |= (VRING_AVAIL_F_NO_INTERRUPT);
 }
 
@@ -114,24 +121,30 @@ int virtqueue_intr_enable(struct virtqueue *vq)
        vrq = to_virtqueue_vring(vq);
        /* Check if there are no more packets enabled */
        if (!virtqueue_hasdata(vq)) {
-               if (vrq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+               if (vq->uses_event_idx) {
+                       /* TODO: Add a parameter to delay interrupts. (For
+                        * example for TXQ interrupts)
+                        */
+                       vring_used_event(&vrq->vring) = vrq->last_used_desc_idx + 0;
+               } else {
                        vrq->vring.avail->flags &=
                                (~VRING_AVAIL_F_NO_INTERRUPT);
-                       /**
-                        * We enabled the interrupts. We ensure it using the
-                        * memory barrier and check if there are any further
-                        * data available in the queue. The check for data
-                        * after enabling the interrupt is to make sure we do
-                        * not miss any interrupt while transitioning to enable
-                        * interrupt. This is inline with the requirement from
-                        * virtio specification section 3.2.2
-                        */
-                       mb();
-                       /* Check if there are further descriptors */
-                       if (virtqueue_hasdata(vq)) {
-                               virtqueue_intr_disable(vq);
-                               rc = 1;
-                       }
+               }
+
+               /**
+                * We enabled the interrupts. We ensure it using the
+                * memory barrier and check if there are any further
+                * data available in the queue. The check for data
+                * after enabling the interrupt is to make sure we do
+                * not miss any interrupt while transitioning to enable
+                * interrupt. This is inline with the requirement from
+                * virtio specification section 3.2.2
+                */
+               mb();
+               /* Check if there are further descriptors */
+               if (virtqueue_hasdata(vq)) {
+                       virtqueue_intr_disable(vq);
+                       rc = 1;
                }
        } else {
                /**
@@ -186,9 +199,16 @@ static inline void virtqueue_detach_desc(struct virtqueue_vring *vrq,
 int virtqueue_notify_enabled(struct virtqueue *vq)
 {
        struct virtqueue_vring *vrq;
+       uint16_t old, new;
 
        UK_ASSERT(vq);
        vrq = to_virtqueue_vring(vq);
+       if (vq->uses_event_idx) {
+               new = vrq->vring.avail->idx;
+               old = new - 1; /* FIXME: Use actual written count */
+
+               return vring_need_event(vring_avail_event(&vrq->vring), new, old);
+       }
 
        return ((vrq->vring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
 }
@@ -233,10 +253,9 @@ __u64 virtqueue_feature_negotiate(__u64 feature_set)
 {
        __u64 feature = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
 
-       /**
-        * Currently out vring driver does not support any ring feature. We will
-        * add support to transport feature in the future.
-        */
+       /* Allow event index feature */
+       feature |= 1ULL << VIRTIO_F_EVENT_IDX;
+
        feature &= feature_set;
        return feature;
 }
@@ -450,6 +469,7 @@ struct virtqueue *virtqueue_create(__u16 queue_id, __u16 nr_descs, __u16 align,
        vq->vdev = vdev;
        vq->vq_callback = callback;
        vq->vq_notify_host = notify;
+       vq->uses_event_idx = VIRTIO_FEATURE_HAS(vdev->features, VIRTIO_F_EVENT_IDX);
        return vq;
 
 err_freevq: