* versa. They are at the end for backwards compatibility.
*/
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
-#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__virtio_le16 *)&(vr)->used->ring[(vr)->num])
static inline void vring_init(struct vring *vr, unsigned int num, uint8_t *p,
unsigned long align)
vr->avail = (struct vring_avail *) (p +
num * sizeof(struct vring_desc));
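+ /* The used_event field (vring_used_event()) occupies one extra
+ * uint16_t directly after avail->ring[num]; account for it before
+ * aligning the start of the used ring.
+ */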
vr->used = (void *)
- (((unsigned long) &vr->avail->ring[num] + align - 1) & ~(align - 1));
+ (((unsigned long) &vr->avail->ring[num] + sizeof(uint16_t) +
+ align - 1) & ~(align - 1));
}
static inline unsigned int vring_size(unsigned int num, unsigned long align)
static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
__u16 old_idx)
{
- return (new_idx - event_idx - 1) < (new_idx - old_idx);
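+ /* Compute in 16-bit unsigned arithmetic so that the free-running
+ * indices wrap correctly; without the casts the operands are
+ * promoted to int and the modulo-2^16 comparison breaks.
+ */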
+ return (uint16_t)(new_idx - event_idx - 1) <
+ (uint16_t)(new_idx - old_idx);
}
#ifdef __cplusplus
#include <uk/plat/io.h>
#include <virtio/virtio_ring.h>
#include <virtio/virtqueue.h>
+#include <virtio/virtio_bus.h>
#ifdef CONFIG_LIBUKVMEM
#include <uk/arch/paging.h>
#include <uk/plat/paging.h>
UK_ASSERT(vq);
vrq = to_virtqueue_vring(vq);
+
+ if (vq->uses_event_idx) {
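+ /* With the event index feature, suppress interrupts by moving
+ * used_event to an index the device cannot reach before the
+ * ring wraps around, instead of relying on
+ * VRING_AVAIL_F_NO_INTERRUPT.
+ */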
+ vring_used_event(&vrq->vring) =
+ vrq->last_used_desc_idx - vrq->vring.num - 1;
+ return;
+ }
vrq->vring.avail->flags |= (VRING_AVAIL_F_NO_INTERRUPT);
}
vrq = to_virtqueue_vring(vq);
/* Check whether there are no more packets pending */
if (!virtqueue_hasdata(vq)) {
- if (vrq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+ if (vq->uses_event_idx) {
+ /* TODO: Add a parameter to delay interrupts (for example
+ * for TX queue interrupts).
+ */
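+ /* Request an interrupt as soon as the device marks the next
+ * buffer as used.
+ */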
+ vring_used_event(&vrq->vring) = vrq->last_used_desc_idx + 0;
+ } else {
vrq->vring.avail->flags &=
(~VRING_AVAIL_F_NO_INTERRUPT);
- /**
- * We enabled the interrupts. We ensure it using the
- * memory barrier and check if there are any further
- * data available in the queue. The check for data
- * after enabling the interrupt is to make sure we do
- * not miss any interrupt while transitioning to enable
- * interrupt. This is inline with the requirement from
- * virtio specification section 3.2.2
- */
- mb();
- /* Check if there are further descriptors */
- if (virtqueue_hasdata(vq)) {
- virtqueue_intr_disable(vq);
- rc = 1;
- }
+ }
+
+ /**
+ * Interrupts are now enabled. Use a memory barrier to order
+ * the enable update before re-checking whether further data is
+ * available in the queue. Checking for data after enabling
+ * interrupts ensures that no interrupt is missed while
+ * transitioning to the enabled state. This is in line with the
+ * requirement of virtio specification section 3.2.2.
+ */
+ mb();
+ /* Check if there are further descriptors */
+ if (virtqueue_hasdata(vq)) {
+ virtqueue_intr_disable(vq);
+ rc = 1;
}
} else {
/**
int virtqueue_notify_enabled(struct virtqueue *vq)
{
struct virtqueue_vring *vrq;
+ uint16_t old, new;
UK_ASSERT(vq);
vrq = to_virtqueue_vring(vq);
+ if (vq->uses_event_idx) {
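+ /* With the event index feature, the device requests a
+ * notification only when avail->idx moves past the avail_event
+ * index it published; compare the freshly exposed index
+ * against it.
+ */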
+ new = vrq->vring.avail->idx;
+ old = new - 1; /* FIXME: Use actual written count */
+
+ return vring_need_event(vring_avail_event(&vrq->vring), new, old);
+ }
return ((vrq->vring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
{
__u64 feature = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
- /**
- * Currently out vring driver does not support any ring feature. We will
- * add support to transport feature in the future.
- */
+ /* Advertise support for the event index feature */
+ feature |= 1ULL << VIRTIO_F_EVENT_IDX;
+
feature &= feature_set;
return feature;
}
vq->vdev = vdev;
vq->vq_callback = callback;
vq->vq_notify_host = notify;
+ vq->uses_event_idx = VIRTIO_FEATURE_HAS(vdev->features, VIRTIO_F_EVENT_IDX);
return vq;
err_freevq: