uk_rwlock_wunlock(&st->iolock);
}
+/*
+ * File state covers one crucial aspect of files: event handling.
+ *
+ * Drivers can handle events in one of two ways: managed or polled.
+ * Managed drivers (the default) must signal both rising and falling edges
+ * of events, leaving ukfile to manage everything else.
+ * Polled drivers, conversely, maintain event levels internally and provide
+ * a callback for retrieving their instantaneous values; they are thus only
+ * required to signal rising edges of events.
+ * Polled drivers are enabled via the LIBUKFILE_POLLED config option.
+ */
/*
 * We define initializers separately from initial values.
* The former can only be used in (static) variable initializations, while the
* latter is meant for assigning to variables or as anonymous data structures.
*/
+#if CONFIG_LIBUKFILE_POLLED
+#define UK_FILE_POLLED_STATE_INITIALIZER(name, pollfunc) { \
+ .iolock = UK_RWLOCK_INITIALIZER((name).iolock, 0), \
+ .pollq = UK_POLLQ_EDGE_INITIALIZER((name).pollq, (pollfunc)) \
+}
+#define UK_FILE_POLLED_STATE_INIT_VALUE(name, pollfunc) \
+ ((struct uk_file_state)UK_FILE_POLLED_STATE_INITIALIZER( \
+ (name), (pollfunc)))
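+
+/*
+ * Example (sketch; the "mydrv" names below are illustrative, not part of
+ * this API): a polled driver supplies its poll callback when initializing
+ * its file state and afterwards only signals rising edges of events:
+ *
+ *   static uk_pollevent mydrv_poll(const struct uk_file *f,
+ *                                  uk_pollevent mask);
+ *
+ *   static struct uk_file_state mydrv_state =
+ *           UK_FILE_POLLED_STATE_INITIALIZER(mydrv_state, mydrv_poll);
+ */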
+#endif /* CONFIG_LIBUKFILE_POLLED */
+
#define UK_FILE_STATE_EVENTS_INITIALIZER(name, ev) { \
.iolock = UK_RWLOCK_INITIALIZER((name).iolock, 0), \
- .pollq = UK_POLLQ_EVENTS_INITIALIZER((name).pollq, (ev)) \
+ .pollq = UK_POLLQ_LEVEL_EVENTS_INITIALIZER((name).pollq, (ev)) \
}
#define UK_FILE_STATE_EVENTS_INIT_VALUE(name, ev) \
((struct uk_file_state)UK_FILE_STATE_EVENTS_INITIALIZER((name), (ev)))
static inline
uk_pollevent uk_file_poll_immediate(const struct uk_file *f, uk_pollevent req)
{
- return uk_pollq_poll_immediate(&f->state->pollq, req);
+ return uk_pollq_poll_level(&f->state->pollq, req, f);
}
static inline
uk_pollevent uk_file_poll_until(const struct uk_file *f, uk_pollevent req,
__nsec deadline)
{
- return uk_pollq_poll_until(&f->state->pollq, req, deadline);
+ return uk_pollq_poll_until(&f->state->pollq, req, deadline, f);
}
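+
+/*
+ * Usage sketch (illustrative; UKFD_POLLIN stands in for whichever readable
+ * event bit the caller cares about): wait for `f` to become readable until
+ * an absolute monotonic deadline, e.g. now + 10ms in nanoseconds:
+ *
+ *   uk_pollevent ev = uk_file_poll_until(f, UKFD_POLLIN, deadline);
+ *
+ * A return of 0 means the deadline passed with none of the requested events
+ * set.
+ */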
static inline
*/
typedef unsigned int uk_pollevent;
+struct uk_file;
+
+#if CONFIG_LIBUKFILE_POLLED
+/**
+ * Callback that fetches events in `mask` currently set on file `f`.
+ *
+ * This function cannot (meaningfully) fail, must not block indefinitely, and
+ * should avoid taking locks or yielding execution when possible.
+ *
+ * Drivers may choose to not provide this callback, in which case they are
+ * responsible for updating the current event levels with `uk_pollq_set`,
+ * `uk_pollq_clear`, and/or `uk_pollq_assign` in-band with I/O operations.
+ *
+ * If drivers do provide this, it will be called every time the instantaneous
+ * level of events is queried. Drivers are then responsible only for notifying
+ * the rising edges of events via `uk_pollq_set`.
+ *
+ * @param f File to fetch events for.
+ * @param mask Bitmask of events to fetch.
+ *
+ * @return
+ * Bitwise AND between `mask` and the presently set events on `f`
+ */
+typedef uk_pollevent (*uk_poll_func)(const struct uk_file *f,
+ uk_pollevent mask);
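+
+/*
+ * Sketch of such a callback for a hypothetical driver; the mydrv_* helpers,
+ * the UKFD_POLLIN/UKFD_POLLOUT event bit names, and the use of the file's
+ * private `node` pointer are illustrative assumptions, not part of this API:
+ *
+ *   static uk_pollevent mydrv_poll(const struct uk_file *f, uk_pollevent mask)
+ *   {
+ *           const struct mydrv_node *d = f->node;
+ *           uk_pollevent ev = 0;
+ *
+ *           if (mydrv_has_data(d))
+ *                   ev |= UKFD_POLLIN;
+ *           if (mydrv_has_space(d))
+ *                   ev |= UKFD_POLLOUT;
+ *           return ev & mask;
+ *   }
+ */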
+#endif /* CONFIG_LIBUKFILE_POLLED */
+
/**
* Ticket for registering on the poll waiting list.
*
#endif /* CONFIG_LIBUKFILE_CHAINUPDATE */
/* Events */
+#if CONFIG_LIBUKFILE_POLLED
+ uk_poll_func poll; /* If provided, used instead of reading .events */
+#endif /* CONFIG_LIBUKFILE_POLLED */
volatile uk_pollevent events; /* Instantaneous event levels */
uk_pollevent waitmask; /* Events waited on by threads */
#if CONFIG_LIBUKFILE_CHAINUPDATE
struct uk_rwlock waitlock; /* Wait list lock */
};
+/*
+ * Pollqueues come in two varieties: edge- and level-notified.
+ * Edge-notified queues require drivers to notify only the rising edges of
+ * events, while providing a callback for fetching instantaneous levels.
+ * Level-notified queues require drivers to notify both rising and falling
+ * edges of events, with the queue itself maintaining event levels.
+ * See the description of `uk_poll_func` for more details.
+ *
+ * Edge-notified queues require setting LIBUKFILE_POLLED during configuration.
+ */
/*
 * We define initializers separately from initial values.
* The former can only be used in (static) variable initializations, while the
* latter is meant for assigning to variables or as anonymous data structures.
*/
#if CONFIG_LIBUKFILE_CHAINUPDATE
-#define UK_POLLQ_EVENTS_INITIALIZER(q, ev) { \
+#if CONFIG_LIBUKFILE_POLLED
+#define _POLLQ_INIT(q, pollfunc, ev) { \
+ .wait = NULL, \
+ .waitend = &(q).wait, \
+ .prop = NULL, \
+ .propend = &(q).prop, \
+ .poll = (pollfunc), \
+ .events = (ev), \
+ .waitmask = 0, \
+ .propmask = 0, \
+ .proplock = UK_RWLOCK_INITIALIZER((q).proplock, 0), \
+ .waitlock = UK_RWLOCK_INITIALIZER((q).waitlock, 0), \
+}
+#else /* !CONFIG_LIBUKFILE_POLLED */
+#define _POLLQ_INIT(q, pollfunc, ev) { \
.wait = NULL, \
.waitend = &(q).wait, \
.prop = NULL, \
 .propend = &(q).prop, \
 .events = (ev), \
 .waitmask = 0, \
 .propmask = 0, \
 .proplock = UK_RWLOCK_INITIALIZER((q).proplock, 0), \
 .waitlock = UK_RWLOCK_INITIALIZER((q).waitlock, 0), \
}
+#endif /* !CONFIG_LIBUKFILE_POLLED */
#else /* !CONFIG_LIBUKFILE_CHAINUPDATE */
-#define UK_POLLQ_EVENTS_INITIALIZER(q, ev) { \
+#if CONFIG_LIBUKFILE_POLLED
+#define _POLLQ_INIT(q, pollfunc, ev) { \
.wait = NULL, \
.waitend = &(q).wait, \
+ .poll = (pollfunc), \
.events = (ev), \
.waitmask = 0, \
.waitlock = UK_RWLOCK_INITIALIZER((q).waitlock, 0), \
}
+#else /* !CONFIG_LIBUKFILE_POLLED */
+#define _POLLQ_INIT(q, pollfunc, ev) { \
+ .wait = NULL, \
+ .waitend = &(q).wait, \
+ .events = (ev), \
+ .waitmask = 0, \
+ .waitlock = UK_RWLOCK_INITIALIZER((q).waitlock, 0), \
+}
+#endif /* !CONFIG_LIBUKFILE_POLLED */
#endif /* !CONFIG_LIBUKFILE_CHAINUPDATE */
-#define UK_POLLQ_EVENTS_INIT_VALUE(q) \
- ((struct uk_pollq)UK_POLLQ_EVENTS_INITIALIZER(q))
+#if CONFIG_LIBUKFILE_POLLED
+#define UK_POLLQ_EDGE_INITIALIZER(q, pollfunc) _POLLQ_INIT(q, pollfunc, 0)
+
+#define UK_POLLQ_EDGE_INIT_VALUE(q, pollfunc) \
+ ((struct uk_pollq)UK_POLLQ_EDGE_INITIALIZER(q, pollfunc))
+#endif /* CONFIG_LIBUKFILE_POLLED */
+
+#define UK_POLLQ_LEVEL_EVENTS_INITIALIZER(q, ev) _POLLQ_INIT(q, NULL, ev)
+#define UK_POLLQ_LEVEL_INITIALIZER(q) UK_POLLQ_LEVEL_EVENTS_INITIALIZER(q, 0)
-#define UK_POLLQ_INITIALIZER(q) UK_POLLQ_EVENTS_INITIALIZER((q), 0)
-#define UK_POLLQ_INIT_VALUE(q) UK_POLLQ_EVENTS_INIT_VALUE((q), 0)
+#define UK_POLLQ_LEVEL_EVENTS_INIT_VALUE(q, ev) \
+ ((struct uk_pollq)UK_POLLQ_LEVEL_EVENTS_INITIALIZER(q, ev))
+#define UK_POLLQ_LEVEL_INIT_VALUE(q) \
+ ((struct uk_pollq)UK_POLLQ_LEVEL_INITIALIZER(q))
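+
+/*
+ * Example (illustrative names): statically defining a level-notified queue
+ * with no events initially set and, with LIBUKFILE_POLLED enabled, an
+ * edge-notified queue bound to a driver poll callback:
+ *
+ *   static struct uk_pollq lvlq = UK_POLLQ_LEVEL_INITIALIZER(lvlq);
+ *
+ *   static struct uk_pollq edgeq =
+ *           UK_POLLQ_EDGE_INITIALIZER(edgeq, mydrv_poll);
+ */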
/* Polling cancellation */
/* Polling */
+#if CONFIG_LIBUKFILE_POLLED
/**
- * Poll for the events in `req`; never block, always return immediately.
- *
- * @return
- * Bitwise AND between `req` and the events set in `q`.
+ * INTERNAL. Poll for the events in `req`; never block or take locks,
+ * always return immediately.
+ */
+static inline
+uk_pollevent _pollq_poll_immediate(struct uk_pollq *q, uk_pollevent req)
+{
+ return q->poll ? 0 : q->events & req;
+}
+
+/**
+ * INTERNAL. Poll for the events in `req` with a queue lock held; may block.
+ */
+static inline
+uk_pollevent _pollq_poll_locked(struct uk_pollq *q, uk_pollevent req,
+ const struct uk_file *f)
+{
+ return q->poll ? q->poll(f, req) : q->events & req;
+}
+#else /* !CONFIG_LIBUKFILE_POLLED */
+/**
+ * INTERNAL. Poll for the events in `req`; never block or take locks,
+ * always return immediately.
*/
static inline
-uk_pollevent uk_pollq_poll_immediate(struct uk_pollq *q, uk_pollevent req)
+uk_pollevent _pollq_poll_immediate(struct uk_pollq *q, uk_pollevent req)
{
return q->events & req;
}
+/**
+ * INTERNAL. Poll for the events in `req` with a queue lock held; may block.
+ */
+static inline
+uk_pollevent _pollq_poll_locked(struct uk_pollq *q, uk_pollevent req,
+ const struct uk_file *f __unused)
+{
+ return _pollq_poll_immediate(q, req);
+}
+#endif /* !CONFIG_LIBUKFILE_POLLED */
+
/**
* INTERNAL. Atomically poll & lock if required.
*
*/
static inline
uk_pollevent _pollq_lock(struct uk_pollq *q, uk_pollevent req,
- uk_pollevent exp)
+ uk_pollevent exp, const struct uk_file *f)
{
uk_pollevent ev;
uk_rwlock_rlock(&q->waitlock);
/* Check if events were set while acquiring the lock */
- if ((ev = uk_pollq_poll_immediate(q, req) & ~exp))
+ if ((ev = _pollq_poll_locked(q, req, f) & ~exp))
uk_rwlock_runlock(&q->waitlock);
return ev;
}
return !timeout;
}
+/**
+ * Poll for the events in `req`, returning the present levels of events.
+ *
+ * May yield execution or acquire locks, but will never block indefinitely.
+ *
+ * @param q Target queue.
+ * @param req Events to poll for.
+ * @param f File to poll events for; required when `q` is edge-notified.
+ *
+ * @return
+ * Bitwise AND between `req` and the events set in `q`
+ */
+static inline
+uk_pollevent uk_pollq_poll_level(struct uk_pollq *q, uk_pollevent req,
+ const struct uk_file *f __maybe_unused)
+{
+ uk_pollevent ev;
+
+ if ((ev = _pollq_poll_immediate(q, req)))
+ return ev;
+#if CONFIG_LIBUKFILE_POLLED
+ if (q->poll && !(ev = _pollq_lock(q, req, 0, f)))
+ uk_rwlock_runlock(&q->waitlock);
+#endif /* CONFIG_LIBUKFILE_POLLED */
+ return ev;
+}
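+
+/*
+ * Usage sketch (illustrative): a non-blocking readiness check, e.g. on an
+ * O_NONBLOCK I/O path, where `req` is the caller's event mask; a zero result
+ * means none of the requested events are currently set:
+ *
+ *   uk_pollevent ev = uk_pollq_poll_level(q, req, f);
+ *
+ *   if (!ev)
+ *           return -EAGAIN;
+ */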
+
/**
* Poll for the events in `req`, blocking until `deadline` or an event is set.
*
*/
static inline
uk_pollevent uk_pollq_poll_until(struct uk_pollq *q, uk_pollevent req,
- __nsec deadline)
+ __nsec deadline, const struct uk_file *f)
{
uk_pollevent ev;
do {
- if ((ev = uk_pollq_poll_immediate(q, req)))
+ if ((ev = _pollq_poll_immediate(q, req)))
return ev;
- if ((ev = _pollq_lock(q, req, 0)))
+ if ((ev = _pollq_lock(q, req, 0, f)))
return ev;
} while (_pollq_wait(q, req, deadline));
return ev;
* @return
* Bitwise AND between `req` and the events set in `q`
*/
-#define uk_pollq_poll(q, req) uk_pollq_poll_until(q, req, 0)
+#define uk_pollq_poll(q, req, f) uk_pollq_poll_until(q, req, 0, f)
#if CONFIG_LIBUKFILE_CHAINUPDATE
/* Propagation */
*/
static inline
uk_pollevent uk_pollq_poll_register(struct uk_pollq *q,
- struct uk_poll_chain *tick, int force)
+ struct uk_poll_chain *tick, int force,
+ const struct uk_file *f)
{
uk_pollevent ev;
uk_pollevent req = tick->mask;
- if (!force && (ev = uk_pollq_poll_immediate(q, req)))
+ if (!force && (ev = _pollq_poll_immediate(q, req)))
return ev;
/* Might need to register */
uk_rwlock_rlock(&q->proplock);
- if ((ev = uk_pollq_poll_immediate(q, req)) && !force)
+ if ((ev = _pollq_poll_locked(q, req, f)) && !force)
goto out;
_pollq_register(q, tick);
out:
/* Updating */
/**
- * Update events, clearing those in `clr`.
+ * Update events, setting those in `set` and handling notifications.
*
* @param q Target queue.
- * @param clr Events to clear.
+ * @param set Events to set.
+ * @param n Maximum number of threads to wake up. If < 0 wake up all threads.
+ * Chained updates have their own defined notification semantics and may
+ * notify more threads than specified in `n`.
*
* @return
* The previous event set.
*/
-uk_pollevent uk_pollq_clear(struct uk_pollq *q, uk_pollevent clr);
+uk_pollevent uk_pollq_set_n(struct uk_pollq *q, uk_pollevent set, int n);
/**
- * Update events, setting those in `set` and handling notifications.
+ * Update events, clearing those in `clr`.
+ *
+ * Only available on level-notified queues.
*
* @param q Target queue.
- * @param set Events to set.
- * @param n Maximum number of threads to wake up. If < 0 wake up all threads.
- * Chained updates have their own defined notification semantics and may
- * notify more threads than specified in `n`.
+ * @param clr Events to clear.
*
* @return
* The previous event set.
*/
-uk_pollevent uk_pollq_set_n(struct uk_pollq *q, uk_pollevent set, int n);
+uk_pollevent uk_pollq_clear(struct uk_pollq *q, uk_pollevent clr);
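+
+/*
+ * Example (sketch; UKFD_POLLIN is assumed to name the readable event bit and
+ * the buffer check is illustrative): a managed (level-notified) driver
+ * signals both edges in-band with its I/O operations.
+ *
+ * On the write path, once data becomes available (rising edge):
+ *
+ *   uk_pollq_set(q, UKFD_POLLIN);
+ *
+ * On the read path, once the buffer has been drained (falling edge):
+ *
+ *   if (!bytes_remaining)
+ *           uk_pollq_clear(q, UKFD_POLLIN);
+ */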
/**
* Replace the events in `q` with `val` and handle notifications.
*
+ * Only available on level-notified queues.
+ *
* @param q Target queue.
* @param val New event set.
* @param n Maximum number of threads to wake up. If < 0 wake up all threads.
/**
* Replace the events in `q` with `val` and handle notifications.
*
+ * Only available on level-notified queues.
+ *
* @param q Target queue.
* @param val New event set.
*