/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
+ * Copyright (c) 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
taskq_t *system_taskq;
#define TASKQ_ACTIVE 0x00010000
+#define TASKQ_NAMELEN 31
struct taskq {
+ char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
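+ /* Record the queue name for debugging; strlcpy() guarantees NUL termination. */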
+ (void) strlcpy(tq->tq_name, name, sizeof (tq->tq_name));
tq->tq_flags = flags | TASKQ_ACTIVE;
tq->tq_active = nthreads;
tq->tq_nthreads = nthreads;
uint64_t *logical_ashift, uint64_t *physical_ashift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
-typedef int vdev_io_start_func_t(zio_t *zio);
+typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef void vdev_hold_func_t(vdev_t *vd);
ZIO_PRIORITY_NOW /* non-queued I/Os (e.g. ioctl) */
} zio_priority_t;
-#define ZIO_PIPELINE_CONTINUE 0x100
-#define ZIO_PIPELINE_STOP 0x101
-
enum zio_flag {
/*
* Flags inherited by gang, ddt, and vdev children,
zio_interrupt(zio);
}
-static int
+static void
vdev_disk_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (dvd == NULL || (dvd->vd_ldi_offline && dvd->vd_lh == NULL)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ return;
}
if (zio->io_type == ZIO_TYPE_IOCTL) {
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ return;
}
switch (zio->io_cmd) {
* and will call vdev_disk_ioctl_done()
* upon completion.
*/
- return (ZIO_PIPELINE_STOP);
+ return;
}
if (error == ENOTSUP || error == ENOTTY) {
zio->io_error = SET_ERROR(ENOTSUP);
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
}
vb = kmem_alloc(sizeof (vdev_buf_t), KM_SLEEP);
/* ldi_strategy() will return non-zero only on programming errors */
VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0);
-
- return (ZIO_PIPELINE_STOP);
}
static void
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
vd->vdev_tsd = NULL;
}
-static int
+static void
vdev_file_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ return;
}
vf = vd->vdev_tsd;
zio->io_error = SET_ERROR(ENOTSUP);
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
}
zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+#ifdef illumos
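+ /*
+ * illumos hands the I/O off to system_taskq here; the dispatched
+ * routine is then responsible for finishing the zio, per the
+ * pipeline contract documented in zio.c.
+ */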
+ VERIFY3U(taskq_dispatch(system_taskq, vdev_file_io_strategy, bp,
+ TQ_SLEEP), !=, 0);
+#endif
}
/* ARGSUSED */
zio_interrupt(zio);
}
-static int
+static void
vdev_geom_io_start(zio_t *zio)
{
vdev_t *vd;
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
+ zio_interrupt(zio);
+ return;
} else {
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
}
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
case ZIO_TYPE_FREE:
if (vd->vdev_notrim) {
zio->io_error = SET_ERROR(ENOTSUP);
} else if (!vdev_geom_bio_delete_disable) {
goto sendreq;
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
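+ /* No TRIM was issued (unsupported or disabled); continue in-line. */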
+ zio_execute(zio);
+ return;
}
sendreq:
cp = vd->vdev_tsd;
if (cp == NULL) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ return;
}
bp = g_alloc_bio();
bp->bio_caller1 = zio;
bp->bio_done = vdev_geom_io_intr;
g_io_request(bp, cp);
-
- return (ZIO_PIPELINE_STOP);
}
static void
*/
/*
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
return (-1);
}
-static int
+static void
vdev_mirror_io_start(zio_t *zio)
{
mirror_map_t *mm;
zio->io_type, zio->io_priority, 0,
vdev_mirror_scrub_done, mc));
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
}
/*
* For normal reads just pick one child.
c++;
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
}
static int
*/
/*
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
/*
}
/* ARGSUSED */
-static int
+static void
vdev_missing_io_start(zio_t *zio)
{
zio->io_error = SET_ERROR(ENOTSUP);
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
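+ /* The error is recorded in-line, so continue the pipeline directly. */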
+ zio_execute(zio);
}
/* ARGSUSED */
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
*/
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
-static int
+static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_raidz_child_done, rc));
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
}
if (zio->io_type == ZIO_TYPE_WRITE) {
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
+ return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
}
}
- zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ zio_execute(zio);
}
extern vmem_t *zio_alloc_arena;
#endif
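+/*
+ * These pipeline return codes are now private to zio.c: vdev io_start
+ * routines return void and continue the pipeline themselves (see
+ * zio_vdev_io_start()).
+ */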
+#define ZIO_PIPELINE_CONTINUE 0x100
+#define ZIO_PIPELINE_STOP 0x101
+
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Read, write and delete to physical devices
* ==========================================================================
*/
+
+
+/*
+ * Issue an I/O to the underlying vdev. Typically the issue pipeline
+ * stops after this stage and will resume upon I/O completion.
+ * However, there are instances where the vdev layer may need to
+ * continue the pipeline when an I/O was not issued. Since the I/O
+ * that was sent to the vdev layer might be different than the one
+ * currently active in the pipeline (see vdev_queue_io()), we explicitly
+ * force the underlying vdev layers to call either zio_execute() or
+ * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
+ */
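+/*
+ * For illustration only: vdev_foo_* below is a hypothetical leaf vdev,
+ * not part of this change.  Under the contract above, an io_start
+ * routine is shaped like:
+ *
+ *	static void
+ *	vdev_foo_io_start(zio_t *zio)
+ *	{
+ *		if (!vdev_readable(zio->io_vd)) {
+ *			zio->io_error = SET_ERROR(ENXIO);
+ *			zio_interrupt(zio);
+ *			return;
+ *		}
+ *		... issue the I/O here; its completion callback must
+ *		finish the zio by calling zio_interrupt() ...
+ *	}
+ *
+ * zio_execute() continues the pipeline in the calling context, while
+ * zio_interrupt() hands the zio to the interrupt taskq, mimicking an
+ * asynchronous completion.
+ */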
static int
zio_vdev_io_start(zio_t *zio)
{
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
- return (vdev_mirror_ops.vdev_op_io_start(zio));
+ vdev_mirror_ops.vdev_op_io_start(zio);
+ return (ZIO_PIPELINE_STOP);
}
if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE &&
* can quickly react to certain workloads. In particular, we care
* about non-scrubbing, top-level reads and writes with the following
* characteristics:
- * - synchronous writes of user data to non-slog devices
+ * - synchronous writes of user data to non-slog devices
* - any reads of user data
* When these conditions are met, adjust the timestamp of spa_last_io
* which allows the scan thread to adjust its workload accordingly.
return (ZIO_PIPELINE_STOP);
}
- ret = vd->vdev_ops->vdev_op_io_start(zio);
- ASSERT(ret == ZIO_PIPELINE_STOP);
-
- return (ret);
+ vd->vdev_ops->vdev_op_io_start(zio);
+ return (ZIO_PIPELINE_STOP);
}
static int