/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2012 The FreeBSD Foundation
+ * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed by Edward Tomasz Napierala
struct ctl_softc *control_softc = NULL;
-/*
- * Size and alignment macros needed for Copan-specific HA hardware. These
- * can go away when the HA code is re-written, and uses busdma for any
- * hardware.
- */
-#define CTL_ALIGN_8B(target, source, type) \
- if (((uint32_t)source & 0x7) != 0) \
- target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
- else \
- target = (type)source;
-
-#define CTL_SIZE_8B(target, size) \
- if ((size & 0x7) != 0) \
- target = size + (0x8 - (size & 0x7)); \
- else \
- target = size;
-
-#define CTL_ALIGN_8B_MARGIN 16
-
/*
* Template mode pages.
*/
}
};
-/*
- * XXX KDM move these into the softc.
- */
-static int rcv_sync_msg;
-static uint8_t ctl_pause_rtr;
-
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
*/
#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10
-#ifdef notyet
static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
-#endif
+static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
-/**
-static void ctl_failover_change_pages(struct ctl_softc *softc,
- struct ctl_scsiio *ctsio, int master);
-**/
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
-//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
-#ifdef notyet
-static void ctl_failover(void);
-#endif
+static void ctl_failover_lun(struct ctl_lun *lun);
+static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
+static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
+static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
-#ifdef notyet
static void ctl_enqueue_isc(union ctl_io *io);
-#endif
static const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
static int ctl_cmd_applicable(uint8_t lun_type,
const struct ctl_cmd_entry *entry);
+static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
+static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);
+
/*
* Load the serialization table. This isn't very pretty, but is probably
* the easiest way to do it.
DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);
-#ifdef notyet
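+/* Pseudo-frontend owning the shadow ports created for the HA peer. */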
+static struct ctl_frontend ha_frontend =
+{
+ .name = "ha",
+};
+
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
union ctl_ha_msg *msg_info)
ctsio->sense_residual = msg_info->scsi.sense_residual;
ctsio->residual = msg_info->scsi.residual;
memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
- sizeof(ctsio->sense_data));
+ msg_info->scsi.sense_len);
memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
&msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
ctl_enqueue_isc((union ctl_io *)ctsio);
}
ctsio = &msg_info->hdr.serializing_sc->scsiio;
-#if 0
- /*
- * Attempt to catch the situation where an I/O has
- * been freed, and we're using it again.
- */
- if (ctsio->io_hdr.io_type == 0xff) {
- union ctl_io *tmp_io;
- tmp_io = (union ctl_io *)ctsio;
- printf("%s: %p use after free!\n", __func__,
- ctsio);
- printf("%s: type %d msg %d cdb %x iptl: "
- "%u:%u:%u tag 0x%04x "
- "flag %#x status %x\n",
- __func__,
- tmp_io->io_hdr.io_type,
- tmp_io->io_hdr.msg_type,
- tmp_io->scsiio.cdb[0],
- tmp_io->io_hdr.nexus.initid,
- tmp_io->io_hdr.nexus.targ_port,
- tmp_io->io_hdr.nexus.targ_lun,
- (tmp_io->io_hdr.io_type ==
- CTL_IO_TASK) ?
- tmp_io->taskio.tag_num :
- tmp_io->scsiio.tag_num,
- tmp_io->io_hdr.flags,
- tmp_io->io_hdr.status);
- }
-#endif
ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
ctl_enqueue_isc((union ctl_io *)ctsio);
}
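+/*
+ * Announce this LUN's state (flags, PR generation, reservation and
+ * registered keys, device ID) to the HA peer.  The message size is
+ * computed under the LUN lock, the buffer is allocated with the lock
+ * dropped, and the size is re-checked in case registrations changed
+ * in the meantime.
+ */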
+void
+ctl_isc_announce_lun(struct ctl_lun *lun)
+{
+ struct ctl_softc *softc = lun->ctl_softc;
+ union ctl_ha_msg *msg;
+ struct ctl_ha_msg_lun_pr_key pr_key;
+ int i, k;
+
+ if (softc->ha_link != CTL_HA_LINK_ONLINE)
+ return;
+ mtx_lock(&lun->lun_lock);
+ i = sizeof(msg->lun);
+ if (lun->lun_devid)
+ i += lun->lun_devid->len;
+ i += sizeof(pr_key) * lun->pr_key_count;
+alloc:
+ mtx_unlock(&lun->lun_lock);
+ msg = malloc(i, M_CTL, M_WAITOK);
+ mtx_lock(&lun->lun_lock);
+ k = sizeof(msg->lun);
+ if (lun->lun_devid)
+ k += lun->lun_devid->len;
+ k += sizeof(pr_key) * lun->pr_key_count;
+ if (i < k) {
+ free(msg, M_CTL);
+ i = k;
+ goto alloc;
+ }
+ bzero(&msg->lun, sizeof(msg->lun));
+ msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
+ msg->hdr.nexus.targ_lun = lun->lun;
+ msg->hdr.nexus.targ_mapped_lun = lun->lun;
+ msg->lun.flags = lun->flags;
+ msg->lun.pr_generation = lun->PRGeneration;
+ msg->lun.pr_res_idx = lun->pr_res_idx;
+ msg->lun.pr_res_type = lun->res_type;
+ msg->lun.pr_key_count = lun->pr_key_count;
+ i = 0;
+ if (lun->lun_devid) {
+ msg->lun.lun_devid_len = lun->lun_devid->len;
+ memcpy(&msg->lun.data[i], lun->lun_devid->data,
+ msg->lun.lun_devid_len);
+ i += msg->lun.lun_devid_len;
+ }
+ for (k = 0; k < CTL_MAX_INITIATORS; k++) {
+ if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
+ continue;
+ pr_key.pr_iid = k;
+ memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
+ i += sizeof(pr_key);
+ }
+ mtx_unlock(&lun->lun_lock);
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
+ M_WAITOK);
+ free(msg, M_CTL);
+}
+
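+/*
+ * Announce this port's configuration (type, name, LUN map and device
+ * IDs) to the HA peer.  Only ports belonging to this head are sent.
+ */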
+void
+ctl_isc_announce_port(struct ctl_port *port)
+{
+ struct ctl_softc *softc = control_softc;
+ union ctl_ha_msg *msg;
+ int i;
+
+ if (port->targ_port < softc->port_min ||
+ port->targ_port >= softc->port_max ||
+ softc->ha_link != CTL_HA_LINK_ONLINE)
+ return;
+ /* The name is sent as "<ha_id>:<name>"; reserve room for the prefix. */
+ i = sizeof(msg->port) + strlen(port->port_name) + 16;
+ if (port->lun_map)
+ i += sizeof(uint32_t) * CTL_MAX_LUNS;
+ if (port->port_devid)
+ i += port->port_devid->len;
+ if (port->target_devid)
+ i += port->target_devid->len;
+ msg = malloc(i, M_CTL, M_WAITOK);
+ bzero(&msg->port, sizeof(msg->port));
+ msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
+ msg->hdr.nexus.targ_port = port->targ_port;
+ msg->port.port_type = port->port_type;
+ msg->port.physical_port = port->physical_port;
+ msg->port.virtual_port = port->virtual_port;
+ msg->port.status = port->status;
+ i = 0;
+ msg->port.name_len = sprintf(&msg->port.data[i],
+ "%d:%s", softc->ha_id, port->port_name) + 1;
+ i += msg->port.name_len;
+ if (port->lun_map) {
+ msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
+ memcpy(&msg->port.data[i], port->lun_map,
+ msg->port.lun_map_len);
+ i += msg->port.lun_map_len;
+ }
+ if (port->port_devid) {
+ msg->port.port_devid_len = port->port_devid->len;
+ memcpy(&msg->port.data[i], port->port_devid->data,
+ msg->port.port_devid_len);
+ i += msg->port.port_devid_len;
+ }
+ if (port->target_devid) {
+ msg->port.target_devid_len = port->target_devid->len;
+ memcpy(&msg->port.data[i], port->target_devid->data,
+ msg->port.target_devid_len);
+ i += msg->port.target_devid_len;
+ }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
+ M_WAITOK);
+ free(msg, M_CTL);
+}
+
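+/* The HA link came up: push our complete port and LUN state to the peer. */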
+static void
+ctl_isc_ha_link_up(struct ctl_softc *softc)
+{
+ struct ctl_port *port;
+ struct ctl_lun *lun;
+
+ STAILQ_FOREACH(port, &softc->port_list, links)
+ ctl_isc_announce_port(port);
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ ctl_isc_announce_lun(lun);
+}
+
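+/*
+ * The HA link went down: clear peer-primary state on every LUN, queue
+ * a failover request per LUN, and mark the peer's ports offline.
+ */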
+static void
+ctl_isc_ha_link_down(struct ctl_softc *softc)
+{
+ struct ctl_port *port;
+ struct ctl_lun *lun;
+ union ctl_io *io;
+
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+ mtx_unlock(&lun->lun_lock);
+
+ mtx_unlock(&softc->ctl_lock);
+ io = ctl_alloc_io(softc->othersc_pool);
+ mtx_lock(&softc->ctl_lock);
+ ctl_zero_io(io);
+ io->io_hdr.msg_type = CTL_MSG_FAILOVER;
+ io->io_hdr.nexus.targ_mapped_lun = lun->lun;
+ ctl_enqueue_isc(io);
+ }
+
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port >= softc->port_min &&
+ port->targ_port < softc->port_max)
+ continue;
+ port->status &= ~CTL_PORT_STATUS_ONLINE;
+ }
+ mtx_unlock(&softc->ctl_lock);
+}
+
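+/* Apply a unit attention set/clear request received from the peer. */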
+static void
+ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_lun *lun;
+ uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
+
+ if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
+ (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) {
+ if (msg->ua.ua_all) {
+ if (msg->ua.ua_set)
+ ctl_est_ua_all(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
+ } else {
+ if (msg->ua.ua_set)
+ ctl_est_ua(lun, iid, msg->ua.ua_type);
+ else
+ ctl_clr_ua(lun, iid, msg->ua.ua_type);
+ }
+ }
+}
+
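+/* Merge LUN state (flags, PR data) reported by the peer into ours. */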
+static void
+ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_lun *lun;
+ struct ctl_ha_msg_lun_pr_key pr_key;
+ int i, k;
+
+ if (msg->hdr.nexus.targ_lun >= CTL_MAX_LUNS ||
+     (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) == NULL) {
+ CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__,
+ msg->hdr.nexus.targ_lun));
+ } else {
+ mtx_lock(&lun->lun_lock);
+ i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
+ if (msg->lun.lun_devid_len != i || (i > 0 &&
+ memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
+ mtx_unlock(&lun->lun_lock);
+ printf("%s: Received conflicting HA LUN %d\n",
+ __func__, msg->hdr.nexus.targ_lun);
+ return;
+ } else {
+ /* Record whether peer is primary. */
+ if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
+ (msg->lun.flags & CTL_LUN_DISABLED) == 0)
+ lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
+ else
+ lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
+
+ /* If peer is primary and we are not -- use data */
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
+ lun->PRGeneration = msg->lun.pr_generation;
+ lun->pr_res_idx = msg->lun.pr_res_idx;
+ lun->res_type = msg->lun.pr_res_type;
+ lun->pr_key_count = msg->lun.pr_key_count;
+ for (k = 0; k < CTL_MAX_INITIATORS; k++)
+ ctl_clr_prkey(lun, k);
+ for (k = 0; k < msg->lun.pr_key_count; k++) {
+ memcpy(&pr_key, &msg->lun.data[i],
+ sizeof(pr_key));
+ ctl_alloc_prkey(lun, pr_key.pr_iid);
+ ctl_set_prkey(lun, pr_key.pr_iid,
+ pr_key.pr_key);
+ i += sizeof(pr_key);
+ }
+ }
+
+ mtx_unlock(&lun->lun_lock);
+ CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
+ __func__, msg->hdr.nexus.targ_lun,
+ (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
+ "primary" : "secondary"));
+
+ /* If we are primary but peer doesn't know -- notify */
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
+ (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
+ ctl_isc_announce_lun(lun);
+ }
+ }
+}
+
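+/*
+ * Create or update the local shadow of a port announced by the peer,
+ * attaching it to the "ha" pseudo-frontend.
+ */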
+static void
+ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
+{
+ struct ctl_port *port;
+ int i, new;
+
+ port = softc->ctl_ports[msg->hdr.nexus.targ_port];
+ if (port == NULL) {
+ CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
+ msg->hdr.nexus.targ_port));
+ new = 1;
+ port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
+ port->frontend = &ha_frontend;
+ port->targ_port = msg->hdr.nexus.targ_port;
+ } else if (port->frontend == &ha_frontend) {
+ CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
+ msg->hdr.nexus.targ_port));
+ new = 0;
+ } else {
+ printf("%s: Received conflicting HA port %d\n",
+ __func__, msg->hdr.nexus.targ_port);
+ return;
+ }
+ port->port_type = msg->port.port_type;
+ port->physical_port = msg->port.physical_port;
+ port->virtual_port = msg->port.virtual_port;
+ port->status = msg->port.status;
+ i = 0;
+ free(port->port_name, M_CTL);
+ port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
+ M_CTL);
+ i += msg->port.name_len;
+ if (msg->port.lun_map_len != 0) {
+ if (port->lun_map == NULL)
+ port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
+ M_CTL, M_WAITOK);
+ memcpy(port->lun_map, &msg->port.data[i],
+ sizeof(uint32_t) * CTL_MAX_LUNS);
+ i += msg->port.lun_map_len;
+ } else {
+ free(port->lun_map, M_CTL);
+ port->lun_map = NULL;
+ }
+ if (msg->port.port_devid_len != 0) {
+ if (port->port_devid == NULL ||
+ port->port_devid->len != msg->port.port_devid_len) {
+ free(port->port_devid, M_CTL);
+ port->port_devid = malloc(sizeof(struct ctl_devid) +
+ msg->port.port_devid_len, M_CTL, M_WAITOK);
+ }
+ memcpy(port->port_devid->data, &msg->port.data[i],
+ msg->port.port_devid_len);
+ port->port_devid->len = msg->port.port_devid_len;
+ i += msg->port.port_devid_len;
+ } else {
+ free(port->port_devid, M_CTL);
+ port->port_devid = NULL;
+ }
+ if (msg->port.target_devid_len != 0) {
+ if (port->target_devid == NULL ||
+ port->target_devid->len != msg->port.target_devid_len) {
+ free(port->target_devid, M_CTL);
+ port->target_devid = malloc(sizeof(struct ctl_devid) +
+ msg->port.target_devid_len, M_CTL, M_WAITOK);
+ }
+ memcpy(port->target_devid->data, &msg->port.data[i],
+ msg->port.target_devid_len);
+ port->target_devid->len = msg->port.target_devid_len;
+ i += msg->port.target_devid_len;
+ } else {
+ free(port->target_devid, M_CTL);
+ port->target_devid = NULL;
+ }
+ if (new) {
+ i = ctl_port_register(port);
+ if (i != 0)
+ printf("%s: ctl_port_register() failed: %d\n",
+ __func__, i);
+ }
+}
+
/*
* ISC (Inter Shelf Communication) event handler. Events from the HA
* subsystem come in here.
ctl_ha_status isc_status;
softc = control_softc;
- io = NULL;
-
-
-#if 0
- printf("CTL: Isc Msg event %d\n", event);
-#endif
+ CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
if (event == CTL_HA_EVT_MSG_RECV) {
- union ctl_ha_msg msg_info;
+ union ctl_ha_msg *msg, msgbuf;
- isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), /*wait*/ 0);
-#if 0
- printf("CTL: msg_type %d\n", msg_info.msg_type);
-#endif
- if (isc_status != 0) {
- printf("Error receiving message, status = %d\n",
- isc_status);
+ if (param > sizeof(msgbuf))
+ msg = malloc(param, M_CTL, M_WAITOK);
+ else
+ msg = &msgbuf;
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
+ M_WAITOK);
+ if (isc_status != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: Error receiving message: %d\n",
+ __func__, isc_status);
+ if (msg != &msgbuf)
+ free(msg, M_CTL);
return;
}
- switch (msg_info.hdr.msg_type) {
+ CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type));
+ switch (msg->hdr.msg_type) {
case CTL_MSG_SERIALIZE:
-#if 0
- printf("Serialize\n");
-#endif
- io = ctl_alloc_io_nowait(softc->othersc_pool);
- if (io == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* Need to set busy and send msg back */
- msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
- msg_info.hdr.status = CTL_SCSI_ERROR;
- msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
- msg_info.scsi.sense_len = 0;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
- }
- goto bailout;
- }
+ io = ctl_alloc_io(softc->othersc_pool);
ctl_zero_io(io);
- // populate ctsio from msg_info
+ /* Populate ctsio from msg. */
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
- io->io_hdr.original_sc = msg_info.hdr.original_sc;
-#if 0
- printf("pOrig %x\n", (int)msg_info.original_sc);
-#endif
+ io->io_hdr.original_sc = msg->hdr.original_sc;
io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
CTL_FLAG_IO_ACTIVE;
/*
*
* XXX KDM add another flag that is more specific.
*/
- if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
+ if (softc->ha_mode != CTL_HA_MODE_XFER)
io->io_hdr.flags |= CTL_FLAG_INT_COPY;
- io->io_hdr.nexus = msg_info.hdr.nexus;
+ io->io_hdr.nexus = msg->hdr.nexus;
#if 0
printf("port %u, iid %u, lun %u\n",
io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.initid,
io->io_hdr.nexus.targ_lun);
#endif
- io->scsiio.tag_num = msg_info.scsi.tag_num;
- io->scsiio.tag_type = msg_info.scsi.tag_type;
- memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
+ io->scsiio.tag_num = msg->scsi.tag_num;
+ io->scsiio.tag_type = msg->scsi.tag_type;
+#ifdef CTL_TIME_IO
+ io->io_hdr.start_time = time_uptime;
+ getbintime(&io->io_hdr.start_bt);
+#endif /* CTL_TIME_IO */
+ io->scsiio.cdb_len = msg->scsi.cdb_len;
+ memcpy(io->scsiio.cdb, msg->scsi.cdb,
CTL_MAX_CDBLEN);
if (softc->ha_mode == CTL_HA_MODE_XFER) {
const struct ctl_cmd_entry *entry;
struct ctl_sg_entry *sgl;
int i, j;
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
printf("%s: original_sc == NULL!\n", __func__);
/* XXX KDM do something here */
* Keep track of this, we need to send it back over
* when the datamove is complete.
*/
- io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
-
- if (msg_info.dt.sg_sequence == 0) {
- /*
- * XXX KDM we use the preallocated S/G list
- * here, but we'll need to change this to
- * dynamic allocation if we need larger S/G
- * lists.
- */
- if (msg_info.dt.kern_sg_entries >
- sizeof(io->io_hdr.remote_sglist) /
- sizeof(io->io_hdr.remote_sglist[0])) {
- printf("%s: number of S/G entries "
- "needed %u > allocated num %zd\n",
- __func__,
- msg_info.dt.kern_sg_entries,
- sizeof(io->io_hdr.remote_sglist)/
- sizeof(io->io_hdr.remote_sglist[0]));
-
- /*
- * XXX KDM send a message back to
- * the other side to shut down the
- * DMA. The error will come back
- * through via the normal channel.
- */
- break;
- }
- sgl = io->io_hdr.remote_sglist;
- memset(sgl, 0,
- sizeof(io->io_hdr.remote_sglist));
+ io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
+
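+ /*
+ * First segment of a transfer: allocate a single array
+ * holding both the remote S/G list (as sent by the peer)
+ * and the local S/G list used for DMA on this side.
+ */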
+ if (msg->dt.sg_sequence == 0) {
+ i = msg->dt.kern_sg_entries +
+ io->scsiio.kern_data_len /
+ CTL_HA_DATAMOVE_SEGMENT + 1;
+ sgl = malloc(sizeof(*sgl) * i, M_CTL,
+ M_WAITOK | M_ZERO);
+ io->io_hdr.remote_sglist = sgl;
+ io->io_hdr.local_sglist =
+ &sgl[msg->dt.kern_sg_entries];
io->scsiio.kern_data_ptr = (uint8_t *)sgl;
io->scsiio.kern_sg_entries =
- msg_info.dt.kern_sg_entries;
+ msg->dt.kern_sg_entries;
io->scsiio.rem_sg_entries =
- msg_info.dt.kern_sg_entries;
+ msg->dt.kern_sg_entries;
io->scsiio.kern_data_len =
- msg_info.dt.kern_data_len;
+ msg->dt.kern_data_len;
io->scsiio.kern_total_len =
- msg_info.dt.kern_total_len;
+ msg->dt.kern_total_len;
io->scsiio.kern_data_resid =
- msg_info.dt.kern_data_resid;
+ msg->dt.kern_data_resid;
io->scsiio.kern_rel_offset =
- msg_info.dt.kern_rel_offset;
- /*
- * Clear out per-DMA flags.
- */
- io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
- /*
- * Add per-DMA flags that are set for this
- * particular DMA request.
- */
- io->io_hdr.flags |= msg_info.dt.flags &
- CTL_FLAG_RDMA_MASK;
+ msg->dt.kern_rel_offset;
+ io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
+ io->io_hdr.flags |= msg->dt.flags &
+ CTL_FLAG_BUS_ADDR;
} else
sgl = (struct ctl_sg_entry *)
io->scsiio.kern_data_ptr;
- for (i = msg_info.dt.sent_sg_entries, j = 0;
- i < (msg_info.dt.sent_sg_entries +
- msg_info.dt.cur_sg_entries); i++, j++) {
- sgl[i].addr = msg_info.dt.sg_list[j].addr;
- sgl[i].len = msg_info.dt.sg_list[j].len;
+ for (i = msg->dt.sent_sg_entries, j = 0;
+ i < (msg->dt.sent_sg_entries +
+ msg->dt.cur_sg_entries); i++, j++) {
+ sgl[i].addr = msg->dt.sg_list[j].addr;
+ sgl[i].len = msg->dt.sg_list[j].len;
#if 0
printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
__func__,
- msg_info.dt.sg_list[j].addr,
- msg_info.dt.sg_list[j].len,
+ msg->dt.sg_list[j].addr,
+ msg->dt.sg_list[j].len,
sgl[i].addr, sgl[i].len, j, i);
#endif
}
-#if 0
- memcpy(&sgl[msg_info.dt.sent_sg_entries],
- msg_info.dt.sg_list,
- sizeof(*sgl) * msg_info.dt.cur_sg_entries);
-#endif
/*
* If this is the last piece of the I/O, we've got
* the full S/G list. Queue processing in the thread.
* Otherwise wait for the next piece.
*/
- if (msg_info.dt.sg_last != 0)
+ if (msg->dt.sg_last != 0)
ctl_enqueue_isc(io);
break;
}
/* Performed on the Serializing (primary) SC, XFER mode only */
case CTL_MSG_DATAMOVE_DONE: {
- if (msg_info.hdr.serializing_sc == NULL) {
+ if (msg->hdr.serializing_sc == NULL) {
printf("%s: serializing_sc == NULL!\n",
__func__);
/* XXX KDM now what? */
* there was a failure, so we can return status
* back to the initiator.
*/
- io = msg_info.hdr.serializing_sc;
+ io = msg->hdr.serializing_sc;
io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
- io->io_hdr.status = msg_info.hdr.status;
- io->scsiio.scsi_status = msg_info.scsi.scsi_status;
- io->scsiio.sense_len = msg_info.scsi.sense_len;
- io->scsiio.sense_residual =msg_info.scsi.sense_residual;
- io->io_hdr.port_status = msg_info.scsi.fetd_status;
- io->scsiio.residual = msg_info.scsi.residual;
- memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
+ io->io_hdr.port_status = msg->scsi.fetd_status;
+ io->scsiio.residual = msg->scsi.residual;
+ if (msg->hdr.status != CTL_STATUS_NONE) {
+ io->io_hdr.status = msg->hdr.status;
+ io->scsiio.scsi_status = msg->scsi.scsi_status;
+ io->scsiio.sense_len = msg->scsi.sense_len;
+ io->scsiio.sense_residual = msg->scsi.sense_residual;
+ memcpy(&io->scsiio.sense_data,
+ &msg->scsi.sense_data,
+ msg->scsi.sense_len);
+ }
ctl_enqueue_isc(io);
break;
}
/* Performed on Originating SC, SER_ONLY mode */
case CTL_MSG_R2R:
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
- printf("%s: Major Bummer\n", __func__);
- return;
- } else {
-#if 0
- printf("pOrig %x\n",(int) ctsio);
-#endif
+ printf("%s: original_sc == NULL!\n",
+ __func__);
+ break;
}
+ io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
io->io_hdr.msg_type = CTL_MSG_R2R;
- io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
+ io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
ctl_enqueue_isc(io);
break;
*/
case CTL_MSG_FINISH_IO:
if (softc->ha_mode == CTL_HA_MODE_XFER)
- ctl_isc_handler_finish_xfer(softc,
- &msg_info);
+ ctl_isc_handler_finish_xfer(softc, msg);
else
- ctl_isc_handler_finish_ser_only(softc,
- &msg_info);
+ ctl_isc_handler_finish_ser_only(softc, msg);
break;
/* Performed on Originating SC */
case CTL_MSG_BAD_JUJU:
- io = msg_info.hdr.original_sc;
+ io = msg->hdr.original_sc;
if (io == NULL) {
printf("%s: Bad JUJU!, original_sc is NULL!\n",
__func__);
break;
}
- ctl_copy_sense_data(&msg_info, io);
+ ctl_copy_sense_data(msg, io);
/*
* IO should have already been cleaned up on other
* SC so clear this flag so we won't send a message
io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
- /* io = msg_info.hdr.serializing_sc; */
+ /* io = msg->hdr.serializing_sc; */
io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
ctl_enqueue_isc(io);
break;
/* Handle resets sent from the other side */
case CTL_MSG_MANAGE_TASKS: {
struct ctl_taskio *taskio;
- taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
+ taskio = (struct ctl_taskio *)ctl_alloc_io(
softc->othersc_pool);
- if (taskio == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* should I just call the proper reset func
- here??? */
- goto bailout;
- }
ctl_zero_io((union ctl_io *)taskio);
taskio->io_hdr.io_type = CTL_IO_TASK;
taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
- taskio->io_hdr.nexus = msg_info.hdr.nexus;
- taskio->task_action = msg_info.task.task_action;
- taskio->tag_num = msg_info.task.tag_num;
- taskio->tag_type = msg_info.task.tag_type;
+ taskio->io_hdr.nexus = msg->hdr.nexus;
+ taskio->task_action = msg->task.task_action;
+ taskio->tag_num = msg->task.tag_num;
+ taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
taskio->io_hdr.start_time = time_uptime;
getbintime(&taskio->io_hdr.start_bt);
-#if 0
- cs_prof_gettime(&taskio->io_hdr.start_ticks);
-#endif
#endif /* CTL_TIME_IO */
ctl_run_task((union ctl_io *)taskio);
break;
}
/* Persistent Reserve action which needs attention */
case CTL_MSG_PERS_ACTION:
- presio = (struct ctl_prio *)ctl_alloc_io_nowait(
+ presio = (struct ctl_prio *)ctl_alloc_io(
softc->othersc_pool);
- if (presio == NULL) {
- printf("ctl_isc_event_handler: can't allocate "
- "ctl_io!\n");
- /* Bad Juju */
- /* Need to set busy and send msg back */
- goto bailout;
- }
ctl_zero_io((union ctl_io *)presio);
presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
- presio->pr_msg = msg_info.pr;
+ presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
+ presio->io_hdr.nexus = msg->hdr.nexus;
+ presio->pr_msg = msg->pr;
ctl_enqueue_isc((union ctl_io *)presio);
break;
- case CTL_MSG_SYNC_FE:
- rcv_sync_msg = 1;
+ case CTL_MSG_UA:
+ ctl_isc_ua(softc, msg, param);
+ break;
+ case CTL_MSG_PORT_SYNC:
+ ctl_isc_port_sync(softc, msg, param);
+ break;
+ case CTL_MSG_LUN_SYNC:
+ ctl_isc_lun_sync(softc, msg, param);
break;
default:
- printf("How did I get here?\n");
+ printf("Received HA message of unknown type %d\n",
+ msg->hdr.msg_type);
+ break;
}
- } else if (event == CTL_HA_EVT_MSG_SENT) {
- if (param != CTL_HA_STATUS_SUCCESS) {
- printf("Bad status from ctl_ha_msg_send status %d\n",
- param);
+ if (msg != &msgbuf)
+ free(msg, M_CTL);
+ } else if (event == CTL_HA_EVT_LINK_CHANGE) {
+ printf("CTL: HA link status changed from %d to %d\n",
+ softc->ha_link, param);
+ if (param == softc->ha_link)
+ return;
+ if (softc->ha_link == CTL_HA_LINK_ONLINE) {
+ softc->ha_link = param;
+ ctl_isc_ha_link_down(softc);
+ } else {
+ softc->ha_link = param;
+ if (softc->ha_link == CTL_HA_LINK_ONLINE)
+ ctl_isc_ha_link_up(softc);
}
return;
- } else if (event == CTL_HA_EVT_DISCONNECT) {
- printf("CTL: Got a disconnect from Isc\n");
- return;
} else {
printf("ctl_isc_event_handler: Unknown event %d\n", event);
return;
}
-
-bailout:
- return;
}
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
- struct scsi_sense_data *sense;
- sense = &dest->scsiio.sense_data;
- bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
+ memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
+ src->scsi.sense_len);
dest->scsiio.scsi_status = src->scsi.scsi_status;
dest->scsiio.sense_len = src->scsi.sense_len;
dest->io_hdr.status = src->hdr.status;
}
-#endif
+
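+/* Pack local sense data and status into an HA message for the peer. */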
+static void
+ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
+{
+
+ memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
+ src->scsiio.sense_len);
+ dest->scsi.scsi_status = src->scsiio.scsi_status;
+ dest->scsi.sense_len = src->scsiio.sense_len;
+ dest->hdr.status = src->io_hdr.status;
+}
static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
ctl_ua_type *pu;
+ if (initidx < softc->init_min || initidx >= softc->init_max)
+ return;
mtx_assert(&lun->lun_lock, MA_OWNED);
pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
if (pu == NULL)
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
int i, j;
mtx_assert(&lun->lun_lock, MA_OWNED);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
+ for (i = softc->port_min; i < softc->port_max; i++) {
if (lun->pending_ua[i] == NULL)
continue;
for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
static void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
ctl_ua_type *pu;
+ if (initidx < softc->init_min || initidx >= softc->init_max)
+ return;
mtx_assert(&lun->lun_lock, MA_OWNED);
pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
if (pu == NULL)
static void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
+ struct ctl_softc *softc = lun->ctl_softc;
int i, j;
mtx_assert(&lun->lun_lock, MA_OWNED);
- for (i = 0; i < CTL_MAX_PORTS; i++) {
+ for (i = softc->port_min; i < softc->port_max; i++) {
if (lun->pending_ua[i] == NULL)
continue;
for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
}
static int
-ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
+ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
struct ctl_softc *softc = (struct ctl_softc *)arg1;
struct ctl_lun *lun;
+ struct ctl_lun_req ireq;
int error, value;
- if (softc->flags & CTL_FLAG_ACTIVE_SHELF)
- value = 0;
- else
- value = 1;
-
+ value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
error = sysctl_handle_int(oidp, &value, 0, req);
if ((error != 0) || (req->newptr == NULL))
return (error);
else
softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
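+ /* Ask each backend to re-apply the new role via LUNREQ_MODIFY. */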
STAILQ_FOREACH(lun, &softc->lun_list, links) {
- mtx_lock(&lun->lun_lock);
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- mtx_unlock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ bzero(&ireq, sizeof(ireq));
+ ireq.reqtype = CTL_LUNREQ_MODIFY;
+ ireq.reqdata.modify.lun_id = lun->lun;
+ lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
+ curthread);
+ if (ireq.status != CTL_LUN_OK) {
+ printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
+ __func__, ireq.status, ireq.error_str);
+ }
+ mtx_lock(&softc->ctl_lock);
}
mtx_unlock(&softc->ctl_lock);
return (0);
struct ctl_softc *softc;
void *other_pool;
int i, error, retval;
- //int isc_retval;
retval = 0;
- ctl_pause_rtr = 0;
- rcv_sync_msg = 0;
-
control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
M_WAITOK | M_ZERO);
softc = control_softc;
softc->dev->si_drv1 = softc;
- /*
- * By default, return a "bad LUN" peripheral qualifier for unknown
- * LUNs. The user can override this default using the tunable or
- * sysctl. See the comment in ctl_inquiry_std() for more details.
- */
- softc->inquiry_pq_no_lun = 1;
- TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
- &softc->inquiry_pq_no_lun);
sysctl_ctx_init(&softc->sysctl_ctx);
softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
return (ENOMEM);
}
- SYSCTL_ADD_INT(&softc->sysctl_ctx,
- SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
- "inquiry_pq_no_lun", CTLFLAG_RW,
- &softc->inquiry_pq_no_lun, 0,
- "Report no lun possible for invalid LUNs");
-
mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
*/
softc->flags = CTL_FLAG_REAL_SYNC;
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
+ "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
+
/*
* In Copan's HA scheme, the "master" and "slave" roles are
* figured out through the slot the controller is in. Although it
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
"HA head ID (0 - no HA)");
- if (softc->ha_id == 0) {
+ if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
softc->flags |= CTL_FLAG_ACTIVE_SHELF;
softc->is_single = 1;
- softc->port_offset = 0;
- } else
- softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
- softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;
+ softc->port_cnt = CTL_MAX_PORTS;
+ softc->port_min = 0;
+ } else {
+ softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
+ softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
+ }
+ softc->port_max = softc->port_min + softc->port_cnt;
+ softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
+ softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
+
+ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
+ OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
+ "HA link state (0 - offline, 1 - unknown, 2 - online)");
STAILQ_INIT(&softc->lun_list);
STAILQ_INIT(&softc->pending_lun_queue);
}
SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
- OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
- softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");
+ OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
+ softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
+
+ if (softc->is_single == 0) {
+ ctl_frontend_register(&ha_frontend);
+ if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_init: ctl_ha_msg_init failed.\n");
+ softc->is_single = 1;
+ } else
+ if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_init: ctl_ha_msg_register failed.\n");
+ softc->is_single = 1;
+ }
+ }
return (0);
}
softc = (struct ctl_softc *)control_softc;
+ if (softc->is_single == 0) {
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n");
+ }
+ if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) {
+ printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n");
+ }
+ ctl_frontend_deregister(&ha_frontend);
+ }
+
mtx_lock(&softc->ctl_lock);
/*
return (0);
}
-int
-ctl_port_enable(ctl_port_type port_type)
-{
- struct ctl_softc *softc = control_softc;
- struct ctl_port *port;
-
- if (softc->is_single == 0) {
- union ctl_ha_msg msg_info;
- int isc_retval;
-
-#if 0
- printf("%s: HA mode, synchronizing frontend enable\n",
- __func__);
-#endif
- msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
- printf("Sync msg send error retval %d\n", isc_retval);
- }
- if (!rcv_sync_msg) {
- isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 1);
- }
-#if 0
- printf("CTL:Frontend Enable\n");
- } else {
- printf("%s: single mode, skipping frontend synchronization\n",
- __func__);
-#endif
- }
-
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if (port_type & port->port_type)
- {
-#if 0
- printf("port %d\n", port->targ_port);
-#endif
- ctl_port_online(port);
- }
- }
-
- return (0);
-}
-
-int
-ctl_port_disable(ctl_port_type port_type)
-{
- struct ctl_softc *softc;
- struct ctl_port *port;
-
- softc = control_softc;
-
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if (port_type & port->port_type)
- ctl_port_offline(port);
- }
-
- return (0);
-}
-
-/*
- * Returns 0 for success, 1 for failure.
- * Currently the only failure mode is if there aren't enough entries
- * allocated. So, in case of a failure, look at num_entries_dropped,
- * reallocate and try again.
- */
-int
-ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
- int *num_entries_filled, int *num_entries_dropped,
- ctl_port_type port_type, int no_virtual)
-{
- struct ctl_softc *softc;
- struct ctl_port *port;
- int entries_dropped, entries_filled;
- int retval;
- int i;
-
- softc = control_softc;
-
- retval = 0;
- entries_filled = 0;
- entries_dropped = 0;
-
- i = 0;
- mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(port, &softc->port_list, links) {
- struct ctl_port_entry *entry;
-
- if ((port->port_type & port_type) == 0)
- continue;
-
- if ((no_virtual != 0)
- && (port->virtual_port != 0))
- continue;
-
- if (entries_filled >= num_entries_alloced) {
- entries_dropped++;
- continue;
- }
- entry = &entries[i];
-
- entry->port_type = port->port_type;
- strlcpy(entry->port_name, port->port_name,
- sizeof(entry->port_name));
- entry->physical_port = port->physical_port;
- entry->virtual_port = port->virtual_port;
- entry->wwnn = port->wwnn;
- entry->wwpn = port->wwpn;
-
- i++;
- entries_filled++;
- }
-
- mtx_unlock(&softc->ctl_lock);
-
- if (entries_dropped > 0)
- retval = 1;
-
- *num_entries_dropped = entries_dropped;
- *num_entries_filled = entries_filled;
-
- return (retval);
-}
-
/*
* Remove an initiator by port number and initiator ID.
* Returns 0 for success, -1 for failure.
struct ctl_softc *softc;
union ctl_ha_msg msg_info;
struct ctl_lun *lun;
+ const struct ctl_cmd_entry *entry;
int retval = 0;
uint32_t targ_lun;
softc = control_softc;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
- lun = softc->ctl_luns[targ_lun];
- if (lun==NULL)
- {
+ if ((targ_lun < CTL_MAX_LUNS) &&
+ ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
+ /*
+ * If the LUN is invalid, pretend that it doesn't exist.
+ * It will go away as soon as all pending I/O has been
+ * completed.
+ */
+ mtx_lock(&lun->lun_lock);
+ if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
+ lun = NULL;
+ }
+ } else
+ lun = NULL;
+ if (lun == NULL) {
/*
* Why isn't LUN defined? The other side wouldn't
* send a cmd if the LUN is undefined.
*/
printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);
- /* "Logical unit not supported" */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x25,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ ctl_set_unsupported_lun(ctsio);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
return(1);
+ }
+ entry = ctl_get_cmd_entry(ctsio, NULL);
+ if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
+ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
+ return(1);
}
- mtx_lock(&lun->lun_lock);
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
+ ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
+
+ /*
+ * Every I/O goes into the OOA queue for a
+ * particular LUN, and stays there until completion.
+ */
+#ifdef CTL_TIME_IO
+ if (TAILQ_EMPTY(&lun->ooa_queue))
+ lun->idle_time += getsbinuptime() - lun->last_busy;
+#endif
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
blocked_links);
+ mtx_unlock(&lun->lun_lock);
break;
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
if (softc->ha_mode == CTL_HA_MODE_XFER) {
ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
ctl_enqueue_rtr((union ctl_io *)ctsio);
+ mtx_unlock(&lun->lun_lock);
} else {
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ mtx_unlock(&lun->lun_lock);
/* send msg back to other side */
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
msg_info.hdr.msg_type = CTL_MSG_R2R;
-#if 0
- printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
-#endif
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.hdr), M_WAITOK);
}
break;
case CTL_ACTION_OVERLAP:
- /* OVERLAPPED COMMANDS ATTEMPTED */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x4E,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+
+ ctl_set_overlapped_cmd(ctsio);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer Overlap\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
case CTL_ACTION_OVERLAP_TAG:
- /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x4D,
- /*ascq*/ ctsio->tag_num & 0xff,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+ ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer Overlap Tag\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
case CTL_ACTION_ERROR:
default:
- /* "Internal target failure" */
- ctl_set_sense_data(&msg_info.scsi.sense_data,
- lun,
- /*sense_format*/SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
- /*asc*/ 0x44,
- /*ascq*/ 0x00,
- SSD_ELEM_NONE);
-
- msg_info.scsi.sense_len = SSD_FULL_SIZE;
- msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
- msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
+ TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
+ mtx_unlock(&lun->lun_lock);
+ retval = 1;
+
+ ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+ ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
-#if 0
- printf("BAD JUJU:Major Bummer HW Error\n");
-#endif
- TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- retval = 1;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
break;
}
- mtx_unlock(&lun->lun_lock);
return (retval);
}
STAILQ_FOREACH(port, &softc->port_list, links) {
int action, done;
+ if (port->targ_port < softc->port_min ||
+ port->targ_port >= softc->port_max)
+ continue;
+
action = 0;
done = 0;
-
if ((entry->port_type == CTL_PORT_NONE)
&& (entry->targ_port == port->targ_port)) {
/*
break;
}
}
- if (action != 0) {
- /*
- * XXX KDM we have to drop the lock here,
- * because the online/offline operations
- * can potentially block. We need to
- * reference count the frontends so they
- * can't go away,
- */
+ if (action == 0)
+ continue;
+
+ /*
+ * XXX KDM we have to drop the lock here, because
+ * the online/offline operations can potentially
+ * block. We need to reference count the frontends
+ * so they can't go away,
+ */
+ if (cmd == CTL_ENABLE_PORT) {
+ mtx_unlock(&softc->ctl_lock);
+ ctl_port_online(port);
+ mtx_lock(&softc->ctl_lock);
+ } else if (cmd == CTL_DISABLE_PORT) {
mtx_unlock(&softc->ctl_lock);
-
- if (cmd == CTL_ENABLE_PORT) {
- ctl_port_online(port);
- } else if (cmd == CTL_DISABLE_PORT) {
- ctl_port_offline(port);
- }
-
+ ctl_port_offline(port);
mtx_lock(&softc->ctl_lock);
-
- if (cmd == CTL_SET_PORT_WWNS)
- ctl_port_set_wwns(port,
- (entry->flags & CTL_PORT_WWNN_VALID) ?
- 1 : 0, entry->wwnn,
- (entry->flags & CTL_PORT_WWPN_VALID) ?
- 1 : 0, entry->wwpn);
+ } else if (cmd == CTL_SET_PORT_WWNS) {
+ ctl_port_set_wwns(port,
+ (entry->flags & CTL_PORT_WWNN_VALID) ?
+ 1 : 0, entry->wwnn,
+ (entry->flags & CTL_PORT_WWPN_VALID) ?
+ 1 : 0, entry->wwpn);
}
if (done != 0)
break;
|| ((lun->flags & CTL_LUN_DISABLED) != 0))
continue;
- for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
+ for (j = 0; j < CTL_MAX_PORTS; j++) {
if (lun->pr_keys[j] == NULL)
continue;
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
}
}
- retval = fe->ioctl(dev, cmd, addr, flag, td);
+ if (fe->ioctl)
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+ else
+ retval = ENODEV;
if (req->num_args > 0) {
ctl_copyout_args(req->num_args, req->kern_args);
struct ctl_port *port;
mtx_lock(&softc->ctl_lock);
- if (lm->port >= CTL_MAX_PORTS ||
+ if (lm->port < softc->port_min ||
+ lm->port >= softc->port_max ||
(port = softc->ctl_ports[lm->port]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return (ENXIO);
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
-{
- if (nexus->targ_port < CTL_MAX_PORTS)
- return (nexus->initid +
- (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
- else
- return (nexus->initid +
- ((nexus->targ_port - CTL_MAX_PORTS) *
- CTL_MAX_INIT_PER_PORT));
-}
-
-uint32_t
-ctl_get_resindex(struct ctl_nexus *nexus)
{
return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}
-uint32_t
-ctl_port_idx(int port_num)
-{
- if (port_num < CTL_MAX_PORTS)
- return(port_num);
- else
- return(port_num - CTL_MAX_PORTS);
-}
-
int
ctl_lun_map_init(struct ctl_port *port)
{
return (ENOMEM);
for (i = 0; i < CTL_MAX_LUNS; i++)
port->lun_map[i] = UINT32_MAX;
- if (port->status & CTL_PORT_STATUS_ONLINE &&
- port->lun_disable != NULL) {
- STAILQ_FOREACH(lun, &softc->lun_list, links)
- port->lun_disable(port->targ_lun_arg, lun->lun);
+ if (port->status & CTL_PORT_STATUS_ONLINE) {
+ if (port->lun_disable != NULL) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ port->lun_disable(port->targ_lun_arg, lun->lun);
+ }
+ ctl_isc_announce_port(port);
}
return (0);
}
return (0);
free(port->lun_map, M_CTL);
port->lun_map = NULL;
- if (port->status & CTL_PORT_STATUS_ONLINE &&
- port->lun_enable != NULL) {
- STAILQ_FOREACH(lun, &softc->lun_list, links)
- port->lun_enable(port->targ_lun_arg, lun->lun);
+ if (port->status & CTL_PORT_STATUS_ONLINE) {
+ if (port->lun_enable != NULL) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links)
+ port->lun_enable(port->targ_lun_arg, lun->lun);
+ }
+ ctl_isc_announce_port(port);
}
return (0);
}
}
old = port->lun_map[plun];
port->lun_map[plun] = glun;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS &&
- port->lun_enable != NULL)
- port->lun_enable(port->targ_lun_arg, plun);
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
+ if (port->lun_enable != NULL)
+ port->lun_enable(port->targ_lun_arg, plun);
+ ctl_isc_announce_port(port);
+ }
return (0);
}
return (0);
old = port->lun_map[plun];
port->lun_map[plun] = UINT32_MAX;
- if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS &&
- port->lun_disable != NULL)
- port->lun_disable(port->targ_lun_arg, plun);
+ if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
+ if (port->lun_disable != NULL)
+ port->lun_disable(port->targ_lun_arg, plun);
+ ctl_isc_announce_port(port);
+ }
return (0);
}
static struct ctl_port *
ctl_io_port(struct ctl_io_hdr *io_hdr)
{
- int port_num;
- port_num = io_hdr->nexus.targ_port;
- return (control_softc->ctl_ports[ctl_port_idx(port_num)]);
+ return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
}
-/*
- * Note: This only works for bitmask sizes that are at least 32 bits, and
- * that are a power of 2.
- */
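+/*
+ * Return the index of the first zero bit in mask within [first, last),
+ * or -1 if all bits in that range are set.
+ */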
int
-ctl_ffz(uint32_t *mask, uint32_t size)
+ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
- uint32_t num_chunks, num_pieces;
- int i, j;
-
- num_chunks = (size >> 5);
- if (num_chunks == 0)
- num_chunks++;
- num_pieces = MIN((sizeof(uint32_t) * 8), size);
+ int i;
- for (i = 0; i < num_chunks; i++) {
- for (j = 0; j < num_pieces; j++) {
- if ((mask[i] & (1 << j)) == 0)
- return ((i << 5) + j);
- }
+ for (i = first; i < last; i++) {
+ if ((mask[i / 32] & (1 << (i % 32))) == 0)
+ return (i);
}
-
return (-1);
}
}
lun_number = be_lun->req_lun_id;
} else {
- lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
+ lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
if (lun_number == -1) {
mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl: can't allocate LUN, out of LUNs\n");
free(lun->lun_devid, M_CTL);
for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pending_ua[i], M_CTL);
- for (i = 0; i < 2 * CTL_MAX_PORTS; i++)
+ for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pr_keys[i], M_CTL);
free(lun->write_buffer, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
}
mtx_unlock(&softc->ctl_lock);
+ ctl_isc_announce_lun(lun);
return (0);
}
}
mtx_unlock(&softc->ctl_lock);
+ ctl_isc_announce_lun(lun);
return (0);
}
return (0);
}
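+/*
+ * Switch a LUN between primary and secondary HA roles: update the flag,
+ * raise an asymmetric-access-changed UA and announce the change to the
+ * peer.
+ */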
+int
+ctl_lun_primary(struct ctl_be_lun *be_lun)
+{
+ struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&lun->lun_lock);
+ lun->flags |= CTL_LUN_PRIMARY_SC;
+ ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+ mtx_unlock(&lun->lun_lock);
+ ctl_isc_announce_lun(lun);
+ return (0);
+}
+
+int
+ctl_lun_secondary(struct ctl_be_lun *be_lun)
+{
+ struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+
+ mtx_lock(&lun->lun_lock);
+ lun->flags &= ~CTL_LUN_PRIMARY_SC;
+ ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
+ mtx_unlock(&lun->lun_lock);
+ ctl_isc_announce_lun(lun);
+ return (0);
+}
+
int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
+ union ctl_ha_msg msg;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
mtx_unlock(&lun->lun_lock);
+ if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ /* Send msg to other side. */
+ bzero(&msg.ua, sizeof(msg.ua));
+ msg.hdr.msg_type = CTL_MSG_UA;
+ msg.hdr.nexus.initid = -1;
+ msg.hdr.nexus.targ_port = -1;
+ msg.hdr.nexus.targ_lun = lun->lun;
+ msg.hdr.nexus.targ_mapped_lun = lun->lun;
+ msg.ua.ua_all = 1;
+ msg.ua.ua_set = 1;
+ msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
+ M_WAITOK);
+ }
}
/*
io->io_hdr.port_status);
}
+ if (ctl_debug & CTL_DEBUG_CDB_DATA)
+ ctl_data_print(io);
if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
(io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
*
* - Call some other function once the data is in?
*/
- if (ctl_debug & CTL_DEBUG_CDB_DATA)
- ctl_data_print(io);
/*
* XXX KDM call ctl_scsiio() again for now, and check flag
CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
CTL_DEBUG_PRINT(("ctl_reserve\n"));
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
switch (ctsio->cdb[0]) {
&& ((cdb->how & SSS_START)==0)) {
uint32_t residx;
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
if (ctl_get_prkey(lun, residx) == 0
|| (lun->pr_res_idx!=residx && lun->res_type < 4)) {
{
struct scsi_maintenance_in *cdb;
int retval;
- int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os;
+ int alloc_len, ext, total_len = 0, g, pc, pg, gs, os;
int num_target_port_groups, num_target_ports;
struct ctl_lun *lun;
struct ctl_softc *softc;
total_len = sizeof(struct scsi_target_group_data);
total_len += sizeof(struct scsi_target_port_group_descriptor) *
num_target_port_groups +
- sizeof(struct scsi_target_port_descriptor) *
- num_target_ports * num_target_port_groups;
+ sizeof(struct scsi_target_port_descriptor) * num_target_ports;
alloc_len = scsi_4btoul(cdb->length);
}
mtx_lock(&softc->ctl_lock);
- pg = softc->port_offset / CTL_MAX_PORTS;
- if (softc->flags & CTL_FLAG_ACTIVE_SHELF) {
- if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) {
- gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_STANDBY;
- } else if (lun->flags & CTL_LUN_PRIMARY_SC) {
- gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- } else {
- gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- }
- } else {
+ pg = softc->port_min / softc->port_cnt;
+ if (softc->ha_link == CTL_HA_LINK_OFFLINE)
+ gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
+ else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
+ gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
+ else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
+ else
+ gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ if (lun->flags & CTL_LUN_PRIMARY_SC) {
+ os = gs;
+ gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+ } else
os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
- }
for (g = 0; g < num_target_port_groups; g++) {
tpg_desc->pref_state = (g == pg) ? gs : os;
- tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP;
+ tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
+ TPG_U_SUP | TPG_T_SUP;
scsi_ulto2b(g + 1, tpg_desc->target_port_group);
tpg_desc->status = TPG_IMPLICIT;
pc = 0;
STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port->targ_port < g * softc->port_cnt ||
+ port->targ_port >= (g + 1) * softc->port_cnt)
+ continue;
if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
continue;
if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
- p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
- scsi_ulto2b(p, tpg_desc->descriptors[pc].
+ scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
relative_target_port_identifier);
pc++;
}
scsi_ulto4b(sizeof(struct scsi_per_res_key) *
lun->pr_key_count, res_keys->header.length);
- for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
if ((key = ctl_get_prkey(lun, i)) == 0)
continue;
scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
res_desc = &res_status->desc[0];
- for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if ((key = ctl_get_prkey(lun, i)) == 0)
continue;
scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
res_desc->rel_trgt_port_id);
len = 0;
- port = softc->ctl_ports[
- ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)];
+ port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
if (port != NULL)
len = ctl_create_iid(port,
i % CTL_MAX_INIT_PER_PORT,
return (CTL_RETVAL_COMPLETE);
}
-static void
-ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua)
-{
- int off = lun->ctl_softc->persis_offset;
-
- if (residx >= off && residx < off + CTL_MAX_INITIATORS)
- ctl_est_ua(lun, residx - off, ua);
-}
-
/*
* Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
* it should return.
struct scsi_per_res_out_parms* param)
{
union ctl_ha_msg persis_io;
- int retval, i;
- int isc_retval;
-
- retval = 0;
+ int i;
mtx_lock(&lun->lun_lock);
if (sa_res_key == 0) {
* Unregister everybody else and build UA for
* them
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for(i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->pr_key_count = 1;
lun->res_type = type;
if (lun->res_type != SPR_TYPE_WR_EX_AR
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/* not all registrants */
mtx_unlock(&lun->lun_lock);
return (1);
}
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (ctl_get_prkey(lun, i) != sa_res_key)
continue;
found = 1;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
if (!found) {
mtx_unlock(&lun->lun_lock);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/* Reserved but not all registrants */
/* sa_res_key is res holder */
* except don't unregister the res holder.
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
if (sa_res_key == ctl_get_prkey(lun, i)) {
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
} else if (type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type == SPR_TYPE_EX_AC_RO)) {
- ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
+ ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
lun->res_type = type;
lun->pr_res_idx = residx;
else
lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else {
/*
* sa_res_key is not the res holder just
*/
int found=0;
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (sa_res_key != ctl_get_prkey(lun, i))
continue;
found = 1;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
if (!found) {
ctl_done((union ctl_io *)ctsio);
return (1);
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned "
- "from ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
}
-
- lun->PRGeneration++;
- mtx_unlock(&lun->lun_lock);
-
- return (retval);
+ return (0);
}
static void
* Unregister everybody else and build UA for
* them
*/
- for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == msg->pr.pr_info.residx ||
ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->pr_key_count = 1;
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = msg->pr.pr_info.residx;
} else {
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (sa_res_key == ctl_get_prkey(lun, i))
continue;
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
}
} else {
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (i == msg->pr.pr_info.residx ||
ctl_get_prkey(lun, i) == 0)
continue;
if (sa_res_key == ctl_get_prkey(lun, i)) {
ctl_clr_prkey(lun, i);
lun->pr_key_count--;
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
} else if (msg->pr.pr_info.res_type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type == SPR_TYPE_EX_AC_RO)) {
- ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
+ ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
lun->res_type = msg->pr.pr_info.res_type;
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
int retval;
- int isc_retval;
u_int32_t param_len;
struct scsi_per_res_out *cdb;
struct ctl_lun *lun;
param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
res_key = scsi_8btou64(param->res_key.key);
sa_res_key = scsi_8btou64(param->serv_act_res_key);
* RELEASED
*/
- for (i = 0; i < CTL_MAX_INITIATORS;i++){
- if (ctl_get_prkey(lun, i +
- softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++) {
+ if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i,
CTL_UA_RES_RELEASE);
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
}
}
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
persis_io.pr.pr_info.residx = residx;
- if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0 )) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
} else /* sa_res_key != 0 */ {
/*
if (ctl_get_prkey(lun, residx) == 0)
lun->pr_key_count++;
ctl_set_prkey(lun, residx, sa_res_key);
+ lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
- lun->PRGeneration++;
- mtx_unlock(&lun->lun_lock);
break;
}
persis_io.pr.pr_info.action = CTL_PR_RESERVE;
persis_io.pr.pr_info.residx = lun->pr_res_idx;
persis_io.pr.pr_info.res_type = type;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &persis_io, sizeof(persis_io), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
}
break;
*/
if (type != SPR_TYPE_EX_AC
&& type != SPR_TYPE_WR_EX) {
- for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- if (i == residx ||
- ctl_get_prkey(lun,
- i + softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++) {
+ if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
}
mtx_unlock(&lun->lun_lock);
+
/* Send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_RELEASE;
- if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
- sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
break;
case SPRO_CLEAR:
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
ctl_clr_prkey(lun, residx);
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
+ for (i = 0; i < CTL_MAX_INITIATORS; i++)
if (ctl_get_prkey(lun, i) != 0) {
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->PRGeneration++;
mtx_unlock(&lun->lun_lock);
+
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_CLEAR;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
- sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Persis Out error returned from "
- "ctl_ha_msg_send %d\n", isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
+ sizeof(persis_io.pr), M_WAITOK);
break;
case SPRO_PREEMPT:
* RELEASED
*/
- for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- if (ctl_get_prkey(lun, i +
- softc->persis_offset) == 0)
+ for (i = softc->init_min; i < softc->init_max; i++) {
+ if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
*/
if (lun->res_type != SPR_TYPE_EX_AC
&& lun->res_type != SPR_TYPE_WR_EX) {
- for (i = 0; i < CTL_MAX_INITIATORS; i++)
- if (ctl_get_prkey(lun, i + softc->persis_offset) != 0)
+ for (i = softc->init_min; i < softc->init_max; i++)
+ if (ctl_get_prkey(lun, i) != 0)
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
lun->pr_key_count = 0;
lun->pr_res_idx = CTL_PR_NO_RESERVATION;
- for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (ctl_get_prkey(lun, i) == 0)
continue;
ctl_clr_prkey(lun, i);
- ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
+ ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
}
lun->PRGeneration++;
break;
return (CTL_RETVAL_COMPLETE);
}
-#ifdef notyet
-static int
-ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
-{
-
-}
-#endif
-
/*
* SCSI VPD page 0x00, the Supported VPD Pages page.
*/
softc = control_softc;
- port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
+ port = ctl_io_port(&ctsio->io_hdr);
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data_len = sizeof(struct scsi_vpd_device_id) +
sizeof(struct scsi_vpd_id_trgt_port_grp_id);
if (lun && lun->lun_devid)
data_len += lun->lun_devid->len;
- if (port->port_devid)
+ if (port && port->port_devid)
data_len += port->port_devid->len;
- if (port->target_devid)
+ if (port && port->target_devid)
data_len += port->target_devid->len;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
devid_ptr->page_code = SVPD_DEVICE_ID;
scsi_ulto2b(data_len - 4, devid_ptr->length);
- if (port->port_type == CTL_PORT_FC)
+ if (port && port->port_type == CTL_PORT_FC)
proto = SCSI_PROTO_FC << 4;
- else if (port->port_type == CTL_PORT_ISCSI)
+ else if (port && port->port_type == CTL_PORT_ISCSI)
proto = SCSI_PROTO_ISCSI << 4;
else
proto = SCSI_PROTO_SPI << 4;
/*
* This is for the WWPN which is a port association.
*/
- if (port->port_devid) {
+ if (port && port->port_devid) {
memcpy(desc, port->port_devid->data, port->port_devid->len);
desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
port->port_devid->len);
desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
SVPD_ID_TYPE_TPORTGRP;
desc->length = 4;
- scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1,
&desc->identifier[2]);
desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
sizeof(struct scsi_vpd_id_trgt_port_grp_id));
/*
* This is for the Target identifier
*/
- if (port->target_devid) {
+ if (port && port->target_devid) {
memcpy(desc, port->target_devid->data, port->target_devid->len);
}
struct scsi_vpd_port_designation_cont *pdc;
struct ctl_lun *lun;
struct ctl_port *port;
- int data_len, num_target_ports, iid_len, id_len, g, pg, p;
- int num_target_port_groups;
+ int data_len, num_target_ports, iid_len, id_len;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- if (softc->is_single)
- num_target_port_groups = 1;
- else
- num_target_port_groups = NUM_TARGET_PORT_GROUPS;
num_target_ports = 0;
iid_len = 0;
id_len = 0;
}
mtx_unlock(&softc->ctl_lock);
- data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
+ data_len = sizeof(struct scsi_vpd_scsi_ports) +
num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
pd = &sp->design[0];
mtx_lock(&softc->ctl_lock);
- pg = softc->port_offset / CTL_MAX_PORTS;
- for (g = 0; g < num_target_port_groups; g++) {
- STAILQ_FOREACH(port, &softc->port_list, links) {
- if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
- continue;
- if (lun != NULL &&
- ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
- continue;
- p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
- scsi_ulto2b(p, pd->relative_port_id);
- if (port->init_devid && g == pg) {
- iid_len = port->init_devid->len;
- memcpy(pd->initiator_transportid,
- port->init_devid->data, port->init_devid->len);
- } else
- iid_len = 0;
- scsi_ulto2b(iid_len, pd->initiator_transportid_length);
- pdc = (struct scsi_vpd_port_designation_cont *)
- (&pd->initiator_transportid[iid_len]);
- if (port->port_devid && g == pg) {
- id_len = port->port_devid->len;
- memcpy(pdc->target_port_descriptors,
- port->port_devid->data, port->port_devid->len);
- } else
- id_len = 0;
- scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
- pd = (struct scsi_vpd_port_designation *)
- ((uint8_t *)pdc->target_port_descriptors + id_len);
- }
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
+ continue;
+ scsi_ulto2b(port->targ_port, pd->relative_port_id);
+ if (port->init_devid) {
+ iid_len = port->init_devid->len;
+ memcpy(pd->initiator_transportid,
+ port->init_devid->data, port->init_devid->len);
+ } else
+ iid_len = 0;
+ scsi_ulto2b(iid_len, pd->initiator_transportid_length);
+ pdc = (struct scsi_vpd_port_designation_cont *)
+ (&pd->initiator_transportid[iid_len]);
+ if (port->port_devid) {
+ id_len = port->port_devid->len;
+ memcpy(pdc->target_port_descriptors,
+ port->port_devid->data, port->port_devid->len);
+ } else
+ id_len = 0;
+ scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
+ pd = (struct scsi_vpd_port_designation *)
+ ((uint8_t *)pdc->target_port_descriptors + id_len);
}
mtx_unlock(&softc->ctl_lock);
struct scsi_inquiry_data *inq_ptr;
struct scsi_inquiry *cdb;
struct ctl_softc *softc;
+ struct ctl_port *port;
struct ctl_lun *lun;
char *val;
uint32_t alloc_len, data_len;
* We treat the ioctl front end, and any SCSI adapters, as packetized
* SCSI front ends.
*/
- port_type = softc->ctl_ports[
- ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
+ port = ctl_io_port(&ctsio->io_hdr);
+ if (port != NULL)
+ port_type = port->port_type;
+ else
+ port_type = CTL_PORT_SCSI;
if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
port_type = CTL_PORT_SCSI;
ctsio->kern_total_len = alloc_len;
}
- /*
- * If we have a LUN configured, report it as connected. Otherwise,
- * report that it is offline or no device is supported, depending
- * on the value of inquiry_pq_no_lun.
- *
- * According to the spec (SPC-4 r34), the peripheral qualifier
- * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
- *
- * "A peripheral device having the specified peripheral device type
- * is not connected to this logical unit. However, the device
- * server is capable of supporting the specified peripheral device
- * type on this logical unit."
- *
- * According to the same spec, the peripheral qualifier
- * SID_QUAL_BAD_LU (011b) is used in this scenario:
- *
- * "The device server is not capable of supporting a peripheral
- * device on this logical unit. For this peripheral qualifier the
- * peripheral device type shall be set to 1Fh. All other peripheral
- * device type values are reserved for this peripheral qualifier."
- *
- * Given the text, it would seem that we probably want to report that
- * the LUN is offline here. There is no LUN connected, but we can
- * support a LUN at the given LUN number.
- *
- * In the real world, though, it sounds like things are a little
- * different:
- *
- * - Linux, when presented with a LUN with the offline peripheral
- * qualifier, will create an sg driver instance for it. So when
- * you attach it to CTL, you wind up with a ton of sg driver
- * instances. (One for every LUN that Linux bothered to probe.)
- * Linux does this despite the fact that it issues a REPORT LUNs
- * to LUN 0 to get the inventory of supported LUNs.
- *
- * - There is other anecdotal evidence (from Emulex folks) about
- * arrays that use the offline peripheral qualifier for LUNs that
- * are on the "passive" path in an active/passive array.
- *
- * So the solution is provide a hopefully reasonable default
- * (return bad/no LUN) and allow the user to change the behavior
- * with a tunable/sysctl variable.
- */
- if (lun != NULL)
- inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
- lun->be_lun->lun_type;
- else if (softc->inquiry_pq_no_lun == 0)
- inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
- else
+ if (lun != NULL) {
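+ /*
+ * Whether the LUN is reported connected or offline depends
+ * on whether this side is primary for it and on the current
+ * HA link state.
+ */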
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
+ softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
+ inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ } else {
+ inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
+ lun->be_lun->lun_type;
+ }
+ } else
inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
/* RMB in byte 2 is 0 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
+ struct ctl_softc *softc = lun->ctl_softc;
union ctl_io *cur_blocked, *next_blocked;
mtx_assert(&lun->lun_lock, MA_OWNED);
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP: {
const struct ctl_cmd_entry *entry;
- int isc_retval;
/*
* The skip case shouldn't happen, this transaction
blocked_links);
cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
+ if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+ (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
/*
* Need to send IO back to original side to
* run
*/
union ctl_ha_msg msg_info;
+ cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
msg_info.hdr.original_sc =
cur_blocked->io_hdr.original_sc;
msg_info.hdr.serializing_sc = cur_blocked;
msg_info.hdr.msg_type = CTL_MSG_R2R;
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- &msg_info, sizeof(msg_info), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:Check Blocked error from "
- "ctl_ha_msg_send %d\n",
- isc_retval);
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.hdr), M_NOWAIT);
break;
}
entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
mtx_assert(&lun->lun_lock, MA_OWNED);
/*
- * If this shelf is a secondary shelf controller, we have to reject
- * any media access commands.
+ * If this shelf is a secondary shelf controller, we may have to
+ * reject commands that the current HA mode and link state disallow.
*/
- if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
- (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
- ctl_set_lun_standby(ctsio);
- retval = 1;
- goto bailout;
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+ if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+ ctl_set_lun_unavail(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+ if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
+ ctl_set_lun_transit(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+ if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
+ (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
+ ctl_set_lun_standby(ctsio);
+ retval = 1;
+ goto bailout;
+ }
+
+ /* The remaining checks are done only on the executing side. */
+ if (softc->ha_mode == CTL_HA_MODE_XFER)
+ goto bailout;
}
if (entry->pattern & CTL_LUN_PAT_WRITE) {
* even on reserved LUNs, and if this initiator isn't the one who
* reserved us, reject the command with a reservation conflict.
*/
- residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
if ((lun->flags & CTL_LUN_RESERVED)
&& ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
if (lun->res_idx != residx) {
retval = 1;
goto bailout;
}
-
}
if ((lun->flags & CTL_LUN_OFFLINE)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
+ && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) {
ctl_set_lun_not_ready(ctsio);
retval = 1;
goto bailout;
}
- /*
- * If the LUN is stopped, see if this particular command is allowed
- * for a stopped lun. Otherwise, reject it with 0x04,0x02.
- */
if ((lun->flags & CTL_LUN_STOPPED)
&& ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
/* "Logical unit not ready, initializing cmd. required" */
bailout:
return (retval);
-
}
static void
ctl_done(io);
}
-#ifdef notyet
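+/*
+ * Process pending I/O on one LUN after an HA failover event.
+ * Called with the LUN lock held.
+ */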
static void
-ctl_failover(void)
+ctl_failover_lun(struct ctl_lun *lun)
{
- struct ctl_lun *lun;
- struct ctl_softc *softc;
- union ctl_io *next_io, *pending_io;
- union ctl_io *io;
- int lun_idx;
-
- softc = control_softc;
-
- mtx_lock(&softc->ctl_lock);
- /*
- * Remove any cmds from the other SC from the rtr queue. These
- * will obviously only be for LUNs for which we're the primary.
- * We can't send status or get/send data for these commands.
- * Since they haven't been executed yet, we can just remove them.
- * We'll either abort them or delete them below, depending on
- * which HA mode we're in.
- */
-#ifdef notyet
- mtx_lock(&softc->queue_lock);
- for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
- io != NULL; io = next_io) {
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
- STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
- ctl_io_hdr, links);
- }
- mtx_unlock(&softc->queue_lock);
-#endif
-
- for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) {
- lun = softc->ctl_luns[lun_idx];
- if (lun==NULL)
- continue;
-
- /*
- * Processor LUNs are primary on both sides.
- * XXX will this always be true?
- */
- if (lun->be_lun->lun_type == T_PROCESSOR)
- continue;
-
- if ((lun->flags & CTL_LUN_PRIMARY_SC)
- && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
- printf("FAILOVER: primary lun %d\n", lun_idx);
- /*
- * Remove all commands from the other SC. First from the
- * blocked queue then from the ooa queue. Once we have
- * removed them. Call ctl_check_blocked to see if there
- * is anything that can run.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(
- &lun->blocked_queue); io != NULL; io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, blocked_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
- TAILQ_REMOVE(&lun->blocked_queue,
- &io->io_hdr,blocked_links);
- io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- TAILQ_REMOVE(&lun->ooa_queue,
- &io->io_hdr, ooa_links);
-
- ctl_free_io(io);
- }
- }
-
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
- io != NULL; io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, ooa_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
-
- TAILQ_REMOVE(&lun->ooa_queue,
- &io->io_hdr,
- ooa_links);
-
- ctl_free_io(io);
+ struct ctl_softc *softc = lun->ctl_softc;
+ struct ctl_io_hdr *io, *next_io;
+
+ CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun));
+ if (softc->ha_mode == CTL_HA_MODE_XFER) {
+ TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ if (io->flags & CTL_FLAG_IO_ACTIVE) {
+ io->flags |= CTL_FLAG_ABORT;
+ } else { /* This can only be due to DATAMOVE */
+ io->msg_type = CTL_MSG_DATAMOVE_DONE;
+ io->flags |= CTL_FLAG_IO_ACTIVE;
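+ /* Distinctive nonzero port_status marking the aborted move. */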
+ io->port_status = 31340;
+ ctl_enqueue_isc((union ctl_io *)io);
}
}
- ctl_check_blocked(lun);
- } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
- && (softc->ha_mode == CTL_HA_MODE_XFER)) {
-
- printf("FAILOVER: primary lun %d\n", lun_idx);
- /*
- * Abort all commands from the other SC. We can't
- * send status back for them now. These should get
- * cleaned up when they are completed or come out
- * for a datamove operation.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
- io != NULL; io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(
- &io->io_hdr, ooa_links);
-
- if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
- io->io_hdr.flags |= CTL_FLAG_ABORT;
- }
- } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
- && (softc->ha_mode == CTL_HA_MODE_XFER)) {
-
- printf("FAILOVER: secondary lun %d\n", lun_idx);
-
- lun->flags |= CTL_LUN_PRIMARY_SC;
-
- /*
- * We send all I/O that was sent to this controller
- * and redirected to the other side back with
- * busy status, and have the initiator retry it.
- * Figuring out how much data has been transferred,
- * etc. and picking up where we left off would be
- * very tricky.
- *
- * XXX KDM need to remove I/O from the blocked
- * queue as well!
- */
- for (pending_io = (union ctl_io *)TAILQ_FIRST(
- &lun->ooa_queue); pending_io != NULL;
- pending_io = next_io) {
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
-
- pending_io->io_hdr.flags &=
- ~CTL_FLAG_SENT_2OTHER_SC;
-
- if (pending_io->io_hdr.flags &
- CTL_FLAG_IO_ACTIVE) {
- pending_io->io_hdr.flags |=
- CTL_FLAG_FAILOVER;
+ /* We are slave */
+ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+ io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if (io->flags & CTL_FLAG_IO_ACTIVE) {
+ io->flags |= CTL_FLAG_FAILOVER;
} else {
- ctl_set_busy(&pending_io->scsiio);
- ctl_done(pending_io);
+ ctl_set_busy(&((union ctl_io *)io)->
+ scsiio);
+ ctl_done((union ctl_io *)io);
}
}
-
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
- && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
- printf("FAILOVER: secondary lun %d\n", lun_idx);
- /*
- * if the first io on the OOA is not on the RtR queue
- * add it.
- */
- lun->flags |= CTL_LUN_PRIMARY_SC;
-
- pending_io = (union ctl_io *)TAILQ_FIRST(
- &lun->ooa_queue);
- if (pending_io==NULL) {
- printf("Nothing on OOA queue\n");
- continue;
- }
-
- pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
- if ((pending_io->io_hdr.flags &
- CTL_FLAG_IS_WAS_ON_RTR) == 0) {
- pending_io->io_hdr.flags |=
- CTL_FLAG_IS_WAS_ON_RTR;
- ctl_enqueue_rtr(pending_io);
+ }
+ } else { /* SERIALIZE modes */
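+ /*
+ * I/O from the dead peer has produced no status yet and can
+ * simply be dropped; our own I/O forwarded to the peer for
+ * serialization, if not yet active, is completed as BUSY so
+ * the initiator retries it.
+ */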
+ TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
+ next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ TAILQ_REMOVE(&lun->blocked_queue, io,
+ blocked_links);
+ io->flags &= ~CTL_FLAG_BLOCKED;
+ TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+ ctl_free_io((union ctl_io *)io);
}
-#if 0
- else
- {
- printf("Tag 0x%04x is running\n",
- pending_io->scsiio.tag_num);
+ }
+ TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
+ /* We are master */
+ if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
+ ctl_free_io((union ctl_io *)io);
}
-#endif
-
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
- for (pending_io=next_io; pending_io != NULL;
- pending_io = next_io) {
- pending_io->io_hdr.flags &=
- ~CTL_FLAG_SENT_2OTHER_SC;
- next_io = (union ctl_io *)TAILQ_NEXT(
- &pending_io->io_hdr, ooa_links);
- if (pending_io->io_hdr.flags &
- CTL_FLAG_IS_WAS_ON_RTR) {
-#if 0
- printf("Tag 0x%04x is running\n",
- pending_io->scsiio.tag_num);
-#endif
- continue;
- }
-
- switch (ctl_check_ooa(lun, pending_io,
- (union ctl_io *)TAILQ_PREV(
- &pending_io->io_hdr, ctl_ooaq,
- ooa_links))) {
-
- case CTL_ACTION_BLOCK:
- TAILQ_INSERT_TAIL(&lun->blocked_queue,
- &pending_io->io_hdr,
- blocked_links);
- pending_io->io_hdr.flags |=
- CTL_FLAG_BLOCKED;
- break;
- case CTL_ACTION_PASS:
- case CTL_ACTION_SKIP:
- pending_io->io_hdr.flags |=
- CTL_FLAG_IS_WAS_ON_RTR;
- ctl_enqueue_rtr(pending_io);
- break;
- case CTL_ACTION_OVERLAP:
- ctl_set_overlapped_cmd(
- (struct ctl_scsiio *)pending_io);
- ctl_done(pending_io);
- break;
- case CTL_ACTION_OVERLAP_TAG:
- ctl_set_overlapped_tag(
- (struct ctl_scsiio *)pending_io,
- pending_io->scsiio.tag_num & 0xff);
- ctl_done(pending_io);
- break;
- case CTL_ACTION_ERROR:
- default:
- ctl_set_internal_failure(
- (struct ctl_scsiio *)pending_io,
- 0, // sks_valid
- 0); //retry count
- ctl_done(pending_io);
- break;
+ /* We are slave */
+ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
+ io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
+ ctl_set_busy(&((union ctl_io *)io)->
+ scsiio);
+ ctl_done((union ctl_io *)io);
}
}
-
- ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
- } else {
- panic("Unhandled HA mode failover, LUN flags = %#x, "
- "ha_mode = #%x", lun->flags, softc->ha_mode);
}
+ ctl_check_blocked(lun);
}
- ctl_pause_rtr = 0;
- mtx_unlock(&softc->ctl_lock);
}
-#endif
static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
lun->be_lun;
- if (lun->be_lun->lun_type == T_PROCESSOR) {
- ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
- }
/*
* Every I/O goes into the OOA queue for a
* find it easily. Something similar will need to be done on the other
* side so that when we are done we can find the copy.
*/
- if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) {
union ctl_ha_msg msg_info;
int isc_retval;
ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ mtx_unlock(&lun->lun_lock);
msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
- msg_info.hdr.original_sc = (union ctl_io *)ctsio;
-#if 0
- printf("1. ctsio %p\n", ctsio);
-#endif
+ msg_info.hdr.original_sc = (union ctl_io *)ctsio;
msg_info.hdr.serializing_sc = NULL;
msg_info.hdr.nexus = ctsio->io_hdr.nexus;
msg_info.scsi.tag_num = ctsio->tag_num;
msg_info.scsi.tag_type = ctsio->tag_type;
+ msg_info.scsi.cdb_len = ctsio->cdb_len;
memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
- ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-
- if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0)) >
- CTL_HA_STATUS_SUCCESS) {
- printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
- isc_retval);
- printf("CTL:opcode is %x\n", ctsio->cdb[0]);
- } else {
-#if 0
- printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
-#endif
+ if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
+ M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
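+ /* The send failed; ask the initiator to retry. */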
+ ctl_set_busy(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
}
-
- /*
- * XXX KDM this I/O is off the incoming queue, but hasn't
- * been inserted on any other queue. We may need to come
- * up with a holding queue while we wait for serialization
- * so that we have an idea of what we're waiting for from
- * the other side.
- */
- mtx_unlock(&lun->lun_lock);
return (retval);
}
if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
msg_info.hdr.nexus = io->io_hdr.nexus;
if (ua_type==CTL_UA_TARG_RESET)
msg_info.task.task_action = CTL_TASK_TARGET_RESET;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0)) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_WAITOK);
}
retval = 0;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0);
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_NOWAIT);
}
}
}
{
struct ctl_softc *softc = control_softc;
struct ctl_lun *lun;
- uint32_t initidx, residx;
+ uint32_t initidx;
initidx = ctl_get_initindex(&io->io_hdr.nexus);
- residx = ctl_get_resindex(&io->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
mtx_lock(&lun->lun_lock);
#ifdef CTL_WITH_CA
ctl_clear_mask(lun->have_ca, initidx);
#endif
- if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
+ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
lun->flags &= ~CTL_LUN_RESERVED;
ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
mtx_unlock(&lun->lun_lock);
!(lun->flags & CTL_LUN_PRIMARY_SC)) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_ABORT_TASK;
msg_info.task.tag_num = io->taskio.tag_num;
#if 0
printf("Sent Abort to other side\n");
#endif
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info, sizeof(msg_info), 0) !=
- CTL_HA_STATUS_SUCCESS) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_NOWAIT);
}
#if 0
printf("ctl_abort_task: found I/O to abort\n");
retval = 1;
break;
}
+ retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET);
+ mtx_unlock(&softc->ctl_lock);
- if (!(io->io_hdr.flags &
- CTL_FLAG_FROM_OTHER_SC)) {
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
- io->io_hdr.flags |=
- CTL_FLAG_SENT_2OTHER_SC;
- msg_info.hdr.msg_type =
- CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
- msg_info.task.task_action =
- CTL_TASK_LUN_RESET;
+ msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS !=
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info,
- sizeof(msg_info), 0)) {
- }
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.task), M_WAITOK);
}
-
- retval = ctl_lun_reset(lun, io,
- CTL_UA_LUN_RESET);
- mtx_unlock(&softc->ctl_lock);
break;
}
case CTL_TASK_TARGET_RESET:
free_io = 0;
io->scsiio.be_move_done(io);
break;
+ case CTL_MSG_FAILOVER:
+ mtx_lock(&lun->lun_lock);
+ ctl_failover_lun(lun);
+ mtx_unlock(&lun->lun_lock);
+ free_io = 1;
+ break;
default:
free_io = 1;
printf("%s: Invalid message type %d\n",
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
- struct ctl_lun *lun;
-
- lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
} else {
struct ctl_lun *lun;
*/
if (io->scsiio.kern_sg_entries == 0) {
msg.dt.kern_sg_entries = 1;
- /*
- * If this is in cached memory, flush the cache
- * before we send the DMA request to the other
- * controller. We want to do this in either the
- * read or the write case. The read case is
- * straightforward. In the write case, we want to
- * make sure nothing is in the local cache that
- * could overwrite the DMAed data.
- */
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
-
+#if 0
/*
* Convert to a physical address if this is a
* virtual address.
/*
* XXX KDM use busdma here!
*/
-#if 0
msg.dt.sg_list[0].addr = (void *)
vtophys(io->scsiio.kern_data_ptr);
-#endif
}
+#else
+ KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
+#endif
msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
do_sg_copy = 0;
} else {
- struct ctl_sg_entry *sgl;
-
- do_sg_copy = 1;
msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
- sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
+ do_sg_copy = 1;
}
msg.dt.kern_data_len = io->scsiio.kern_data_len;
for (i = sg_entries_sent, j = 0;
i < msg.dt.cur_sg_entries; i++, j++) {
- if ((io->io_hdr.flags &
- CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync()
- */
- }
+#if 0
if ((io->io_hdr.flags &
CTL_FLAG_BUS_ADDR) == 0) {
/*
* XXX KDM use busdma.
*/
-#if 0
msg.dt.sg_list[j].addr =(void *)
vtophys(sgl[i].addr);
-#endif
} else {
msg.dt.sg_list[j].addr =
sgl[i].addr;
}
+#else
+ KASSERT((io->io_hdr.flags &
+ CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ msg.dt.sg_list[j].addr = sgl[i].addr;
+#endif
msg.dt.sg_list[j].len = sgl[i].len;
}
}
else
msg.dt.sg_last = 0;
- /*
- * XXX KDM drop and reacquire the lock here?
- */
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
- sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
- /*
- * XXX do something here.
- */
+ sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
+ sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
+ M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
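+ /* The send failed; report a transport error and finish the move. */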
+ io->io_hdr.port_status = 31341;
+ io->scsiio.be_move_done(io);
+ return;
}
msg.dt.sent_sg_entries = sg_entries_sent;
}
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
- if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
- ctl_failover_io(io, /*have_lock*/ 0);
-
} else {
/*
* Lookup the fe_datamove() function for this particular
* front end.
*/
- fe_datamove =
- control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
}
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
union ctl_ha_msg msg;
- int isc_status;
memset(&msg, 0, sizeof(msg));
msg.scsi.tag_type = io->scsiio.tag_type;
msg.scsi.scsi_status = io->scsiio.scsi_status;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->scsiio.sense_len);
msg.scsi.sense_len = io->scsiio.sense_len;
msg.scsi.sense_residual = io->scsiio.sense_residual;
msg.scsi.fetd_status = io->io_hdr.port_status;
return;
}
- isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
- if (isc_status > CTL_HA_STATUS_SUCCESS) {
- /* XXX do something if this fails */
- }
-
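+ /* Send only the used portion of the sense buffer over the HA link. */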
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+ msg.scsi.sense_len, M_WAITOK);
}
/*
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
union ctl_io *io;
+ int i;
io = rq->context;
ctl_dt_req_free(rq);
- /*
- * In this case, we had to malloc the memory locally. Free it.
- */
- if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
- int i;
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- free(io->io_hdr.local_sglist[i].addr, M_CTL);
- }
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
+
/*
* The data is in local and remote memory, so now we need to send
* status (good or bad) back to the other side.
*/
io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
- fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
char path_str[64];
struct sbuf sb;
#endif
+ int i;
- /*
- * In this case, we had to malloc the memory locally. Free it.
- */
- if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
- int i;
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- free(io->io_hdr.local_sglist[i].addr, M_CTL);
- }
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
#if 0
scsi_path_string(io, path_str, sizeof(path_str));
io = rq->context;
if (rq->ret != CTL_HA_STATUS_SUCCESS) {
- printf("%s: ISC DMA read failed with error %d", __func__,
+ printf("%s: ISC DMA read failed with error %d\n", __func__,
rq->ret);
ctl_set_internal_failure(&io->scsiio,
/*sks_valid*/ 1,
/* XXX KDM add checks like the ones in ctl_datamove? */
- fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
+ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
fe_datamove(io);
}
ctl_datamove_remote_sgl_setup(union ctl_io *io)
{
struct ctl_sg_entry *local_sglist, *remote_sglist;
- struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
struct ctl_softc *softc;
+ uint32_t len_to_go;
int retval;
int i;
retval = 0;
softc = control_softc;
-
local_sglist = io->io_hdr.local_sglist;
- local_dma_sglist = io->io_hdr.local_dma_sglist;
remote_sglist = io->io_hdr.remote_sglist;
- remote_dma_sglist = io->io_hdr.remote_dma_sglist;
-
- if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
- for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
- local_sglist[i].len = remote_sglist[i].len;
-
- /*
- * XXX Detect the situation where the RS-level I/O
- * redirector on the other side has already read the
- * data off of the AOR RS on this side, and
- * transferred it to remote (mirror) memory on the
- * other side. Since we already have the data in
- * memory here, we just need to use it.
- *
- * XXX KDM this can probably be removed once we
- * get the cache device code in and take the
- * current AOR implementation out.
- */
-#ifdef NEEDTOPORT
- if ((remote_sglist[i].addr >=
- (void *)vtophys(softc->mirr->addr))
- && (remote_sglist[i].addr <
- ((void *)vtophys(softc->mirr->addr) +
- CacheMirrorOffset))) {
- local_sglist[i].addr = remote_sglist[i].addr -
- CacheMirrorOffset;
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
- CTL_FLAG_DATA_IN)
- io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
- } else {
- local_sglist[i].addr = remote_sglist[i].addr +
- CacheMirrorOffset;
- }
-#endif
-#if 0
- printf("%s: local %p, remote %p, len %d\n",
- __func__, local_sglist[i].addr,
- remote_sglist[i].addr, local_sglist[i].len);
-#endif
- }
- } else {
- uint32_t len_to_go;
-
- /*
- * In this case, we don't have automatically allocated
- * memory for this I/O on this controller. This typically
- * happens with internal CTL I/O -- e.g. inquiry, mode
- * sense, etc. Anything coming from RAIDCore will have
- * a mirror area available.
- */
- len_to_go = io->scsiio.kern_data_len;
-
- /*
- * Clear the no datasync flag, we have to use malloced
- * buffers.
- */
- io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
-
- /*
- * The difficult thing here is that the size of the various
- * S/G segments may be different than the size from the
- * remote controller. That'll make it harder when DMAing
- * the data back to the other side.
- */
- for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
- sizeof(io->io_hdr.remote_sglist[0])) &&
- (len_to_go > 0); i++) {
- local_sglist[i].len = MIN(len_to_go, 131072);
- CTL_SIZE_8B(local_dma_sglist[i].len,
- local_sglist[i].len);
- local_sglist[i].addr =
- malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);
-
- local_dma_sglist[i].addr = local_sglist[i].addr;
-
- if (local_sglist[i].addr == NULL) {
- int j;
+ len_to_go = io->scsiio.kern_data_len;
- printf("malloc failed for %zd bytes!",
- local_dma_sglist[i].len);
- for (j = 0; j < i; j++) {
- free(local_sglist[j].addr, M_CTL);
- }
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 1,
- /*retry_count*/ 4857);
- retval = 1;
- goto bailout_error;
-
- }
- /* XXX KDM do we need a sync here? */
+ /*
+ * The difficult thing here is that the sizes of the various
+ * S/G segments may differ from those used by the remote
+ * controller. That makes it harder when DMAing the data
+ * back to the other side.
+ */
+ for (i = 0; len_to_go > 0; i++) {
+ local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
+ local_sglist[i].addr =
+ malloc(local_sglist[i].len, M_CTL, M_WAITOK);
- len_to_go -= local_sglist[i].len;
- }
- /*
- * Reset the number of S/G entries accordingly. The
- * original number of S/G entries is available in
- * rem_sg_entries.
- */
- io->scsiio.kern_sg_entries = i;
+ len_to_go -= local_sglist[i].len;
+ }
+ /*
+ * Reset the number of S/G entries accordingly. The original
+ * number of S/G entries is available in rem_sg_entries.
+ */
+ io->scsiio.kern_sg_entries = i;
#if 0
- printf("%s: kern_sg_entries = %d\n", __func__,
- io->scsiio.kern_sg_entries);
- for (i = 0; i < io->scsiio.kern_sg_entries; i++)
- printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
- local_sglist[i].addr, local_sglist[i].len,
- local_dma_sglist[i].len);
+ printf("%s: kern_sg_entries = %d\n", __func__,
+ io->scsiio.kern_sg_entries);
+ for (i = 0; i < io->scsiio.kern_sg_entries; i++)
+ printf("%s: sg[%d] = %p, %d\n", __func__, i,
+ local_sglist[i].addr, local_sglist[i].len);
#endif
- }
-
-
- return (retval);
-
-bailout_error:
-
- ctl_send_datamove_done(io, /*have_lock*/ 0);
return (retval);
}
{
struct ctl_ha_dt_req *rq;
struct ctl_sg_entry *remote_sglist, *local_sglist;
- struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
uint32_t local_used, remote_used, total_used;
- int retval;
- int i, j;
-
- retval = 0;
+ int i, j, isc_ret;
rq = ctl_dt_req_alloc();
ctl_send_datamove_done(io, /*have_lock*/ 0);
- retval = 1;
-
- goto bailout;
+ return (1);
}
local_sglist = io->io_hdr.local_sglist;
- local_dma_sglist = io->io_hdr.local_dma_sglist;
remote_sglist = io->io_hdr.remote_sglist;
- remote_dma_sglist = io->io_hdr.remote_dma_sglist;
local_used = 0;
remote_used = 0;
total_used = 0;
- if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
- rq->ret = CTL_HA_STATUS_SUCCESS;
- rq->context = io;
- callback(rq);
- goto bailout;
- }
-
/*
* Pull/push the data over the wire from/to the other controller.
* This takes into account the possibility that the local and
* both the local and remote sglists is identical. Otherwise, we've
* essentially got a coding error of some sort.
*/
+ isc_ret = CTL_HA_STATUS_SUCCESS;
for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
- int isc_ret;
- uint32_t cur_len, dma_length;
+ uint32_t cur_len;
uint8_t *tmp_ptr;
- rq->id = CTL_HA_DATA_CTL;
rq->command = command;
rq->context = io;
*/
cur_len = MIN(local_sglist[i].len - local_used,
remote_sglist[j].len - remote_used);
-
- /*
- * In this case, we have a size issue and need to decrease
- * the size, except in the case where we actually have less
- * than 8 bytes left. In that case, we need to increase
- * the DMA length to get the last bit.
- */
- if ((cur_len & 0x7) != 0) {
- if (cur_len > 0x7) {
- cur_len = cur_len - (cur_len & 0x7);
- dma_length = cur_len;
- } else {
- CTL_SIZE_8B(dma_length, cur_len);
- }
-
- } else
- dma_length = cur_len;
-
- /*
- * If we had to allocate memory for this I/O, instead of using
- * the non-cached mirror memory, we'll need to flush the cache
- * before trying to DMA to the other controller.
- *
- * We could end up doing this multiple times for the same
- * segment if we have a larger local segment than remote
- * segment. That shouldn't be an issue.
- */
- if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
- /*
- * XXX KDM use bus_dmamap_sync() here.
- */
- }
-
- rq->size = dma_length;
+ rq->size = cur_len;
tmp_ptr = (uint8_t *)local_sglist[i].addr;
tmp_ptr += local_used;
+#if 0
/* Use physical addresses when talking to ISC hardware */
if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
/* XXX KDM use busdma */
-#if 0
rq->local = vtophys(tmp_ptr);
-#endif
} else
rq->local = tmp_ptr;
+#else
+ KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
+ ("HA does not support BUS_ADDR"));
+ rq->local = tmp_ptr;
+#endif
tmp_ptr = (uint8_t *)remote_sglist[j].addr;
tmp_ptr += remote_used;
if (total_used >= io->scsiio.kern_data_len)
rq->callback = callback;
- if ((rq->size & 0x7) != 0) {
- printf("%s: warning: size %d is not on 8b boundary\n",
- __func__, rq->size);
- }
- if (((uintptr_t)rq->local & 0x7) != 0) {
- printf("%s: warning: local %p not on 8b boundary\n",
- __func__, rq->local);
- }
- if (((uintptr_t)rq->remote & 0x7) != 0) {
- printf("%s: warning: remote %p not on 8b boundary\n",
- __func__, rq->local);
- }
#if 0
printf("%s: %s: local %#x remote %#x size %d\n", __func__,
(command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
#endif
isc_ret = ctl_dt_single(rq);
- if (isc_ret == CTL_HA_STATUS_WAIT)
- continue;
-
- if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
- rq->ret = CTL_HA_STATUS_SUCCESS;
- } else {
- rq->ret = isc_ret;
- }
+ if (isc_ret > CTL_HA_STATUS_SUCCESS)
+ break;
+ }
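+ /*
+ * If the last request is not left pending in the DT layer
+ * (error or synchronous completion), report its status now.
+ */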
+ if (isc_ret != CTL_HA_STATUS_WAIT) {
+ rq->ret = isc_ret;
callback(rq);
- goto bailout;
}
-bailout:
- return (retval);
-
+ return (0);
}
static void
retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
ctl_datamove_remote_read_cb);
- if ((retval != 0)
- && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
+ if (retval != 0) {
/*
* Make sure we free memory if there was an error.. The
* ctl_datamove_remote_xfer() function will send the
*/
for (i = 0; i < io->scsiio.kern_sg_entries; i++)
free(io->io_hdr.local_sglist[i].addr, M_CTL);
+ free(io->io_hdr.remote_sglist, M_CTL);
+ io->io_hdr.remote_sglist = NULL;
+ io->io_hdr.local_sglist = NULL;
}
return;
static void
ctl_datamove_remote(union ctl_io *io)
{
- struct ctl_softc *softc;
- softc = control_softc;
+ mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
- mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
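+ /* A move interrupted by failover cannot proceed; complete the I/O. */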
+ if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
+ ctl_failover_io(io, /*have_lock*/ 0);
+ return;
+ }
/*
* Note that we look for an aborted I/O here, but don't do some of
return;
}
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
ctl_datamove_remote_write(io);
- } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
+ else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
ctl_datamove_remote_read(io);
- } else {
- union ctl_ha_msg msg;
- struct scsi_sense_data *sense;
- uint8_t sks[3];
- int retry_count;
-
- memset(&msg, 0, sizeof(msg));
-
- msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
- msg.hdr.status = CTL_SCSI_ERROR;
- msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
-
- retry_count = 4243;
-
- sense = &msg.scsi.sense_data;
- sks[0] = SSD_SCS_VALID;
- sks[1] = (retry_count >> 8) & 0xff;
- sks[2] = retry_count & 0xff;
-
- /* "Internal target failure" */
- scsi_set_sense_data(sense,
- /*sense_format*/ SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
- /*asc*/ 0x44,
- /*ascq*/ 0x00,
- /*type*/ SSD_ELEM_SKS,
- /*size*/ sizeof(sks),
- /*data*/ sks,
- SSD_ELEM_NONE);
-
- io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
- if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
- ctl_failover_io(io, /*have_lock*/ 1);
- return;
- }
-
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
- CTL_HA_STATUS_SUCCESS) {
- /* XXX KDM what to do if this fails? */
- }
- return;
+ else {
+ io->io_hdr.port_status = 31339;
+ ctl_send_datamove_done(io, /*have_lock*/ 0);
}
-
}
static int
struct ctl_lun *lun;
struct ctl_softc *softc = control_softc;
void (*fe_done)(union ctl_io *io);
- uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
+ union ctl_ha_msg msg;
+ uint32_t targ_port = io->io_hdr.nexus.targ_port;
CTL_DEBUG_PRINT(("ctl_process_done\n"));
- fe_done = softc->ctl_ports[targ_port]->fe_done;
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0)
+ fe_done = softc->ctl_ports[targ_port]->fe_done;
+ else
+ fe_done = NULL;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
* Tell the FETD or the other shelf controller we're done with this
* command. Note that only SCSI commands get to this point. Task
* management commands are completed above.
- *
- * We only send status to the other controller if we're in XFER
- * mode. In SER_ONLY mode, the I/O is done on the controller that
- * received the I/O (from CTL's perspective), and so the status is
- * generated there.
- *
- * XXX KDM if we hold the lock here, we could cause a deadlock
- * if the frontend comes back in in this context to queue
- * something.
*/
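+ /*
+ * In non-XFER modes, if this command was forwarded to the
+ * other controller for serialization, tell that controller
+ * we are done with it so it can drop its serialized copy.
+ */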
+ if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
+ (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
+ memset(&msg, 0, sizeof(msg));
+ msg.hdr.msg_type = CTL_MSG_FINISH_IO;
+ msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
+ msg.hdr.nexus = io->io_hdr.nexus;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
+ M_WAITOK);
+ }
if ((softc->ha_mode == CTL_HA_MODE_XFER)
&& (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
- union ctl_ha_msg msg;
-
memset(&msg, 0, sizeof(msg));
msg.hdr.msg_type = CTL_MSG_FINISH_IO;
msg.hdr.original_sc = io->io_hdr.original_sc;
msg.scsi.sense_residual = io->scsiio.sense_residual;
msg.scsi.residual = io->scsiio.residual;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
- sizeof(io->scsiio.sense_data));
+ io->scsiio.sense_len);
/*
* We copy this whether or not this is an I/O-related
* command. Otherwise, we'd have to go and check to see
&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
sizeof(msg.scsi.lbalen));
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
- sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
- /* XXX do something here */
- }
-
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
+ msg.scsi.sense_len, M_WAITOK);
ctl_free_io(io);
} else
fe_done(io);
if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
return;
- /*
- * We need to send a msg to the serializing shelf to finish the IO
- * as well. We don't send a finish message to the other shelf if
- * this is a task management command. Task management commands
- * aren't serialized in the OOA queue, but rather just executed on
- * both shelf controllers for commands that originated on that
- * controller.
- */
- if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
- && (io->io_hdr.io_type != CTL_IO_TASK)) {
- union ctl_ha_msg msg_io;
-
- msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
- msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
- if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
- sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
- }
- /* continue on to finish IO */
- }
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
struct ctl_lun *lun;
ctl_enqueue_done(io);
}
-int
-ctl_isc(struct ctl_scsiio *ctsio)
-{
- struct ctl_lun *lun;
- int retval;
-
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
-
- CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
-
- CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
-
- retval = lun->backend->data_submit((union ctl_io *)ctsio);
-
- return (retval);
-}
-
-
static void
ctl_work_thread(void *arg)
{
ctl_scsiio_precheck(softc, &io->scsiio);
continue;
}
- if (!ctl_pause_rtr) {
- io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
- mtx_unlock(&thr->queue_lock);
- retval = ctl_scsiio(&io->scsiio);
- if (retval != CTL_RETVAL_COMPLETE)
- CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
- continue;
- }
+ io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
+ if (io != NULL) {
+ STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ retval = ctl_scsiio(&io->scsiio);
+ if (retval != CTL_RETVAL_COMPLETE)
+ CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
+ continue;
}
/* Sleep until we have something to do. */
struct scsi_da_rw_recovery_page *rwpage;
struct ctl_logical_block_provisioning_page *page;
const char *attr;
+ union ctl_ha_msg msg;
uint64_t thres, val;
- int i, e;
+ int i, e, set;
CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
(lun->flags & CTL_LUN_OFFLINE) ||
lun->backend->lun_attr == NULL)
continue;
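+ /* In XFER mode, threshold state is tracked on the primary side only. */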
+ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
+ softc->ha_mode == CTL_HA_MODE_XFER)
+ continue;
rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
continue;
time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
lun->lasttpt = time_uptime;
ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
- }
+ set = 1;
+ } else
+ set = 0;
} else {
lun->lasttpt = 0;
ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
+ set = -1;
}
mtx_unlock(&lun->lun_lock);
+ if (set != 0 &&
+ lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
+ /* Send msg to other side. */
+ bzero(&msg.ua, sizeof(msg.ua));
+ msg.hdr.msg_type = CTL_MSG_UA;
+ msg.hdr.nexus.initid = -1;
+ msg.hdr.nexus.targ_port = -1;
+ msg.hdr.nexus.targ_lun = lun->lun;
+ msg.hdr.nexus.targ_mapped_lun = lun->lun;
+ msg.ua.ua_all = 1;
+ msg.ua.ua_set = (set > 0);
+ msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
+ mtx_unlock(&softc->ctl_lock); /* XXX */
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
+ sizeof(msg.ua), M_WAITOK);
+ mtx_lock(&softc->ctl_lock);
+ }
}
mtx_unlock(&softc->ctl_lock);
pause("-", CTL_LBP_PERIOD * hz);
wakeup(thr);
}
-#ifdef notyet
static void
ctl_enqueue_isc(union ctl_io *io)
{
wakeup(thr);
}
-/* Initialization and failover */
-
-void
-ctl_init_isc_msg(void)
-{
- printf("CTL: Still calling this thing\n");
-}
-
-/*
- * Init component
- * Initializes component into configuration defined by bootMode
- * (see hasc-sv.c)
- * returns hasc_Status:
- * OK
- * ERROR - fatal error
- */
-static ctl_ha_comp_status
-ctl_isc_init(struct ctl_ha_component *c)
-{
- ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
-
- c->status = ret;
- return ret;
-}
-
-/* Start component
- * Starts component in state requested. If component starts successfully,
- * it must set its own state to the requestrd state
- * When requested state is HASC_STATE_HA, the component may refine it
- * by adding _SLAVE or _MASTER flags.
- * Currently allowed state transitions are:
- * UNKNOWN->HA - initial startup
- * UNKNOWN->SINGLE - initial startup when no parter detected
- * HA->SINGLE - failover
- * returns ctl_ha_comp_status:
- * OK - component successfully started in requested state
- * FAILED - could not start the requested state, failover may
- * be possible
- * ERROR - fatal error detected, no future startup possible
- */
-static ctl_ha_comp_status
-ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
-{
- ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
-
- printf("%s: go\n", __func__);
-
- // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
- if (c->state == CTL_HA_STATE_UNKNOWN ) {
- control_softc->is_single = 0;
- if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
- != CTL_HA_STATUS_SUCCESS) {
- printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
- ret = CTL_HA_COMP_STATUS_ERROR;
- }
- } else if (CTL_HA_STATE_IS_HA(c->state)
- && CTL_HA_STATE_IS_SINGLE(state)){
- // HA->SINGLE transition
- ctl_failover();
- control_softc->is_single = 1;
- } else {
- printf("ctl_isc_start:Invalid state transition %X->%X\n",
- c->state, state);
- ret = CTL_HA_COMP_STATUS_ERROR;
- }
- if (CTL_HA_STATE_IS_SINGLE(state))
- control_softc->is_single = 1;
-
- c->state = state;
- c->status = ret;
- return ret;
-}
-
-/*
- * Quiesce component
- * The component must clear any error conditions (set status to OK) and
- * prepare itself to another Start call
- * returns ctl_ha_comp_status:
- * OK
- * ERROR
- */
-static ctl_ha_comp_status
-ctl_isc_quiesce(struct ctl_ha_component *c)
-{
- int ret = CTL_HA_COMP_STATUS_OK;
-
- ctl_pause_rtr = 1;
- c->status = ret;
- return ret;
-}
-
-struct ctl_ha_component ctl_ha_component_ctlisc =
-{
- .name = "CTL ISC",
- .state = CTL_HA_STATE_UNKNOWN,
- .init = ctl_isc_init,
- .start = ctl_isc_start,
- .quiesce = ctl_isc_quiesce
-};
-#endif
-
/*
* vim: ts=8
*/
--- /dev/null
+/*-
+ * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/types.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/uio.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <vm/uma.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_error.h>
+
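+/*
+ * Compatibility shims: the mbufq API and sbavail() only appeared in
+ * FreeBSD 11, so provide minimal local equivalents for older kernels.
+ */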
+#if (__FreeBSD_version < 1100000)
+struct mbufq {
+ struct mbuf *head;
+ struct mbuf *tail;
+};
+
+static void
+mbufq_init(struct mbufq *q, int limit)
+{
+
+ q->head = q->tail = NULL;
+}
+
+static void
+mbufq_drain(struct mbufq *q)
+{
+ struct mbuf *m;
+
+ while ((m = q->head) != NULL) {
+ q->head = m->m_nextpkt;
+ m_freem(m);
+ }
+ q->tail = NULL;
+}
+
+static struct mbuf *
+mbufq_dequeue(struct mbufq *q)
+{
+ struct mbuf *m;
+
+ m = q->head;
+ if (m) {
+ if (q->tail == m)
+ q->tail = NULL;
+ q->head = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ }
+ return (m);
+}
+
+static void
+mbufq_enqueue(struct mbufq *q, struct mbuf *m)
+{
+
+ m->m_nextpkt = NULL;
+ if (q->tail)
+ q->tail->m_nextpkt = m;
+ else
+ q->head = m;
+ q->tail = m;
+}
+
+static u_int
+sbavail(struct sockbuf *sb)
+{
+ return (sb->sb_cc);
+}
+
+#if (__FreeBSD_version < 1000000)
+#define mtodo(m, o) ((void *)(((m)->m_data) + (o)))
+#endif
+#endif
+
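+/*
+ * Wire format: every message is prefixed with this header, giving the
+ * destination channel and the length of the payload that follows.
+ */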
+struct ha_msg_wire {
+ uint32_t channel;
+ uint32_t length;
+};
+
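+/*
+ * DMA transfer request as passed over the link.  The local and remote
+ * buffer pointers are only meaningful on their respective nodes.
+ */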
+struct ha_dt_msg_wire {
+ ctl_ha_dt_cmd command;
+ uint32_t size;
+ uint8_t *local;
+ uint8_t *remote;
+};
+
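+/*
+ * Single global state of the HA link: peer address, listen and data
+ * sockets, send queue and the list of outstanding DMA requests.
+ */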
+struct ha_softc {
+ struct ctl_softc *ha_ctl_softc;
+ ctl_evt_handler ha_handler[CTL_HA_CHAN_MAX];
+ char ha_peer[128];
+ struct sockaddr_in ha_peer_in;
+ struct socket *ha_lso;
+ struct socket *ha_so;
+ struct mbufq ha_sendq;
+ struct mbuf *ha_sending;
+ struct mtx ha_lock;
+ int ha_connect;
+ int ha_listen;
+ int ha_connected;
+ int ha_receiving;
+ int ha_wakeup;
+ int ha_disconnect;
+ TAILQ_HEAD(, ctl_ha_dt_req) ha_dts;
+} ha_softc;
+
+extern struct ctl_softc *control_softc;
+
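+/* Wake up the connection thread. */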
+static void
+ctl_ha_conn_wake(struct ha_softc *softc)
+{
+
+ mtx_lock(&softc->ha_lock);
+ softc->ha_wakeup = 1;
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+}
+
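+/* Listen socket upcall: a new connection is ready to be accepted. */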
+static int
+ctl_ha_lupcall(struct socket *so, void *arg, int waitflag)
+{
+ struct ha_softc *softc = arg;
+
+ ctl_ha_conn_wake(softc);
+ return (SU_OK);
+}
+
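+/* Receive upcall: wake the receive thread waiting on the data socket. */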
+static int
+ctl_ha_rupcall(struct socket *so, void *arg, int waitflag)
+{
+ struct ha_softc *softc = arg;
+
+ wakeup(&softc->ha_receiving);
+ return (SU_OK);
+}
+
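+/* Send upcall: socket buffer space is available again. */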
+static int
+ctl_ha_supcall(struct socket *so, void *arg, int waitflag)
+{
+ struct ha_softc *softc = arg;
+
+ ctl_ha_conn_wake(softc);
+ return (SU_OK);
+}
+
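+/*
+ * Deliver an event to the handler of one channel, or to all registered
+ * handlers when ch is CTL_HA_CHAN_MAX.
+ */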
+static void
+ctl_ha_evt(struct ha_softc *softc, ctl_ha_channel ch, ctl_ha_event evt,
+ int param)
+{
+ int i;
+
+ if (ch < CTL_HA_CHAN_MAX) {
+ if (softc->ha_handler[ch])
+ softc->ha_handler[ch](ch, evt, param);
+ return;
+ }
+ for (i = 0; i < CTL_HA_CHAN_MAX; i++) {
+ if (softc->ha_handler[i])
+ softc->ha_handler[i](i, evt, param);
+ }
+}
+
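+/*
+ * Tear down the data socket: drain the send queue, stop the receive
+ * thread, clear the upcalls and report the link state change.
+ */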
+static void
+ctl_ha_close(struct ha_softc *softc)
+{
+ struct socket *so = softc->ha_so;
+ int report = 0;
+
+ if (softc->ha_connected || softc->ha_disconnect) {
+ softc->ha_connected = 0;
+ mbufq_drain(&softc->ha_sendq);
+ m_freem(softc->ha_sending);
+ softc->ha_sending = NULL;
+ report = 1;
+ }
+ if (so) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ soupcall_clear(so, SO_RCV);
+ while (softc->ha_receiving) {
+ wakeup(&softc->ha_receiving);
+ msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+ 0, "ha_rx exit", 0);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_LOCK(&so->so_snd);
+ soupcall_clear(so, SO_SND);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ softc->ha_so = NULL;
+ if (softc->ha_connect)
+ pause("reconnect", hz / 2);
+ soclose(so);
+ }
+ if (report) {
+ ctl_ha_evt(softc, CTL_HA_CHAN_MAX, CTL_HA_EVT_LINK_CHANGE,
+ (softc->ha_connect || softc->ha_listen) ?
+ CTL_HA_LINK_UNKNOWN : CTL_HA_LINK_OFFLINE);
+ }
+}
+
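+/* Close the listen socket. */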
+static void
+ctl_ha_lclose(struct ha_softc *softc)
+{
+
+ if (softc->ha_lso) {
+ SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+ soupcall_clear(softc->ha_lso, SO_RCV);
+ SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+ soclose(softc->ha_lso);
+ softc->ha_lso = NULL;
+ }
+}
+
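+/*
+ * Receive thread.  Wait until either a full header or a full payload
+ * is buffered, then read the header here, or report CTL_HA_EVT_MSG_RECV
+ * to the channel handler, which is expected to consume the payload via
+ * ctl_ha_msg_recv().
+ */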
+static void
+ctl_ha_rx_thread(void *arg)
+{
+ struct ha_softc *softc = arg;
+ struct socket *so = softc->ha_so;
+ struct ha_msg_wire wire_hdr;
+ struct uio uio;
+ struct iovec iov;
+ int error, flags, next;
+
+ bzero(&wire_hdr, sizeof(wire_hdr));
+ while (1) {
+ if (wire_hdr.length > 0)
+ next = wire_hdr.length;
+ else
+ next = sizeof(wire_hdr);
+ SOCKBUF_LOCK(&so->so_rcv);
+ while (sbavail(&so->so_rcv) < next) {
+ if (softc->ha_connected == 0 || so->so_error ||
+ (so->so_rcv.sb_state & SBS_CANTRCVMORE)) {
+ goto errout;
+ }
+ so->so_rcv.sb_lowat = next;
+ msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
+ 0, "-", 0);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (wire_hdr.length == 0) {
+ iov.iov_base = &wire_hdr;
+ iov.iov_len = sizeof(wire_hdr);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_rw = UIO_READ;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_td = curthread;
+ uio.uio_resid = sizeof(wire_hdr);
+ flags = MSG_DONTWAIT;
+ error = soreceive(softc->ha_so, NULL, &uio, NULL,
+ NULL, &flags);
+ if (error != 0) {
+ printf("%s: header receive error %d\n",
+ __func__, error);
+ SOCKBUF_LOCK(&so->so_rcv);
+ goto errout;
+ }
+ } else {
+ ctl_ha_evt(softc, wire_hdr.channel,
+ CTL_HA_EVT_MSG_RECV, wire_hdr.length);
+ wire_hdr.length = 0;
+ }
+ }
+
+errout:
+ softc->ha_receiving = 0;
+ wakeup(&softc->ha_receiving);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ ctl_ha_conn_wake(softc);
+ kthread_exit();
+}
+
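+/*
+ * Push queued messages into the socket until the queue is empty or the
+ * socket buffer is full; sb_lowat is tuned so the send upcall fires as
+ * soon as the blocked message can fit.
+ */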
+static void
+ctl_ha_send(struct ha_softc *softc)
+{
+ struct socket *so = softc->ha_so;
+ int error;
+
+ while (1) {
+ if (softc->ha_sending == NULL) {
+ mtx_lock(&softc->ha_lock);
+ softc->ha_sending = mbufq_dequeue(&softc->ha_sendq);
+ mtx_unlock(&softc->ha_lock);
+ if (softc->ha_sending == NULL) {
+ so->so_snd.sb_lowat = so->so_snd.sb_hiwat + 1;
+ break;
+ }
+ }
+ SOCKBUF_LOCK(&so->so_snd);
+ if (sbspace(&so->so_snd) < softc->ha_sending->m_pkthdr.len) {
+ so->so_snd.sb_lowat = softc->ha_sending->m_pkthdr.len;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ break;
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = sosend(softc->ha_so, NULL, NULL, softc->ha_sending,
+ NULL, MSG_DONTWAIT, curthread);
+ softc->ha_sending = NULL;
+ if (error != 0) {
+ printf("%s: sosend() error %d\n", __func__, error);
+ return;
+ }
+ }
+}
+
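+/*
+ * Configure a freshly created or accepted data socket: reserve buffer
+ * space, install the upcalls and enable TCP keepalive and NODELAY.
+ */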
+static void
+ctl_ha_sock_setup(struct ha_softc *softc)
+{
+ struct sockopt opt;
+ struct socket *so = softc->ha_so;
+ int error, val;
+
+ val = 1024 * 1024;
+ error = soreserve(so, val, val);
+ if (error)
+ printf("%s: soreserve failed %d\n", __func__, error);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_lowat = sizeof(struct ha_msg_wire);
+ soupcall_set(so, SO_RCV, ctl_ha_rupcall, softc);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_lowat = sizeof(struct ha_msg_wire);
+ soupcall_set(so, SO_SND, ctl_ha_supcall, softc);
+ SOCKBUF_UNLOCK(&so->so_snd);
+
+ bzero(&opt, sizeof(struct sockopt));
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = SOL_SOCKET;
+ opt.sopt_name = SO_KEEPALIVE;
+ opt.sopt_val = &val;
+ opt.sopt_valsize = sizeof(val);
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPALIVE setting failed %d\n", __func__, error);
+
+ opt.sopt_level = IPPROTO_TCP;
+ opt.sopt_name = TCP_NODELAY;
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: NODELAY setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPINIT;
+ val = 3;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPINIT setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPIDLE;
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPIDLE setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPINTVL;
+ val = 1;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPINTVL setting failed %d\n", __func__, error);
+
+ opt.sopt_name = TCP_KEEPCNT;
+ val = 5;
+ error = sosetopt(so, &opt);
+ if (error)
+ printf("%s: KEEPCNT setting failed %d\n", __func__, error);
+}
+
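+/* Initiate an outgoing connection to the configured peer address. */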
+static int
+ctl_ha_connect(struct ha_softc *softc)
+{
+ struct thread *td = curthread;
+ struct socket *so;
+ int error;
+
+ /* Create the socket */
+ error = socreate(PF_INET, &so, SOCK_STREAM,
+ IPPROTO_TCP, td->td_ucred, td);
+ if (error != 0) {
+ printf("%s: socreate() error %d\n", __func__, error);
+ return (error);
+ }
+ softc->ha_so = so;
+ ctl_ha_sock_setup(softc);
+
+ error = soconnect(so, (struct sockaddr *)&softc->ha_peer_in, td);
+ if (error != 0) {
+ printf("%s: soconnect() error %d\n", __func__, error);
+ goto out;
+ }
+ return (0);
+
+out:
+ ctl_ha_close(softc);
+ return (error);
+}
+
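+/*
+ * Accept one pending connection from the listen socket.  This is an
+ * in-kernel accept, so the completed connection is dequeued manually
+ * instead of going through a file descriptor.
+ */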
+static int
+ctl_ha_accept(struct ha_softc *softc)
+{
+ struct socket *so;
+ struct sockaddr *sap;
+ int error;
+
+ ACCEPT_LOCK();
+ if (softc->ha_lso->so_rcv.sb_state & SBS_CANTRCVMORE)
+ softc->ha_lso->so_error = ECONNABORTED;
+ if (softc->ha_lso->so_error) {
+ error = softc->ha_lso->so_error;
+ softc->ha_lso->so_error = 0;
+ ACCEPT_UNLOCK();
+ printf("%s: socket error %d\n", __func__, error);
+ goto out;
+ }
+ so = TAILQ_FIRST(&softc->ha_lso->so_comp);
+ if (so == NULL) {
+ ACCEPT_UNLOCK();
+ return (EWOULDBLOCK);
+ }
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("%s: so SQ_INCOMP", __func__));
+ KASSERT(so->so_qstate & SQ_COMP, ("%s: so not SQ_COMP", __func__));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+ soref(so); /* reference for the HA code; no file descriptor here */
+
+ TAILQ_REMOVE(&softc->ha_lso->so_comp, so, so_list);
+ softc->ha_lso->so_qlen--;
+ so->so_state |= SS_NBIO;
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ sap = NULL;
+ error = soaccept(so, &sap);
+ if (error != 0) {
+ printf("%s: soaccept() error %d\n", __func__, error);
+ if (sap != NULL)
+ free(sap, M_SONAME);
+ goto out;
+ }
+ if (sap != NULL)
+ free(sap, M_SONAME);
+ softc->ha_so = so;
+ ctl_ha_sock_setup(softc);
+ return (0);
+
+out:
+ ctl_ha_lclose(softc);
+ return (error);
+}
+
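+/* Create the listen socket, if needed, then bind and listen on it. */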
+static int
+ctl_ha_listen(struct ha_softc *softc)
+{
+ struct thread *td = curthread;
+ struct sockopt opt;
+ int error, val;
+
+ /* Create the socket */
+ if (softc->ha_lso == NULL) {
+ error = socreate(PF_INET, &softc->ha_lso, SOCK_STREAM,
+ IPPROTO_TCP, td->td_ucred, td);
+ if (error != 0) {
+ printf("%s: socreate() error %d\n", __func__, error);
+ return (error);
+ }
+ bzero(&opt, sizeof(struct sockopt));
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = SOL_SOCKET;
+ opt.sopt_name = SO_REUSEADDR;
+ opt.sopt_val = &val;
+ opt.sopt_valsize = sizeof(val);
+ val = 1;
+ error = sosetopt(softc->ha_lso, &opt);
+ if (error) {
+ printf("%s: REUSEADDR setting failed %d\n",
+ __func__, error);
+ }
+ SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
+ soupcall_set(softc->ha_lso, SO_RCV, ctl_ha_lupcall, softc);
+ SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
+ }
+
+ error = sobind(softc->ha_lso, (struct sockaddr *)&softc->ha_peer_in, td);
+ if (error != 0) {
+ printf("%s: sobind() error %d\n", __func__, error);
+ goto out;
+ }
+ error = solisten(softc->ha_lso, 1, td);
+ if (error != 0) {
+ printf("%s: solisten() error %d\n", __func__, error);
+ goto out;
+ }
+ return (0);
+
+out:
+ ctl_ha_lclose(softc);
+ return (error);
+}
+
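+/*
+ * Connection thread.  Drives the socket state machine: handles
+ * disconnect requests, (re)establishes the connection in connect or
+ * listen mode, starts the receive thread once connected and flushes
+ * the send queue.
+ */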
+static void
+ctl_ha_conn_thread(void *arg)
+{
+ struct ha_softc *softc = arg;
+ int error;
+
+ while (1) {
+ if (softc->ha_disconnect) {
+ ctl_ha_close(softc);
+ ctl_ha_lclose(softc);
+ softc->ha_disconnect = 0;
+ } else if (softc->ha_so != NULL &&
+ (softc->ha_so->so_error ||
+ softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+ ctl_ha_close(softc);
+ if (softc->ha_so == NULL) {
+ if (softc->ha_lso != NULL)
+ ctl_ha_accept(softc);
+ else if (softc->ha_listen)
+ ctl_ha_listen(softc);
+ else if (softc->ha_connect)
+ ctl_ha_connect(softc);
+ }
+ if (softc->ha_so != NULL) {
+ if (softc->ha_connected == 0 &&
+ softc->ha_so->so_error == 0 &&
+ (softc->ha_so->so_state & SS_ISCONNECTING) == 0) {
+ softc->ha_connected = 1;
+ ctl_ha_evt(softc, CTL_HA_CHAN_MAX,
+ CTL_HA_EVT_LINK_CHANGE,
+ CTL_HA_LINK_ONLINE);
+ softc->ha_receiving = 1;
+ error = kproc_kthread_add(ctl_ha_rx_thread,
+ softc, &softc->ha_ctl_softc->ctl_proc,
+ NULL, 0, 0, "ctl", "ha_rx");
+ if (error != 0) {
+ printf("Error creating CTL HA rx thread!\n");
+ softc->ha_receiving = 0;
+ softc->ha_disconnect = 1;
+ }
+ }
+ ctl_ha_send(softc);
+ }
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_so != NULL &&
+ (softc->ha_so->so_error ||
+ softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
+ ;
+ else if (!softc->ha_wakeup)
+ msleep(&softc->ha_wakeup, &softc->ha_lock, 0, "-", hz);
+ softc->ha_wakeup = 0;
+ mtx_unlock(&softc->ha_lock);
+ }
+}
+
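+/*
+ * The ha_peer sysctl accepts "connect a.b.c.d[:port]" on one node and
+ * "listen a.b.c.d[:port]" on the other (the port defaults to 999); an
+ * empty string tears the link down.  Given the sysctl tree used below,
+ * the OID should surface as kern.cam.ctl.ha_peer, e.g.:
+ *
+ *	sysctl kern.cam.ctl.ha_peer="listen 10.0.0.1:7777"
+ */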
+static int
+ctl_ha_peer_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct ha_softc *softc = (struct ha_softc *)arg1;
+ struct sockaddr_in *sa;
+ int error, b1, b2, b3, b4, p, num;
+
+ error = sysctl_handle_string(oidp, softc->ha_peer,
+ sizeof(softc->ha_peer), req);
+ if ((error != 0) || (req->newptr == NULL))
+ return (error);
+
+ sa = &softc->ha_peer_in;
+ mtx_lock(&softc->ha_lock);
+ if ((num = sscanf(softc->ha_peer, "connect %d.%d.%d.%d:%d",
+ &b1, &b2, &b3, &b4, &p)) >= 4) {
+ softc->ha_connect = 1;
+ softc->ha_listen = 0;
+ } else if ((num = sscanf(softc->ha_peer, "listen %d.%d.%d.%d:%d",
+ &b1, &b2, &b3, &b4, &p)) >= 4) {
+ softc->ha_connect = 0;
+ softc->ha_listen = 1;
+ } else {
+ softc->ha_connect = 0;
+ softc->ha_listen = 0;
+ if (softc->ha_peer[0] != 0)
+ error = EINVAL;
+ }
+ if (softc->ha_connect || softc->ha_listen) {
+ memset(sa, 0, sizeof(*sa));
+ sa->sin_len = sizeof(struct sockaddr_in);
+ sa->sin_family = AF_INET;
+ sa->sin_port = htons((num >= 5) ? p : 999);
+ sa->sin_addr.s_addr =
+ htonl((b1 << 24) + (b2 << 16) + (b3 << 8) + b4);
+ }
+ softc->ha_disconnect = 1;
+ softc->ha_wakeup = 1;
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+ return (error);
+}
+
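+/* Register an event handler for the given message channel. */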
+ctl_ha_status
+ctl_ha_msg_register(ctl_ha_channel channel, ctl_evt_handler handler)
+{
+ struct ha_softc *softc = &ha_softc;
+
+ KASSERT(channel < CTL_HA_CHAN_MAX,
+ ("Wrong CTL HA channel %d", channel));
+ softc->ha_handler[channel] = handler;
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
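+/* Remove the event handler of the given message channel. */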
+ctl_ha_status
+ctl_ha_msg_deregister(ctl_ha_channel channel)
+{
+ struct ha_softc *softc = &ha_softc;
+
+ KASSERT(channel < CTL_HA_CHAN_MAX,
+ ("Wrong CTL HA channel %d", channel));
+ softc->ha_handler[channel] = NULL;
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+/*
+ * Receive a message of the specified size.  Called from a channel event
+ * handler in response to CTL_HA_EVT_MSG_RECV; any error is treated as
+ * fatal for HA sanity and triggers a disconnect.
+ */
+ctl_ha_status
+ctl_ha_msg_recv(ctl_ha_channel channel, void *addr, size_t len,
+ int wait)
+{
+ struct ha_softc *softc = &ha_softc;
+ struct uio uio;
+ struct iovec iov;
+ int error, flags;
+
+ if (!softc->ha_connected)
+ return (CTL_HA_STATUS_DISCONNECT);
+
+ iov.iov_base = addr;
+ iov.iov_len = len;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_rw = UIO_READ;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_td = curthread;
+ uio.uio_resid = len;
+ flags = wait ? 0 : MSG_DONTWAIT;
+ error = soreceive(softc->ha_so, NULL, &uio, NULL, NULL, &flags);
+ if (error == 0)
+ return (CTL_HA_STATUS_SUCCESS);
+
+ /* Consider all errors fatal for HA sanity. */
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_connected) {
+ softc->ha_disconnect = 1;
+ softc->ha_wakeup = 1;
+ wakeup(&softc->ha_wakeup);
+ }
+ mtx_unlock(&softc->ha_lock);
+ return (CTL_HA_STATUS_ERROR);
+}
+
+/*
+ * Send a message of the specified size, gathered from up to two buffers.
+ */
+ctl_ha_status
+ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr, size_t len,
+ const void *addr2, size_t len2, int wait)
+{
+ struct ha_softc *softc = &ha_softc;
+ struct mbuf *mb, *newmb;
+ struct ha_msg_wire hdr;
+ size_t copylen, off;
+
+ if (!softc->ha_connected)
+ return (CTL_HA_STATUS_DISCONNECT);
+
+ newmb = m_getm2(NULL, sizeof(hdr) + len + len2, wait, MT_DATA,
+ M_PKTHDR);
+ if (newmb == NULL) {
+ /* Consider all errors fatal for HA sanity. */
+ mtx_lock(&softc->ha_lock);
+ if (softc->ha_connected) {
+ softc->ha_disconnect = 1;
+ softc->ha_wakeup = 1;
+ wakeup(&softc->ha_wakeup);
+ }
+ mtx_unlock(&softc->ha_lock);
+ printf("%s: Can't allocate mbuf chain\n", __func__);
+ return (CTL_HA_STATUS_ERROR);
+ }
+ hdr.channel = channel;
+ hdr.length = len + len2;
+ mb = newmb;
+ memcpy(mtodo(mb, 0), &hdr, sizeof(hdr));
+ mb->m_len += sizeof(hdr);
+ off = 0;
+ for (; mb != NULL && off < len; mb = mb->m_next) {
+ copylen = min(M_TRAILINGSPACE(mb), len - off);
+ memcpy(mtodo(mb, mb->m_len), (const char *)addr + off, copylen);
+ mb->m_len += copylen;
+ off += copylen;
+ if (off == len)
+ break;
+ }
+ KASSERT(off == len, ("%s: off (%zu) != len (%zu)", __func__,
+ off, len));
+ off = 0;
+ for (; mb != NULL && off < len2; mb = mb->m_next) {
+ copylen = min(M_TRAILINGSPACE(mb), len2 - off);
+ memcpy(mtodo(mb, mb->m_len), (const char *)addr2 + off, copylen);
+ mb->m_len += copylen;
+ off += copylen;
+ }
+ KASSERT(off == len2, ("%s: off (%zu) != len2 (%zu)", __func__,
+ off, len2));
+ newmb->m_pkthdr.len = sizeof(hdr) + len + len2;
+
+ mtx_lock(&softc->ha_lock);
+ if (!softc->ha_connected) {
+ mtx_unlock(&softc->ha_lock);
+ m_freem(newmb);
+ return (CTL_HA_STATUS_DISCONNECT);
+ }
+ mbufq_enqueue(&softc->ha_sendq, newmb);
+ softc->ha_wakeup = 1;
+ mtx_unlock(&softc->ha_lock);
+ wakeup(&softc->ha_wakeup);
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_send(ctl_ha_channel channel, const void *addr, size_t len,
+ int wait)
+{
+
+ return (ctl_ha_msg_send2(channel, addr, len, NULL, 0, wait));
+}
+
+/*
+ * Allocate a data transfer request structure.
+ */
+struct ctl_ha_dt_req *
+ctl_dt_req_alloc(void)
+{
+
+ return (malloc(sizeof(struct ctl_ha_dt_req), M_CTL, M_WAITOK | M_ZERO));
+}
+
+/*
+ * Free a data transfer request structure.
+ */
+void
+ctl_dt_req_free(struct ctl_ha_dt_req *req)
+{
+
+ free(req, M_CTL);
+}
+
+/*
+ * Issue a DMA request for a single buffer.  A READ with a callback is
+ * queued and completed asynchronously by the DATA channel event handler
+ * (CTL_HA_STATUS_WAIT is returned); a WRITE carries the local buffer
+ * inline with the request.
+ */
+ctl_ha_status
+ctl_dt_single(struct ctl_ha_dt_req *req)
+{
+ struct ha_softc *softc = &ha_softc;
+ struct ha_dt_msg_wire wire_dt;
+ ctl_ha_status status;
+
+ wire_dt.command = req->command;
+ wire_dt.size = req->size;
+ wire_dt.local = req->local;
+ wire_dt.remote = req->remote;
+ if (req->command == CTL_HA_DT_CMD_READ && req->callback != NULL) {
+ mtx_lock(&softc->ha_lock);
+ TAILQ_INSERT_TAIL(&softc->ha_dts, req, links);
+ mtx_unlock(&softc->ha_lock);
+ ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt, sizeof(wire_dt),
+ M_WAITOK);
+ return (CTL_HA_STATUS_WAIT);
+ }
+ if (req->command == CTL_HA_DT_CMD_READ) {
+ status = ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), M_WAITOK);
+ } else {
+ status = ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), req->local, req->size, M_WAITOK);
+ }
+ return (status);
+}
+
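+/*
+ * Handle events on the DATA channel: turn received READ requests into
+ * WRITEs carrying the data back, complete outstanding READ requests
+ * when their data arrives, and fail all queued requests on link loss.
+ */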
+static void
+ctl_dt_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
+{
+ struct ha_softc *softc = &ha_softc;
+ struct ctl_ha_dt_req *req;
+ ctl_ha_status isc_status;
+
+ if (event == CTL_HA_EVT_MSG_RECV) {
+ struct ha_dt_msg_wire wire_dt;
+ uint8_t *tmp;
+ int size;
+
+ size = min(sizeof(wire_dt), param);
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA, &wire_dt,
+ size, M_WAITOK);
+ if (isc_status != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: Error receiving message: %d\n",
+ __func__, isc_status);
+ return;
+ }
+
+ if (wire_dt.command == CTL_HA_DT_CMD_READ) {
+ wire_dt.command = CTL_HA_DT_CMD_WRITE;
+ tmp = wire_dt.local;
+ wire_dt.local = wire_dt.remote;
+ wire_dt.remote = tmp;
+ ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
+ sizeof(wire_dt), wire_dt.local, wire_dt.size,
+ M_WAITOK);
+ } else if (wire_dt.command == CTL_HA_DT_CMD_WRITE) {
+ isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA,
+ wire_dt.remote, wire_dt.size, M_WAITOK);
+ mtx_lock(&softc->ha_lock);
+ TAILQ_FOREACH(req, &softc->ha_dts, links) {
+ if (req->local == wire_dt.remote) {
+ TAILQ_REMOVE(&softc->ha_dts, req, links);
+ break;
+ }
+ }
+ mtx_unlock(&softc->ha_lock);
+ if (req) {
+ req->ret = isc_status;
+ req->callback(req);
+ }
+ }
+ } else if (event == CTL_HA_EVT_LINK_CHANGE) {
+ CTL_DEBUG_PRINT(("%s: Link state change to %d\n", __func__,
+ param));
+ if (param != CTL_HA_LINK_ONLINE) {
+ mtx_lock(&softc->ha_lock);
+ while ((req = TAILQ_FIRST(&softc->ha_dts)) != NULL) {
+ TAILQ_REMOVE(&softc->ha_dts, req, links);
+ mtx_unlock(&softc->ha_lock);
+ req->ret = CTL_HA_STATUS_DISCONNECT;
+ req->callback(req);
+ mtx_lock(&softc->ha_lock);
+ }
+ mtx_unlock(&softc->ha_lock);
+ }
+ } else {
+ printf("%s: Unknown event %d\n", __func__, event);
+ }
+}
+
+
+ctl_ha_status
+ctl_ha_msg_init(struct ctl_softc *ctl_softc)
+{
+ struct ha_softc *softc = &ha_softc;
+ int error;
+
+ softc->ha_ctl_softc = ctl_softc;
+ mtx_init(&softc->ha_lock, "CTL HA mutex", NULL, MTX_DEF);
+ mbufq_init(&softc->ha_sendq, INT_MAX);
+ TAILQ_INIT(&softc->ha_dts);
+ error = kproc_kthread_add(ctl_ha_conn_thread, softc,
+ &ctl_softc->ctl_proc, NULL, 0, 0, "ctl", "ha_tx");
+ if (error != 0) {
+ printf("error creating CTL HA connection thread!\n");
+ mtx_destroy(&softc->ha_lock);
+ return (CTL_HA_STATUS_ERROR);
+ }
+ SYSCTL_ADD_PROC(&ctl_softc->sysctl_ctx,
+ SYSCTL_CHILDREN(ctl_softc->sysctl_tree),
+ OID_AUTO, "ha_peer", CTLTYPE_STRING | CTLFLAG_RWTUN,
+ softc, 0, ctl_ha_peer_sysctl, "A", "HA peer connection method");
+
+ if (ctl_ha_msg_register(CTL_HA_CHAN_DATA, ctl_dt_event_handler)
+ != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: ctl_ha_msg_register failed.\n", __func__);
+ }
+
+ return (CTL_HA_STATUS_SUCCESS);
+}
+
+ctl_ha_status
+ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc)
+{
+ struct ha_softc *softc = &ha_softc;
+
+ if (ctl_ha_msg_deregister(CTL_HA_CHAN_DATA) != CTL_HA_STATUS_SUCCESS) {
+ printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
+ }
+
+ mtx_destroy(&softc->ha_lock);
+ return (CTL_HA_STATUS_SUCCESS);
+}