ia64/xen-unstable

view xen/drivers/scsi/aacraid/aachba.c @ 945:db2e1ea917df

bitkeeper revision 1.596.1.3 (3fb3b41eWUoRU0H8A0jEX5roXjxKkA)

Many files:
Greatly simplified Xen softirqs. They are now only executed in outermost Xen activation; they are never called within an irq context.
author kaf24@scramble.cl.cam.ac.uk
date Thu Nov 13 16:41:02 2003 +0000 (2003-11-13)
parents 3946af49a538
children 890460f07ddf
line source
1 /*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/types.h>
30 #include <linux/sched.h>
31 #include <linux/pci.h>
32 #include <linux/spinlock.h>
33 #include <linux/slab.h>
34 /*#include <linux/completion.h>*/
35 /*#include <asm/semaphore.h>*/
36 #include <asm/uaccess.h>
37 #define MAJOR_NR SCSI_DISK0_MAJOR /* For DEVICE_NR() */
38 #include <linux/blk.h>
39 #include "scsi.h"
40 #include "hosts.h"
41 #include "sd.h"
43 #include "aacraid.h"
45 /* SCSI Commands */
46 /* TODO: dmb - use the ones defined in include/scsi/scsi.h */
48 #define SS_TEST 0x00 /* Test unit ready */
49 #define SS_REZERO 0x01 /* Rezero unit */
50 #define SS_REQSEN 0x03 /* Request Sense */
51 #define SS_REASGN 0x07 /* Reassign blocks */
52 #define SS_READ 0x08 /* Read 6 */
53 #define SS_WRITE 0x0A /* Write 6 */
54 #define SS_INQUIR 0x12 /* inquiry */
55 #define SS_ST_SP 0x1B /* Start/Stop unit */
56 #define SS_LOCK 0x1E /* prevent/allow medium removal */
57 #define SS_RESERV 0x16 /* Reserve */
58 #define SS_RELES 0x17 /* Release */
59 #define SS_MODESEN 0x1A /* Mode Sense 6 */
60 #define SS_RDCAP 0x25 /* Read Capacity */
61 #define SM_READ 0x28 /* Read 10 */
62 #define SM_WRITE 0x2A /* Write 10 */
63 #define SS_SEEK 0x2B /* Seek */
65 /* values for inqd_pdt: Peripheral device type in plain English */
66 #define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
67 #define INQD_PDT_PROC 0x03 /* Processor device */
68 #define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
69 #define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
70 #define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
71 #define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
73 #define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
74 #define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
76 #define TARGET_LUN_TO_CONTAINER(target, lun) (target)
77 #define CONTAINER_TO_TARGET(cont) ((cont))
78 #define CONTAINER_TO_LUN(cont) (0)
80 #define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
82 #define MAX_DRIVER_SG_SEGMENT_COUNT 17
84 /*
85 * Sense keys
86 */
87 #define SENKEY_NO_SENSE 0x00
88 #define SENKEY_UNDEFINED 0x01
89 #define SENKEY_NOT_READY 0x02
90 #define SENKEY_MEDIUM_ERR 0x03
91 #define SENKEY_HW_ERR 0x04
92 #define SENKEY_ILLEGAL 0x05
93 #define SENKEY_ATTENTION 0x06
94 #define SENKEY_PROTECTED 0x07
95 #define SENKEY_BLANK 0x08
96 #define SENKEY_V_UNIQUE 0x09
97 #define SENKEY_CPY_ABORT 0x0A
98 #define SENKEY_ABORT 0x0B
99 #define SENKEY_EQUAL 0x0C
100 #define SENKEY_VOL_OVERFLOW 0x0D
101 #define SENKEY_MISCOMP 0x0E
102 #define SENKEY_RESERVED 0x0F
104 /*
105 * Sense codes
106 */
108 #define SENCODE_NO_SENSE 0x00
109 #define SENCODE_END_OF_DATA 0x00
110 #define SENCODE_BECOMING_READY 0x04
111 #define SENCODE_INIT_CMD_REQUIRED 0x04
112 #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
113 #define SENCODE_INVALID_COMMAND 0x20
114 #define SENCODE_LBA_OUT_OF_RANGE 0x21
115 #define SENCODE_INVALID_CDB_FIELD 0x24
116 #define SENCODE_LUN_NOT_SUPPORTED 0x25
117 #define SENCODE_INVALID_PARAM_FIELD 0x26
118 #define SENCODE_PARAM_NOT_SUPPORTED 0x26
119 #define SENCODE_PARAM_VALUE_INVALID 0x26
120 #define SENCODE_RESET_OCCURRED 0x29
121 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
122 #define SENCODE_INQUIRY_DATA_CHANGED 0x3F
123 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
124 #define SENCODE_DIAGNOSTIC_FAILURE 0x40
125 #define SENCODE_INTERNAL_TARGET_FAILURE 0x44
126 #define SENCODE_INVALID_MESSAGE_ERROR 0x49
127 #define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
128 #define SENCODE_OVERLAPPED_COMMAND 0x4E
130 /*
131 * Additional sense codes
132 */
134 #define ASENCODE_NO_SENSE 0x00
135 #define ASENCODE_END_OF_DATA 0x05
136 #define ASENCODE_BECOMING_READY 0x01
137 #define ASENCODE_INIT_CMD_REQUIRED 0x02
138 #define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
139 #define ASENCODE_INVALID_COMMAND 0x00
140 #define ASENCODE_LBA_OUT_OF_RANGE 0x00
141 #define ASENCODE_INVALID_CDB_FIELD 0x00
142 #define ASENCODE_LUN_NOT_SUPPORTED 0x00
143 #define ASENCODE_INVALID_PARAM_FIELD 0x00
144 #define ASENCODE_PARAM_NOT_SUPPORTED 0x01
145 #define ASENCODE_PARAM_VALUE_INVALID 0x02
146 #define ASENCODE_RESET_OCCURRED 0x00
147 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
148 #define ASENCODE_INQUIRY_DATA_CHANGED 0x03
149 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
150 #define ASENCODE_DIAGNOSTIC_FAILURE 0x80
151 #define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
152 #define ASENCODE_INVALID_MESSAGE_ERROR 0x00
153 #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
154 #define ASENCODE_OVERLAPPED_COMMAND 0x00
156 #define BYTE0(x) (unsigned char)(x)
157 #define BYTE1(x) (unsigned char)((x) >> 8)
158 #define BYTE2(x) (unsigned char)((x) >> 16)
159 #define BYTE3(x) (unsigned char)((x) >> 24)
161 /*------------------------------------------------------------------------------
162 * S T R U C T S / T Y P E D E F S
163 *----------------------------------------------------------------------------*/
164 /* SCSI inquiry data */
165 struct inquiry_data {
166 u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
167 u8 inqd_dtq; /* RMB | Device Type Qualifier */
168 u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
169 u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
170 u8 inqd_len; /* Additional length (n-4) */
171 u8 inqd_pad1[2]; /* Reserved - must be zero */
172 u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
173 u8 inqd_vid[8]; /* Vendor ID */
174 u8 inqd_pid[16]; /* Product ID */
175 u8 inqd_prl[4]; /* Product Revision Level */
176 };
178 struct sense_data {
179 u8 error_code; /* 70h (current errors), 71h(deferred errors) */
180 u8 valid:1; /* A valid bit of one indicates that the information */
181 /* field contains valid information as defined in the
182 * SCSI-2 Standard.
183 */
184 u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
185 u8 sense_key:4; /* Sense Key */
186 u8 reserved:1;
187 u8 ILI:1; /* Incorrect Length Indicator */
188 u8 EOM:1; /* End Of Medium - reserved for random access devices */
189 u8 filemark:1; /* Filemark - reserved for random access devices */
191 u8 information[4]; /* for direct-access devices, contains the unsigned
192 * logical block address or residue associated with
193 * the sense key
194 */
195 u8 add_sense_len; /* number of additional sense bytes to follow this field */
196 u8 cmnd_info[4]; /* not used */
197 u8 ASC; /* Additional Sense Code */
198 u8 ASCQ; /* Additional Sense Code Qualifier */
199 u8 FRUC; /* Field Replaceable Unit Code - not used */
200 u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
201 * was in error
202 */
203 u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
204 * the bit_ptr field has valid value
205 */
206 u8 reserved2:2;
207 u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
208 * 0- illegal parameter in data.
209 */
210 u8 SKSV:1;
211 u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
212 };
214 /*
215 * M O D U L E G L O B A L S
216 */
218 static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /* SCSI Device Instance Pointers */
219 static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
220 static void get_sd_devname(int disknum, char *buffer);
221 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
222 static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg);
223 static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
224 #ifdef AAC_DETAILED_STATUS_INFO
225 static char *aac_get_status_string(u32 status);
226 #endif
228 /*
229 * Non dasd selection is handled entirely in aachba now
230 */
232 MODULE_PARM(nondasd, "i");
233 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
235 static int nondasd = -1;
237 /**
238 * aac_get_containers - list containers
239 * @common: adapter to probe
240 *
241 * Make a list of all containers on this controller
242 */
243 int aac_get_containers(struct aac_dev *dev)
244 {
245 struct fsa_scsi_hba *fsa_dev_ptr;
246 u32 index;
247 int status = 0;
248 struct aac_query_mount *dinfo;
249 struct aac_mount *dresp;
250 struct fib * fibptr;
251 unsigned instance;
253 fsa_dev_ptr = &(dev->fsa_dev);
254 instance = dev->scsi_host_ptr->unique_id;
256 if (!(fibptr = fib_alloc(dev)))
257 return -ENOMEM;
259 for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
260 fib_init(fibptr);
261 dinfo = (struct aac_query_mount *) fib_data(fibptr);
263 dinfo->command = cpu_to_le32(VM_NameServe);
264 dinfo->count = cpu_to_le32(index);
265 dinfo->type = cpu_to_le32(FT_FILESYS);
267 status = fib_send(ContainerCommand,
268 fibptr,
269 sizeof (struct aac_query_mount),
270 FsaNormal,
271 1, 1,
272 NULL, NULL);
273 if (status < 0 ) {
274 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
275 break;
276 }
277 dresp = (struct aac_mount *)fib_data(fibptr);
279 if ((le32_to_cpu(dresp->status) == ST_OK) &&
280 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
281 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
282 fsa_dev_ptr->valid[index] = 1;
283 fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
284 fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
285 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
286 fsa_dev_ptr->ro[index] = 1;
287 }
288 fib_complete(fibptr);
289 /*
290 * If there are no more containers, then stop asking.
291 */
292 if ((index + 1) >= le32_to_cpu(dresp->count))
293 break;
294 }
295 fib_free(fibptr);
296 fsa_dev[instance] = fsa_dev_ptr;
297 return status;
298 }
300 /**
301 * probe_container - query a logical volume
302 * @dev: device to query
303 * @cid: container identifier
304 *
305 * Queries the controller about the given volume. The volume information
306 * is updated in the struct fsa_scsi_hba structure rather than returned.
307 */
309 static int probe_container(struct aac_dev *dev, int cid)
310 {
311 struct fsa_scsi_hba *fsa_dev_ptr;
312 int status;
313 struct aac_query_mount *dinfo;
314 struct aac_mount *dresp;
315 struct fib * fibptr;
316 unsigned instance;
318 fsa_dev_ptr = &(dev->fsa_dev);
319 instance = dev->scsi_host_ptr->unique_id;
321 if (!(fibptr = fib_alloc(dev)))
322 return -ENOMEM;
324 fib_init(fibptr);
326 dinfo = (struct aac_query_mount *)fib_data(fibptr);
328 dinfo->command = cpu_to_le32(VM_NameServe);
329 dinfo->count = cpu_to_le32(cid);
330 dinfo->type = cpu_to_le32(FT_FILESYS);
332 status = fib_send(ContainerCommand,
333 fibptr,
334 sizeof(struct aac_query_mount),
335 FsaNormal,
336 1, 1,
337 NULL, NULL);
338 if (status < 0) {
339 printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
340 goto error;
341 }
343 dresp = (struct aac_mount *) fib_data(fibptr);
345 if ((le32_to_cpu(dresp->status) == ST_OK) &&
346 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
347 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
348 fsa_dev_ptr->valid[cid] = 1;
349 fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
350 fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
351 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
352 fsa_dev_ptr->ro[cid] = 1;
353 }
355 error:
356 fib_complete(fibptr);
357 fib_free(fibptr);
359 return status;
360 }
362 /* Local Structure to set SCSI inquiry data strings */
363 struct scsi_inq {
364 char vid[8]; /* Vendor ID */
365 char pid[16]; /* Product ID */
366 char prl[4]; /* Product Revision Level */
367 };
/**
 *	inqstrcpy	-	copy a string into an inquiry field
 *	@a: NUL-terminated source string
 *	@b: destination inquiry field
 *
 *	Copies the characters of @a into @b WITHOUT the terminating NUL;
 *	SCSI inquiry fields are fixed-width and not NUL-terminated.
 */
static void inqstrcpy(char *a, char *b)
{
	for (; *a; a++, b++)
		*b = *a;
}
385 static char *container_types[] = {
386 "None",
387 "Volume",
388 "Mirror",
389 "Stripe",
390 "RAID5",
391 "SSRW",
392 "SSRO",
393 "Morph",
394 "Legacy",
395 "RAID4",
396 "RAID10",
397 "RAID00",
398 "V-MIRRORS",
399 "PSEUDO R4",
400 "RAID50",
401 "Unknown"
402 };
406 /* Function: setinqstr
407 *
408 * Arguments: [1] pointer to void [1] int
409 *
410 * Purpose: Sets SCSI inquiry data strings for vendor, product
411 * and revision level. Allows strings to be set in platform dependant
412 * files instead of in OS dependant driver source.
413 */
415 static void setinqstr(int devtype, void *data, int tindex)
416 {
417 struct scsi_inq *str;
418 char *findit;
419 struct aac_driver_ident *mp;
421 mp = aac_get_driver_ident(devtype);
423 str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
425 inqstrcpy (mp->vname, str->vid);
426 inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
428 findit = str->pid;
430 for ( ; *findit != ' '; findit++); /* walk till we find a space then incr by 1 */
431 findit++;
433 if (tindex < (sizeof(container_types)/sizeof(char *))){
434 inqstrcpy (container_types[tindex], findit);
435 }
436 inqstrcpy ("V1.0", str->prl);
437 }
439 void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
440 u8 a_sense_code, u8 incorrect_length,
441 u8 bit_pointer, u16 field_pointer,
442 u32 residue)
443 {
444 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
445 sense_buf[1] = 0; /* Segment number, always zero */
447 if (incorrect_length) {
448 sense_buf[2] = sense_key | 0x20; /* Set ILI bit | sense key */
449 sense_buf[3] = BYTE3(residue);
450 sense_buf[4] = BYTE2(residue);
451 sense_buf[5] = BYTE1(residue);
452 sense_buf[6] = BYTE0(residue);
453 } else
454 sense_buf[2] = sense_key; /* Sense key */
456 if (sense_key == SENKEY_ILLEGAL)
457 sense_buf[7] = 10; /* Additional sense length */
458 else
459 sense_buf[7] = 6; /* Additional sense length */
461 sense_buf[12] = sense_code; /* Additional sense code */
462 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
463 if (sense_key == SENKEY_ILLEGAL) {
464 sense_buf[15] = 0;
466 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
467 sense_buf[15] = 0x80; /* Std sense key specific field */
468 /* Illegal parameter is in the parameter block */
470 if (sense_code == SENCODE_INVALID_CDB_FIELD)
471 sense_buf[15] = 0xc0; /* Std sense key specific field */
472 /* Illegal parameter is in the CDB block */
473 sense_buf[15] |= bit_pointer;
474 sense_buf[16] = field_pointer >> 8; /* MSB */
475 sense_buf[17] = field_pointer; /* LSB */
476 }
477 }
479 static void aac_io_done(Scsi_Cmnd * scsicmd)
480 {
481 unsigned long cpu_flags;
482 spin_lock_irqsave(&io_request_lock, cpu_flags);
483 scsicmd->scsi_done(scsicmd);
484 spin_unlock_irqrestore(&io_request_lock, cpu_flags);
485 }
/*
 * __aac_io_done - complete a SCSI command without taking io_request_lock.
 * Lock-free counterpart of aac_io_done(); used from aac_scsi_cmd(),
 * where the caller already holds io_request_lock.
 */
static void __aac_io_done(Scsi_Cmnd * scsicmd)
{
	scsicmd->scsi_done(scsicmd);
}
492 int aac_get_adapter_info(struct aac_dev* dev)
493 {
494 struct fib* fibptr;
495 struct aac_adapter_info* info;
496 int rcode;
497 u32 tmp;
498 if (!(fibptr = fib_alloc(dev)))
499 return -ENOMEM;
501 fib_init(fibptr);
502 info = (struct aac_adapter_info*) fib_data(fibptr);
504 memset(info,0,sizeof(struct aac_adapter_info));
506 rcode = fib_send(RequestAdapterInfo,
507 fibptr,
508 sizeof(struct aac_adapter_info),
509 FsaNormal,
510 1, 1,
511 NULL,
512 NULL);
514 memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
516 tmp = dev->adapter_info.kernelrev;
517 printk(KERN_INFO "%s%d: kernel %d.%d.%d build %d\n",
518 dev->name, dev->id,
519 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
520 dev->adapter_info.kernelbuild);
521 tmp = dev->adapter_info.monitorrev;
522 printk(KERN_INFO "%s%d: monitor %d.%d.%d build %d\n",
523 dev->name, dev->id,
524 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
525 dev->adapter_info.monitorbuild);
526 tmp = dev->adapter_info.biosrev;
527 printk(KERN_INFO "%s%d: bios %d.%d.%d build %d\n",
528 dev->name, dev->id,
529 tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
530 dev->adapter_info.biosbuild);
531 printk(KERN_INFO "%s%d: serial %x%x\n",
532 dev->name, dev->id,
533 dev->adapter_info.serial[0],
534 dev->adapter_info.serial[1]);
536 dev->nondasd_support = 0;
537 dev->raid_scsi_mode = 0;
538 if(dev->adapter_info.options & AAC_OPT_NONDASD){
539 dev->nondasd_support = 1;
540 }
542 /*
543 * If the firmware supports ROMB RAID/SCSI mode and we are currently
544 * in RAID/SCSI mode, set the flag. For now if in this mode we will
545 * force nondasd support on. If we decide to allow the non-dasd flag
546 * additional changes changes will have to be made to support
547 * RAID/SCSI. the function aac_scsi_cmd in this module will have to be
548 * changed to support the new dev->raid_scsi_mode flag instead of
549 * leaching off of the dev->nondasd_support flag. Also in linit.c the
550 * function aac_detect will have to be modified where it sets up the
551 * max number of channels based on the aac->nondasd_support flag only.
552 */
553 if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED)
554 && (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE))
555 {
556 dev->nondasd_support = 1;
557 dev->raid_scsi_mode = 1;
558 }
559 if (dev->raid_scsi_mode != 0)
560 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",dev->name, dev->id);
562 if (nondasd != -1)
563 dev->nondasd_support = (nondasd!=0);
565 if(dev->nondasd_support != 0)
566 printk(KERN_INFO "%s%d: Non-DASD support enabled\n",dev->name, dev->id);
568 dev->pae_support = 0;
569 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
570 dev->pae_support = 1;
571 }
572 /* TODO - dmb temporary until fw can set this bit */
573 dev->pae_support = (BITS_PER_LONG >= 64);
574 if(dev->pae_support != 0)
575 {
576 printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
577 pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
578 }
580 fib_complete(fibptr);
581 fib_free(fibptr);
583 return rcode;
584 }
/*
 * read_callback - completion handler for aac_read() FIBs.
 * @context: the originating Scsi_Cmnd (passed through fib_send)
 * @fibptr:  the completed FIB carrying the aac_read_reply
 *
 * Tears down the DMA mapping set up for the command, translates the
 * adapter status into a SCSI result (filling per-container sense data
 * on failure), releases the FIB and completes the command via
 * aac_io_done(), which takes io_request_lock.
 */
static void read_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_read_reply *readreply;
	Scsi_Cmnd *scsicmd;
	u32 lba;
	u32 cid;

	scsicmd = (Scsi_Cmnd *) context;

	dev = (struct aac_dev *)scsicmd->host->hostdata;
	cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);

	/* lba decoded with 6-byte CDB layout; only used for debug output */
	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));

	if (fibptr == NULL)
		BUG();

	/* Undo the DMA mapping established when the read was issued;
	 * single-buffer commands stashed their dma handle in SCp.ptr. */
	if(scsicmd->use_sg)
		pci_unmap_sg(dev->pdev,
			(struct scatterlist *)scsicmd->buffer,
			scsicmd->use_sg,
			scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
	else if(scsicmd->request_bufflen)
		pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
			 scsicmd->request_bufflen,
			 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
	readreply = (struct aac_read_reply *)fib_data(fibptr);
	if (le32_to_cpu(readreply->status) == ST_OK)
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
	else {
		/* Report a hardware-error check condition with sense data
		 * the next REQUEST SENSE for this container will return. */
		printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
		set_sense((u8 *) &sense_data[cid],
				    SENKEY_HW_ERR,
				    SENCODE_INTERNAL_TARGET_FAILURE,
				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
				    0, 0);
	}
	fib_complete(fibptr);
	fib_free(fibptr);

	aac_io_done(scsicmd);
}
633 static void write_callback(void *context, struct fib * fibptr)
634 {
635 struct aac_dev *dev;
636 struct aac_write_reply *writereply;
637 Scsi_Cmnd *scsicmd;
638 u32 lba;
639 u32 cid;
641 scsicmd = (Scsi_Cmnd *) context;
642 dev = (struct aac_dev *)scsicmd->host->hostdata;
643 cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
645 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
646 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
647 if (fibptr == NULL)
648 BUG();
650 if(scsicmd->use_sg)
651 pci_unmap_sg(dev->pdev,
652 (struct scatterlist *)scsicmd->buffer,
653 scsicmd->use_sg,
654 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
655 else if(scsicmd->request_bufflen)
656 pci_unmap_single(dev->pdev, (dma_addr_t)(unsigned long)scsicmd->SCp.ptr,
657 scsicmd->request_bufflen,
658 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
660 writereply = (struct aac_write_reply *) fib_data(fibptr);
661 if (le32_to_cpu(writereply->status) == ST_OK)
662 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
663 else {
664 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
665 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
666 set_sense((u8 *) &sense_data[cid],
667 SENKEY_HW_ERR,
668 SENCODE_INTERNAL_TARGET_FAILURE,
669 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
670 0, 0);
671 }
673 fib_complete(fibptr);
674 fib_free(fibptr);
675 aac_io_done(scsicmd);
676 }
/*
 * aac_read - issue a block read to a container.
 * @scsicmd: SCSI command block (READ(6) or READ(10))
 * @cid:     container id addressed by the command
 *
 * Decodes the CDB into lba/count, builds a BlockRead FIB (or a 64-bit
 * HostRead64 FIB when PAE sg maps are enabled) with a scatter-gather
 * map for the data buffer, and queues it to the adapter;
 * read_callback() completes the command asynchronously.
 *
 * Returns 0 if the FIB was queued, -1 on failure (the command is
 * completed with an error or QUEUE_FULL result before returning).
 */
int aac_read(Scsi_Cmnd * scsicmd, int cid)
{
	u32 lba;
	u32 count;
	int status;

	u16 fibsize;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;

	dev = (struct aac_dev *)scsicmd->host->hostdata;
	/*
	 * Get block address and transfer length
	 */
	if (scsicmd->cmnd[0] == SS_READ)	/* 6 byte command */
	{
		dprintk((KERN_DEBUG "aachba: received a read(6) command on target %d.\n", cid));

		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];

		/* READ(6): a transfer length of 0 means 256 blocks */
		if (count == 0)
			count = 256;
	} else {
		dprintk((KERN_DEBUG "aachba: received a read(10) command on target %d.\n", cid));

		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
	}
	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
	/*
	 * Allocate and initialize a Fib
	 */
	if (!(cmd_fibcontext = fib_alloc(dev))) {
		scsicmd->result = DID_ERROR << 16;
		aac_io_done(scsicmd);
		return (-1);
	}

	fib_init(cmd_fibcontext);

	if(dev->pae_support == 1){
		/* 64-bit scatter-gather variant for PAE-capable setups */
		struct aac_read64 *readcmd;
		readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
		readcmd->command = cpu_to_le32(VM_CtHostRead64);
		readcmd->cid = cpu_to_le16(cid);
		readcmd->sector_count = cpu_to_le16(count);
		readcmd->block = cpu_to_le32(lba);
		readcmd->pad = cpu_to_le16(0);
		readcmd->flags = cpu_to_le16(0);

		aac_build_sg64(scsicmd, &readcmd->sg);
		if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
			BUG();
		/* FIB size covers the header plus the actual sg entries used */
		fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
		/*
		 * Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand64,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) read_callback,
			  (void *) scsicmd);
	} else {
		struct aac_read *readcmd;
		readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
		readcmd->command = cpu_to_le32(VM_CtBlockRead);
		readcmd->cid = cpu_to_le32(cid);
		readcmd->block = cpu_to_le32(lba);
		readcmd->count = cpu_to_le32(count * 512);

		/* Transfers beyond 64K are not expected on this path */
		if (count * 512 > (64 * 1024))
			BUG();

		aac_build_sg(scsicmd, &readcmd->sg);
		if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
			BUG();
		fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
		/*
		 * Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) read_callback,
			  (void *) scsicmd);
	}

	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
	/*
	 * For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
	aac_io_done(scsicmd);
	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);
	return -1;
}
788 static int aac_write(Scsi_Cmnd * scsicmd, int cid)
789 {
790 u32 lba;
791 u32 count;
792 int status;
793 u16 fibsize;
794 struct aac_dev *dev;
795 struct fib * cmd_fibcontext;
797 dev = (struct aac_dev *)scsicmd->host->hostdata;
798 /*
799 * Get block address and transfer length
800 */
801 if (scsicmd->cmnd[0] == SS_WRITE) /* 6 byte command */
802 {
803 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
804 count = scsicmd->cmnd[4];
805 if (count == 0)
806 count = 256;
807 } else {
808 dprintk((KERN_DEBUG "aachba: received a write(10) command on target %d.\n", cid));
809 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
810 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
811 }
812 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
813 /*
814 * Allocate and initialize a Fib then setup a BlockWrite command
815 */
816 if (!(cmd_fibcontext = fib_alloc(dev))) {
817 scsicmd->result = DID_ERROR << 16;
818 aac_io_done(scsicmd);
819 return -1;
820 }
821 fib_init(cmd_fibcontext);
823 if(dev->pae_support == 1)
824 {
825 struct aac_write64 *writecmd;
826 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
827 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
828 writecmd->cid = cpu_to_le16(cid);
829 writecmd->sector_count = cpu_to_le16(count);
830 writecmd->block = cpu_to_le32(lba);
831 writecmd->pad = cpu_to_le16(0);
832 writecmd->flags = cpu_to_le16(0);
834 aac_build_sg64(scsicmd, &writecmd->sg);
835 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
836 BUG();
837 fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
838 /*
839 * Now send the Fib to the adapter
840 */
841 status = fib_send(ContainerCommand64,
842 cmd_fibcontext,
843 fibsize,
844 FsaNormal,
845 0, 1,
846 (fib_callback) write_callback,
847 (void *) scsicmd);
848 }
849 else
850 {
851 struct aac_write *writecmd;
852 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
853 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
854 writecmd->cid = cpu_to_le32(cid);
855 writecmd->block = cpu_to_le32(lba);
856 writecmd->count = cpu_to_le32(count * 512);
857 writecmd->sg.count = cpu_to_le32(1);
858 /* ->stable is not used - it did mean which type of write */
860 if (count * 512 > (64 * 1024))
861 BUG();
862 aac_build_sg(scsicmd, &writecmd->sg);
863 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
864 BUG();
865 fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
866 /*
867 * Now send the Fib to the adapter
868 */
869 status = fib_send(ContainerCommand,
870 cmd_fibcontext,
871 fibsize,
872 FsaNormal,
873 0, 1,
874 (fib_callback) write_callback,
875 (void *) scsicmd);
876 }
878 /*
879 * Check that the command queued to the controller
880 */
881 if (status == -EINPROGRESS)
882 return 0;
884 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
885 /*
886 * For some reason, the Fib didn't queue, return QUEUE_FULL
887 */
888 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
889 aac_io_done(scsicmd);
891 fib_complete(cmd_fibcontext);
892 fib_free(cmd_fibcontext);
893 return -1;
894 }
897 /**
898 * aac_scsi_cmd() - Process SCSI command
899 * @scsicmd: SCSI command block
900 * @wait: 1 if the user wants to await completion
901 *
902 * Emulate a SCSI command and queue the required request for the
903 * aacraid firmware.
904 */
906 int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
907 {
908 u32 cid = 0;
909 struct fsa_scsi_hba *fsa_dev_ptr;
910 int cardtype;
911 int ret;
912 struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
914 cardtype = dev->cardtype;
916 fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
918 /*
919 * If the bus, target or lun is out of range, return fail
920 * Test does not apply to ID 16, the pseudo id for the controller
921 * itself.
922 */
923 if (scsicmd->target != scsicmd->host->this_id) {
924 if ((scsicmd->channel == 0) ){
925 if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){
926 scsicmd->result = DID_NO_CONNECT << 16;
927 __aac_io_done(scsicmd);
928 return 0;
929 }
930 cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
932 /*
933 * If the target container doesn't exist, it may have
934 * been newly created
935 */
936 if (fsa_dev_ptr->valid[cid] == 0) {
937 switch (scsicmd->cmnd[0]) {
938 case SS_INQUIR:
939 case SS_RDCAP:
940 case SS_TEST:
941 spin_unlock_irq(&io_request_lock);
942 probe_container(dev, cid);
943 spin_lock_irq(&io_request_lock);
944 if (fsa_dev_ptr->valid[cid] == 0) {
945 scsicmd->result = DID_NO_CONNECT << 16;
946 __aac_io_done(scsicmd);
947 return 0;
948 }
949 default:
950 break;
951 }
952 }
953 /*
954 * If the target container still doesn't exist,
955 * return failure
956 */
957 if (fsa_dev_ptr->valid[cid] == 0) {
958 scsicmd->result = DID_BAD_TARGET << 16;
959 __aac_io_done(scsicmd);
960 return -1;
961 }
962 } else { /* check for physical non-dasd devices */
963 if(dev->nondasd_support == 1){
964 return aac_send_srb_fib(scsicmd);
965 } else {
966 scsicmd->result = DID_NO_CONNECT << 16;
967 __aac_io_done(scsicmd);
968 return 0;
969 }
970 }
971 }
972 /*
973 * else Command for the controller itself
974 */
975 else if ((scsicmd->cmnd[0] != SS_INQUIR) && /* only INQUIRY & TUR cmnd supported for controller */
976 (scsicmd->cmnd[0] != SS_TEST))
977 {
978 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
979 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
980 set_sense((u8 *) &sense_data[cid],
981 SENKEY_ILLEGAL,
982 SENCODE_INVALID_COMMAND,
983 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
984 __aac_io_done(scsicmd);
985 return -1;
986 }
989 /* Handle commands here that don't really require going out to the adapter */
990 switch (scsicmd->cmnd[0]) {
991 case SS_INQUIR:
992 {
993 struct inquiry_data *inq_data_ptr;
995 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
996 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
997 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
999 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
1000 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1001 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1002 inq_data_ptr->inqd_len = 31;
1003 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
1004 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
1005 /*
1006 * Set the Vendor, Product, and Revision Level
1007 * see: <vendor>.c i.e. aac.c
1008 */
1009 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
1010 if (scsicmd->target == scsicmd->host->this_id)
1011 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
1012 else
1013 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1014 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1015 __aac_io_done(scsicmd);
1016 return 0;
1018 case SS_RDCAP:
1020 int capacity;
1021 char *cp;
1023 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1024 capacity = fsa_dev_ptr->size[cid] - 1;
1025 cp = scsicmd->request_buffer;
1026 cp[0] = (capacity >> 24) & 0xff;
1027 cp[1] = (capacity >> 16) & 0xff;
1028 cp[2] = (capacity >> 8) & 0xff;
1029 cp[3] = (capacity >> 0) & 0xff;
1030 cp[4] = 0;
1031 cp[5] = 0;
1032 cp[6] = 2;
1033 cp[7] = 0;
1035 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1036 __aac_io_done(scsicmd);
1038 return 0;
1041 case SS_MODESEN:
1043 char *mode_buf;
1045 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1046 mode_buf = scsicmd->request_buffer;
1047 mode_buf[0] = 0; /* Mode data length (MSB) */
1048 mode_buf[1] = 6; /* Mode data length (LSB) */
1049 mode_buf[2] = 0; /* Medium type - default */
1050 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1051 mode_buf[4] = 0; /* reserved */
1052 mode_buf[5] = 0; /* reserved */
1053 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1054 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1056 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1057 __aac_io_done(scsicmd);
1059 return 0;
1061 case SS_REQSEN:
1062 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1063 memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
1064 memset(&sense_data[cid], 0, sizeof (struct sense_data));
1065 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1066 __aac_io_done(scsicmd);
1067 return (0);
1069 case SS_LOCK:
1070 dprintk((KERN_DEBUG "LOCK command.\n"));
1071 if (scsicmd->cmnd[4])
1072 fsa_dev_ptr->locked[cid] = 1;
1073 else
1074 fsa_dev_ptr->locked[cid] = 0;
1076 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1077 __aac_io_done(scsicmd);
1078 return 0;
1079 /*
1080 * These commands are all No-Ops
1081 */
1082 case SS_TEST:
1083 case SS_RESERV:
1084 case SS_RELES:
1085 case SS_REZERO:
1086 case SS_REASGN:
1087 case SS_SEEK:
1088 case SS_ST_SP:
1089 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
1090 __aac_io_done(scsicmd);
1091 return (0);
1094 switch (scsicmd->cmnd[0])
1096 case SS_READ:
1097 case SM_READ:
1098 /*
1099 * Hack to keep track of ordinal number of the device that
1100 * corresponds to a container. Needed to convert
1101 * containers to /dev/sd device names
1102 */
1104 spin_unlock_irq(&io_request_lock);
1105 fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
1106 ret = aac_read(scsicmd, cid);
1107 spin_lock_irq(&io_request_lock);
1108 return ret;
1110 case SS_WRITE:
1111 case SM_WRITE:
1112 spin_unlock_irq(&io_request_lock);
1113 ret = aac_write(scsicmd, cid);
1114 spin_lock_irq(&io_request_lock);
1115 return ret;
1116 default:
1117 /*
1118 * Unhandled commands
1119 */
1120 printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
1121 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
1122 set_sense((u8 *) &sense_data[cid],
1123 SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
1124 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1125 __aac_io_done(scsicmd);
1126 return -1;
1130 static int query_disk(struct aac_dev *dev, void *arg)
1132 struct aac_query_disk qd;
1133 struct fsa_scsi_hba *fsa_dev_ptr;
1135 fsa_dev_ptr = &(dev->fsa_dev);
1136 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1137 return -EFAULT;
1138 if (qd.cnum == -1)
1139 qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
1140 else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1))
1142 if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
1143 return -EINVAL;
1144 qd.instance = dev->scsi_host_ptr->host_no;
1145 qd.bus = 0;
1146 qd.target = CONTAINER_TO_TARGET(qd.cnum);
1147 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1149 else return -EINVAL;
1151 qd.valid = fsa_dev_ptr->valid[qd.cnum];
1152 qd.locked = fsa_dev_ptr->locked[qd.cnum];
1153 qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
1155 if (fsa_dev_ptr->devno[qd.cnum] == -1)
1156 qd.unmapped = 1;
1157 else
1158 qd.unmapped = 0;
1160 get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
1162 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1163 return -EFAULT;
1164 return 0;
/*
 *	get_sd_devname		-	format the sd device name for a disk ordinal
 *	@disknum: ordinal of the SCSI disk (-ve means "not mapped")
 *	@buffer: caller-supplied destination for the name string
 *
 *	Writes "sda".."sdz" for ordinals 0..25, then the two-letter scheme
 *	"sdaa", "sdab", ... for larger ordinals.  A negative ordinal yields
 *	the empty string.
 */
static void get_sd_devname(int disknum, char *buffer)
{
	unsigned int major_letter, minor_letter;

	if (disknum < 0) {
		sprintf(buffer, "%s", "");
		return;
	}

	if (disknum < 26) {
		/* Single-letter names cover the first 26 disks. */
		sprintf(buffer, "sd%c", 'a' + disknum);
		return;
	}

	/*
	 * Beyond 26 disks the naming switches to two letters:
	 * disknum 26 -> "sdaa", 27 -> "sdab", ...
	 */
	major_letter = disknum / 26;
	minor_letter = disknum % 26;
	sprintf(buffer, "sd%c%c", 'a' + major_letter - 1, 'a' + minor_letter);
}
1189 static int force_delete_disk(struct aac_dev *dev, void *arg)
1191 struct aac_delete_disk dd;
1192 struct fsa_scsi_hba *fsa_dev_ptr;
1194 fsa_dev_ptr = &(dev->fsa_dev);
1196 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1197 return -EFAULT;
1199 if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
1200 return -EINVAL;
1201 /*
1202 * Mark this container as being deleted.
1203 */
1204 fsa_dev_ptr->deleted[dd.cnum] = 1;
1205 /*
1206 * Mark the container as no longer valid
1207 */
1208 fsa_dev_ptr->valid[dd.cnum] = 0;
1209 return 0;
1212 static int delete_disk(struct aac_dev *dev, void *arg)
1214 struct aac_delete_disk dd;
1215 struct fsa_scsi_hba *fsa_dev_ptr;
1217 fsa_dev_ptr = &(dev->fsa_dev);
1219 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1220 return -EFAULT;
1222 if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
1223 return -EINVAL;
1224 /*
1225 * If the container is locked, it can not be deleted by the API.
1226 */
1227 if (fsa_dev_ptr->locked[dd.cnum])
1228 return -EBUSY;
1229 else {
1230 /*
1231 * Mark the container as no longer being valid.
1232 */
1233 fsa_dev_ptr->valid[dd.cnum] = 0;
1234 fsa_dev_ptr->devno[dd.cnum] = -1;
1235 return 0;
1239 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
1241 switch (cmd) {
1242 case FSACTL_QUERY_DISK:
1243 return query_disk(dev, arg);
1244 case FSACTL_DELETE_DISK:
1245 return delete_disk(dev, arg);
1246 case FSACTL_FORCE_DELETE_DISK:
1247 return force_delete_disk(dev, arg);
1248 case 2131:
1249 return aac_get_containers(dev);
1250 default:
1251 return -ENOTTY;
1255 /**
1257 * aac_srb_callback
1258 * @context: the context set in the fib - here it is scsi cmd
1259 * @fibptr: pointer to the fib
1261 * Handles the completion of a scsi command to a non dasd device
1263 */
1265 static void aac_srb_callback(void *context, struct fib * fibptr)
1267 struct aac_dev *dev;
1268 struct aac_srb_reply *srbreply;
1269 Scsi_Cmnd *scsicmd;
1271 scsicmd = (Scsi_Cmnd *) context;
1272 dev = (struct aac_dev *)scsicmd->host->hostdata;
1274 if (fibptr == NULL)
1275 BUG();
1277 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1279 scsicmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
1280 // calculate resid for sg
1281 scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
1283 if(scsicmd->use_sg)
1284 pci_unmap_sg(dev->pdev,
1285 (struct scatterlist *)scsicmd->buffer,
1286 scsicmd->use_sg,
1287 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1288 else if(scsicmd->request_bufflen)
1289 pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
1290 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1292 /*
1293 * First check the fib status
1294 */
1296 if (le32_to_cpu(srbreply->status) != ST_OK){
1297 int len;
1298 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1299 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1300 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1301 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
1302 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1305 /*
1306 * Next check the srb status
1307 */
1308 switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1309 case SRB_STATUS_ERROR_RECOVERY:
1310 case SRB_STATUS_PENDING:
1311 case SRB_STATUS_SUCCESS:
1312 if(scsicmd->cmnd[0] == INQUIRY ){
1313 u8 b;
1314 u8 b1;
1315 /* We can't expose disk devices because we can't tell whether they
1316 * are the raw container drives or stand alone drives. If they have
1317 * the removable bit set then we should expose them though.
1318 */
1319 b = (*(u8*)scsicmd->buffer)&0x1f;
1320 b1 = ((u8*)scsicmd->buffer)[1];
1321 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1322 || (b==TYPE_DISK && (b1&0x80)) ){
1323 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1324 /*
1325 * We will allow disk devices if in RAID/SCSI mode and
1326 * the channel is 2
1327 */
1328 } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1330 } else {
1331 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1333 } else {
1334 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1336 break;
1337 case SRB_STATUS_DATA_OVERRUN:
1338 switch(scsicmd->cmnd[0]){
1339 case READ_6:
1340 case WRITE_6:
1341 case READ_10:
1342 case WRITE_10:
1343 case READ_12:
1344 case WRITE_12:
1345 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1346 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1347 } else {
1348 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1350 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1351 break;
1352 case INQUIRY: {
1353 u8 b;
1354 u8 b1;
1355 /* We can't expose disk devices because we can't tell whether they
1356 * are the raw container drives or stand alone drives
1357 */
1358 b = (*(u8*)scsicmd->buffer)&0x0f;
1359 b1 = ((u8*)scsicmd->buffer)[1];
1360 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1361 || (b==TYPE_DISK && (b1&0x80)) ){
1362 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1363 /*
1364 * We will allow disk devices if in RAID/SCSI mode and
1365 * the channel is 2
1366 */
1367 } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
1368 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1369 } else {
1370 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1372 break;
1374 default:
1375 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1376 break;
1378 break;
1379 case SRB_STATUS_ABORTED:
1380 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1381 break;
1382 case SRB_STATUS_ABORT_FAILED:
1383 // Not sure about this one - but assuming the hba was trying to abort for some reason
1384 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1385 break;
1386 case SRB_STATUS_PARITY_ERROR:
1387 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1388 break;
1389 case SRB_STATUS_NO_DEVICE:
1390 case SRB_STATUS_INVALID_PATH_ID:
1391 case SRB_STATUS_INVALID_TARGET_ID:
1392 case SRB_STATUS_INVALID_LUN:
1393 case SRB_STATUS_SELECTION_TIMEOUT:
1394 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1395 break;
1397 case SRB_STATUS_COMMAND_TIMEOUT:
1398 case SRB_STATUS_TIMEOUT:
1399 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
1400 break;
1402 case SRB_STATUS_BUSY:
1403 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1404 break;
1406 case SRB_STATUS_BUS_RESET:
1407 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
1408 break;
1410 case SRB_STATUS_MESSAGE_REJECTED:
1411 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
1412 break;
1413 case SRB_STATUS_REQUEST_FLUSHED:
1414 case SRB_STATUS_ERROR:
1415 case SRB_STATUS_INVALID_REQUEST:
1416 case SRB_STATUS_REQUEST_SENSE_FAILED:
1417 case SRB_STATUS_NO_HBA:
1418 case SRB_STATUS_UNEXPECTED_BUS_FREE:
1419 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1420 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1421 case SRB_STATUS_DELAYED_RETRY:
1422 case SRB_STATUS_BAD_FUNCTION:
1423 case SRB_STATUS_NOT_STARTED:
1424 case SRB_STATUS_NOT_IN_USE:
1425 case SRB_STATUS_FORCE_ABORT:
1426 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1427 default:
1428 #ifdef AAC_DETAILED_STATUS_INFO
1429 printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) );
1430 #endif
1431 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1432 break;
1434 if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
1435 int len;
1436 scsicmd->result |= CHECK_CONDITION;
1437 len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
1438 sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
1439 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
1440 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1442 /*
1443 * OR in the scsi status (already shifted up a bit)
1444 */
1445 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
1447 fib_complete(fibptr);
1448 fib_free(fibptr);
1449 aac_io_done(scsicmd);
1452 /**
1454 * aac_send_scb_fib
1455 * @scsicmd: the scsi command block
1457 * This routine will form a FIB and fill in the aac_srb from the
1458 * scsicmd passed in.
1459 */
1461 static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
1463 struct fib* cmd_fibcontext;
1464 struct aac_dev* dev;
1465 int status;
1466 struct aac_srb *srbcmd;
1467 u16 fibsize;
1468 u32 flag;
1469 u32 timeout;
1471 if( scsicmd->target > 15 || scsicmd->lun > 7) {
1472 scsicmd->result = DID_NO_CONNECT << 16;
1473 __aac_io_done(scsicmd);
1474 return 0;
1477 dev = (struct aac_dev *)scsicmd->host->hostdata;
1478 switch(scsicmd->sc_data_direction){
1479 case SCSI_DATA_WRITE:
1480 flag = SRB_DataOut;
1481 break;
1482 case SCSI_DATA_UNKNOWN:
1483 flag = SRB_DataIn | SRB_DataOut;
1484 break;
1485 case SCSI_DATA_READ:
1486 flag = SRB_DataIn;
1487 break;
1488 case SCSI_DATA_NONE:
1489 default:
1490 flag = SRB_NoDataXfer;
1491 break;
1495 /*
1496 * Allocate and initialize a Fib then setup a BlockWrite command
1497 */
1498 if (!(cmd_fibcontext = fib_alloc(dev))) {
1499 scsicmd->result = DID_ERROR << 16;
1500 __aac_io_done(scsicmd);
1501 return -1;
1503 fib_init(cmd_fibcontext);
1505 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
1506 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1507 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
1508 srbcmd->target = cpu_to_le32(scsicmd->target);
1509 srbcmd->lun = cpu_to_le32(scsicmd->lun);
1510 srbcmd->flags = cpu_to_le32(flag);
1511 timeout = (scsicmd->timeout-jiffies)/HZ;
1512 if(timeout == 0){
1513 timeout = 1;
1515 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
1516 srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
1517 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
1519 if( dev->pae_support ==1 ) {
1520 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
1521 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1523 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1524 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1525 /*
1526 * Build Scatter/Gather list
1527 */
1528 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
1530 /*
1531 * Now send the Fib to the adapter
1532 */
1533 status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1534 (fib_callback) aac_srb_callback, (void *) scsicmd);
1535 } else {
1536 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
1537 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1539 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1540 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1541 /*
1542 * Build Scatter/Gather list
1543 */
1544 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
1546 /*
1547 * Now send the Fib to the adapter
1548 */
1549 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1550 (fib_callback) aac_srb_callback, (void *) scsicmd);
1552 /*
1553 * Check that the command queued to the controller
1554 */
1555 if (status == -EINPROGRESS){
1556 return 0;
1559 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
1560 /*
1561 * For some reason, the Fib didn't queue, return QUEUE_FULL
1562 */
1563 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
1564 __aac_io_done(scsicmd);
1566 fib_complete(cmd_fibcontext);
1567 fib_free(cmd_fibcontext);
1569 return -1;
1572 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
1574 struct aac_dev *dev;
1575 unsigned long byte_count = 0;
1577 dev = (struct aac_dev *)scsicmd->host->hostdata;
1578 // Get rid of old data
1579 psg->count = cpu_to_le32(0);
1580 psg->sg[0].addr = cpu_to_le32(NULL);
1581 psg->sg[0].count = cpu_to_le32(0);
1582 if (scsicmd->use_sg) {
1583 struct scatterlist *sg;
1584 int i;
1585 int sg_count;
1586 sg = (struct scatterlist *) scsicmd->request_buffer;
1588 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1589 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1590 psg->count = cpu_to_le32(sg_count);
1592 byte_count = 0;
1594 for (i = 0; i < sg_count; i++) {
1595 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
1596 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1597 byte_count += sg_dma_len(sg);
1598 sg++;
1600 /* hba wants the size to be exact */
1601 if(byte_count > scsicmd->request_bufflen){
1602 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1603 byte_count = scsicmd->request_bufflen;
1605 /* Check for command underflow */
1606 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1607 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1608 byte_count, scsicmd->underflow);
1611 else if(scsicmd->request_bufflen) {
1612 dma_addr_t addr;
1613 addr = pci_map_single(dev->pdev,
1614 scsicmd->request_buffer,
1615 scsicmd->request_bufflen,
1616 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1617 psg->count = cpu_to_le32(1);
1618 psg->sg[0].addr = cpu_to_le32(addr);
1619 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1620 /* Cast to pointer from integer of different size */
1621 scsicmd->SCp.ptr = (void *)addr;
1622 byte_count = scsicmd->request_bufflen;
1624 return byte_count;
1628 static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
1630 struct aac_dev *dev;
1631 unsigned long byte_count = 0;
1632 u64 le_addr;
1634 dev = (struct aac_dev *)scsicmd->host->hostdata;
1635 // Get rid of old data
1636 psg->count = cpu_to_le32(0);
1637 psg->sg[0].addr[0] = cpu_to_le32(NULL);
1638 psg->sg[0].addr[1] = cpu_to_le32(NULL);
1639 psg->sg[0].count = cpu_to_le32(0);
1640 if (scsicmd->use_sg) {
1641 struct scatterlist *sg;
1642 int i;
1643 int sg_count;
1644 sg = (struct scatterlist *) scsicmd->request_buffer;
1646 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1647 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1648 psg->count = cpu_to_le32(sg_count);
1650 byte_count = 0;
1652 for (i = 0; i < sg_count; i++) {
1653 le_addr = cpu_to_le64(sg_dma_address(sg));
1654 psg->sg[i].addr[1] = (u32)(le_addr>>32);
1655 psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
1656 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1657 byte_count += sg_dma_len(sg);
1658 sg++;
1660 /* hba wants the size to be exact */
1661 if(byte_count > scsicmd->request_bufflen){
1662 psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1663 byte_count = scsicmd->request_bufflen;
1665 /* Check for command underflow */
1666 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1667 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1668 byte_count, scsicmd->underflow);
1671 else if(scsicmd->request_bufflen) {
1672 dma_addr_t addr;
1673 addr = pci_map_single(dev->pdev,
1674 scsicmd->request_buffer,
1675 scsicmd->request_bufflen,
1676 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
1677 psg->count = cpu_to_le32(1);
1678 le_addr = cpu_to_le64(addr);
1679 psg->sg[0].addr[1] = (u32)(le_addr>>32);
1680 psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
1681 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
1682 /* Cast to pointer from integer of different size */
1683 scsicmd->SCp.ptr = (void *)addr;
1684 byte_count = scsicmd->request_bufflen;
1686 return byte_count;
1689 #ifdef AAC_DETAILED_STATUS_INFO
1691 struct aac_srb_status_info {
1692 u32 status;
1693 char *str;
1694 };
1697 static struct aac_srb_status_info srb_status_info[] = {
1698 { SRB_STATUS_PENDING, "Pending Status"},
1699 { SRB_STATUS_SUCCESS, "Success"},
1700 { SRB_STATUS_ABORTED, "Aborted Command"},
1701 { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
1702 { SRB_STATUS_ERROR, "Error Event"},
1703 { SRB_STATUS_BUSY, "Device Busy"},
1704 { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
1705 { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
1706 { SRB_STATUS_NO_DEVICE, "No Device"},
1707 { SRB_STATUS_TIMEOUT, "Timeout"},
1708 { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
1709 { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
1710 { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
1711 { SRB_STATUS_BUS_RESET, "Bus Reset"},
1712 { SRB_STATUS_PARITY_ERROR, "Parity Error"},
1713 { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
1714 { SRB_STATUS_NO_HBA, "No HBA"},
1715 { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
1716 { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
1717 { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
1718 { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
1719 { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
1720 { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
1721 { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
1722 { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
1723 { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
1724 { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
1725 { SRB_STATUS_NOT_STARTED, "Not Started"},
1726 { SRB_STATUS_NOT_IN_USE, "Not In Use"},
1727 { SRB_STATUS_FORCE_ABORT, "Force Abort"},
1728 { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
1729 { 0xff, "Unknown Error"}
1730 };
1732 char *aac_get_status_string(u32 status)
1734 int i;
1736 for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
1737 if(srb_status_info[i].status == status){
1738 return srb_status_info[i].str;
1742 return "Bad Status Code";
1745 #endif