ia64/xen-unstable

view xen/drivers/block/cciss.c @ 1114:6f46495a58dd

bitkeeper revision 1.740.1.1 (403b2d9cPtJ1eKIhZY0pelA-D7cKLA)

cciss.c:
Fix CCISS driver when no controller present.
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 24 10:55:24 2004 +0000 (2004-02-24)
parents 3ba8d5faadd2
children bc02f2f73b0e
line source
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
20 *
21 */
23 #include <xeno/config.h> /* CONFIG_PROC_FS */
24 #include <xeno/module.h>
25 #include <xeno/version.h>
26 #include <xeno/types.h>
27 #include <xeno/pci.h>
28 #include <xeno/kernel.h>
29 #include <xeno/slab.h>
30 #include <xeno/delay.h>
31 #include <xeno/major.h>
32 #include <xeno/blkpg.h>
33 #include <xeno/interrupt.h>
34 #include <xeno/timer.h>
35 #include <xeno/init.h>
36 #include <xeno/hdreg.h>
37 #include <xeno/spinlock.h>
38 #include <asm/uaccess.h>
39 #include <asm/io.h>
41 #include <xeno/blk.h>
42 #include <xeno/blkdev.h>
43 #include <xeno/genhd.h>
45 #include <asm/irq.h>
46 #include <asm/byteorder.h>
48 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
49 #define DRIVER_NAME "HP CISS Driver (v 2.4.50)"
50 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,50)
52 /* Embedded module documentation macros - see modules.h */
53 MODULE_AUTHOR("Hewlett-Packard Company");
54 MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.50");
55 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400 6i");
56 MODULE_LICENSE("GPL");
58 #include "cciss_cmd.h"
59 #include "cciss.h"
61 /* define the PCI info for the cards we can control */
62 const struct pci_device_id cciss_pci_device_id[] = {
63 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
64 0x0E11, 0x4070, 0, 0, 0},
65 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
66 0x0E11, 0x4080, 0, 0, 0},
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
68 0x0E11, 0x4082, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4083, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
72 0x0E11, 0x409A, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
74 0x0E11, 0x409B, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409C, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409D, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x4091, 0, 0, 0},
81 {0,}
82 };
83 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
85 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
87 /* board_id = Subsystem Device ID & Vendor ID
88 * product = Marketing Name for the board
89 * access = Address of the struct of function pointers
90 */
91 static struct board_type products[] = {
92 { 0x40700E11, "Smart Array 5300", &SA5_access},
93 { 0x40800E11, "Smart Array 5i", &SA5B_access},
94 { 0x40820E11, "Smart Array 532", &SA5B_access},
95 { 0x40830E11, "Smart Array 5312", &SA5B_access},
96 { 0x409A0E11, "Smart Array 641", &SA5_access},
97 { 0x409B0E11, "Smart Array 642", &SA5_access},
98 { 0x409C0E11, "Smart Array 6400", &SA5_access},
99 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
100 { 0x40910E11, "Smart Array 6i", &SA5_access},
101 };
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT       30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* Define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define READ_AHEAD 128
#define NR_CMDS    128	/* #commands that can be outstanding */
#define MAX_CTLR   32

/* No sense in giving up our preallocated major numbers */
#if MAX_CTLR < 8
#error "cciss.c: MAX_CTLR must be 8 or greater"
#endif

/* Originally the cciss driver only supported 8 major numbers.
 * NOTE: body is parenthesized -- the previous unparenthesized form
 * expanded incorrectly inside any larger expression, e.g.
 * x % MAX_CTLR_ORIG or x * MAX_CTLR_ORIG. */
#define MAX_CTLR_ORIG (COMPAQ_CISS_MAJOR7 - COMPAQ_CISS_MAJOR + 1)

/* ULL suffix makes the 64-bit constant well-formed on 32-bit builds. */
#define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFFULL	/* 64 bit DMA */
124 #ifdef CONFIG_CISS_MONITOR_THREAD
125 static int cciss_monitor(void *ctlr);
126 static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
127 unsigned long count, int (*cciss_monitor)(void *), int *rc);
128 static u32 heartbeat_timer = 0;
129 #else
130 #define cciss_monitor(x)
131 #define kill_monitor_thead(x)
132 #endif
134 static ctlr_info_t *hba[MAX_CTLR];
135 static int map_major_to_ctlr[MAX_BLKDEV] = {0}; /* gets ctlr num from maj num */
137 static void do_cciss_request(request_queue_t *q);
138 static int cciss_open(struct inode *inode, struct file *filep);
139 static int cciss_release(struct inode *inode, struct file *filep);
140 static int cciss_ioctl(struct inode *inode, struct file *filep,
141 unsigned int cmd, unsigned long arg);
143 static int revalidate_logvol(kdev_t dev, int maxusage);
144 static int frevalidate_logvol(kdev_t dev);
145 #if 0
146 static int deregister_disk(int ctlr, int logvol);
147 static int register_new_disk(int cltr, int opened_vol, __u64 requested_lun);
148 static int cciss_rescan_disk(int cltr, int logvol);
149 #endif
151 static void cciss_getgeometry(int cntl_num);
153 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c);
154 static void start_io( ctlr_info_t *h);
#ifdef CONFIG_PROC_FS
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data);
static void cciss_procinit(int i);
#else
/* No procfs configured: provide a no-op so callers need no #ifdefs.
 * (A stray "XXX" token previously sat in the CONFIG_PROC_FS branch and
 * broke compilation whenever procfs support was enabled.) */
static void cciss_procinit(int i) {}
#endif /* CONFIG_PROC_FS */
172 static struct block_device_operations cciss_fops = {
173 open: cciss_open,
174 release: cciss_release,
175 ioctl: cciss_ioctl,
176 revalidate: frevalidate_logvol,
177 };
179 #include "cciss_scsi.c" /* For SCSI tape support */
#define ENG_GIG        1048576000	/* "engineering gigabyte" in bytes */
#define ENG_GIG_FACTOR (ENG_GIG/512)	/* same, in 512-byte sectors */
#define RAID_UNKNOWN   6		/* index of "UNKNOWN" below */
/* Human-readable RAID level names, indexed by drv->raid_level. */
static const char *raid_label[] = {
	"0", "4", "1(0+1)", "5", "5+1", "ADG", "UNKNOWN"
};
186 /*
187 * Report information about this controller.
188 */
189 #ifdef CONFIG_PROC_FS
190 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
191 int length, int *eof, void *data)
192 {
193 off_t pos = 0;
194 off_t len = 0;
195 int size, i, ctlr;
196 ctlr_info_t *h = (ctlr_info_t*)data;
197 drive_info_struct *drv;
198 unsigned long flags;
199 unsigned int vol_sz, vol_sz_frac;
201 spin_lock_irqsave(&io_request_lock, flags);
202 if (h->busy_configuring) {
203 spin_unlock_irqrestore(&io_request_lock, flags);
204 return -EBUSY;
205 }
206 h->busy_configuring = 1;
207 spin_unlock_irqrestore(&io_request_lock, flags);
209 ctlr = h->ctlr;
210 size = sprintf(buffer, "%s: HP %s Controller\n"
211 "Board ID: 0x%08lx\n"
212 "Firmware Version: %c%c%c%c\n"
213 "IRQ: %d\n"
214 "Logical drives: %d\n"
215 "Current Q depth: %d\n"
216 "Current # commands on controller: %d\n"
217 "Max Q depth since init: %d\n"
218 "Max # commands on controller since init: %d\n"
219 "Max SG entries since init: %d\n"
220 MONITOR_PERIOD_PATTERN
221 MONITOR_DEADLINE_PATTERN
222 MONITOR_STATUS_PATTERN
223 "\n",
224 h->devname,
225 h->product_name,
226 (unsigned long)h->board_id,
227 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
228 (unsigned int)h->intr,
229 h->num_luns,
230 h->Qdepth, h->commands_outstanding,
231 h->maxQsinceinit, h->max_outstanding, h->maxSG,
232 MONITOR_PERIOD_VALUE(h),
233 MONITOR_DEADLINE_VALUE(h),
234 CTLR_STATUS(h));
236 pos += size; len += size;
237 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
238 for(i=0; i<=h->highest_lun; i++) {
239 drv = &h->drv[i];
240 if (drv->nr_blocks == 0)
241 continue;
242 vol_sz = drv->nr_blocks/ENG_GIG_FACTOR;
243 vol_sz_frac = (drv->nr_blocks%ENG_GIG_FACTOR)*100/ENG_GIG_FACTOR;
245 if (drv->raid_level > 5)
246 drv->raid_level = RAID_UNKNOWN;
247 size = sprintf(buffer+len, "cciss/c%dd%d:"
248 "\t%4d.%02dGB\tRAID %s\n",
249 ctlr, i, vol_sz,vol_sz_frac,
250 raid_label[drv->raid_level]);
251 pos += size, len += size;
252 }
254 *eof = 1;
255 *start = buffer+offset;
256 len -= offset;
257 if (len>length)
258 len = length;
259 h->busy_configuring = 0;
260 return len;
261 }
263 static int
264 cciss_proc_write(struct file *file, const char *buffer,
265 unsigned long count, void *data)
266 {
267 unsigned char cmd[80];
268 int len;
269 ctlr_info_t *h = (ctlr_info_t *) data;
270 int rc;
272 if (count > sizeof(cmd)-1)
273 return -EINVAL;
274 if (copy_from_user(cmd, buffer, count))
275 return -EFAULT;
276 cmd[count] = '\0';
277 len = strlen(cmd);
278 if (cmd[len-1] == '\n')
279 cmd[--len] = '\0';
281 # ifdef CONFIG_CISS_SCSI_TAPE
282 if (strcmp("engage scsi", cmd)==0) {
283 rc = cciss_engage_scsi(h->ctlr);
284 if (rc != 0)
285 return -rc;
286 return count;
287 }
288 /* might be nice to have "disengage" too, but it's not
289 safely possible. (only 1 module use count, lock issues.) */
290 # endif
292 if (START_MONITOR_THREAD(h, cmd, count, cciss_monitor, &rc) == 0)
293 return rc;
295 return -EINVAL;
296 }
298 /*
299 * Get us a file in /proc/cciss that says something about each controller.
300 * Create /proc/cciss if it doesn't exist yet.
301 */
302 static void __init cciss_procinit(int i)
303 {
304 struct proc_dir_entry *pde;
306 if (proc_cciss == NULL) {
307 proc_cciss = proc_mkdir("cciss", proc_root_driver);
308 if (!proc_cciss) {
309 printk("cciss: proc_mkdir failed\n");
310 return;
311 }
312 }
314 pde = create_proc_read_entry(hba[i]->devname,
315 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
316 proc_cciss, cciss_proc_get_info, hba[i]);
317 pde->write_proc = cciss_proc_write;
318 }
319 #endif /* CONFIG_PROC_FS */
321 /*
322 * For operations that cannot sleep, a command block is allocated at init,
323 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
324 * which ones are free or in use. For operations that can wait for kmalloc
325 * to possible sleep, this routine can be called with get_from_pool set to 0.
326 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
327 */
328 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
329 {
330 CommandList_struct *c;
331 int i;
332 u64bit temp64;
333 dma_addr_t cmd_dma_handle, err_dma_handle;
335 if (!get_from_pool) {
336 c = (CommandList_struct *) pci_alloc_consistent(
337 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
338 if (c==NULL)
339 return NULL;
340 memset(c, 0, sizeof(CommandList_struct));
342 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
343 h->pdev, sizeof(ErrorInfo_struct),
344 &err_dma_handle);
346 if (c->err_info == NULL)
347 {
348 pci_free_consistent(h->pdev,
349 sizeof(CommandList_struct), c, cmd_dma_handle);
350 return NULL;
351 }
352 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
353 } else /* get it out of the controllers pool */
354 {
355 do {
356 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
357 if (i == NR_CMDS)
358 return NULL;
359 } while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
360 #ifdef CCISS_DEBUG
361 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
362 #endif
363 c = h->cmd_pool + i;
364 memset(c, 0, sizeof(CommandList_struct));
365 cmd_dma_handle = h->cmd_pool_dhandle
366 + i*sizeof(CommandList_struct);
367 c->err_info = h->errinfo_pool + i;
368 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
369 err_dma_handle = h->errinfo_pool_dhandle
370 + i*sizeof(ErrorInfo_struct);
371 h->nr_allocs++;
372 }
374 c->busaddr = (__u32) cmd_dma_handle;
375 temp64.val = (__u64) err_dma_handle;
376 c->ErrDesc.Addr.lower = temp64.val32.lower;
377 c->ErrDesc.Addr.upper = temp64.val32.upper;
378 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
380 c->ctlr = h->ctlr;
381 return c;
384 }
386 /*
387 * Frees a command block that was previously allocated with cmd_alloc().
388 */
389 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
390 {
391 int i;
392 u64bit temp64;
394 if (!got_from_pool) {
395 temp64.val32.lower = c->ErrDesc.Addr.lower;
396 temp64.val32.upper = c->ErrDesc.Addr.upper;
397 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
398 c->err_info, (dma_addr_t) temp64.val);
399 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
400 c, (dma_addr_t) c->busaddr);
401 } else
402 {
403 i = c - h->cmd_pool;
404 clear_bit(i%32, h->cmd_pool_bits+(i/32));
405 h->nr_frees++;
406 }
407 }
409 /*
410 * fills in the disk information.
411 */
412 static void cciss_geninit( int ctlr)
413 {
414 drive_info_struct *drv;
415 int i,j;
417 /* Loop through each real device */
418 hba[ctlr]->gendisk.nr_real = 0;
419 for(i=0; i< NWD; i++) {
420 drv = &(hba[ctlr]->drv[i]);
421 if (!(drv->nr_blocks))
422 continue;
423 hba[ctlr]->hd[i << NWD_SHIFT].nr_sects =
424 hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
426 /* for each partition */
427 for(j=0; j<MAX_PART; j++) {
428 hba[ctlr]->blocksizes[(i<<NWD_SHIFT) + j] = 1024;
430 hba[ctlr]->hardsizes[ (i<<NWD_SHIFT) + j] =
431 drv->block_size;
432 }
433 }
434 hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
435 }
438 void cciss_probe_devices(xen_disk_info_t *xdi)
439 {
440 int i, ctlr;
441 drive_info_struct *drv;
442 xen_disk_t *xd = &xdi->disks[xdi->count];
444 ctlr = 0; /* XXX SMH: only deal with 1 controller for now */
446 /* Bail if there is no controller. */
447 if ( hba[ctlr] == NULL )
448 return;
450 /* Loop through each real device */
451 for(i=0; i < NWD; i++) {
453 drv = &(hba[ctlr]->drv[i]);
455 if (!(drv->nr_blocks))
456 continue;
458 if ( xdi->count == xdi->max )
459 BUG();
461 hba[ctlr]->hd[i << NWD_SHIFT].nr_sects =
462 hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
464 /* We export 'raw' linux device numbers to domain 0. */
465 xd->device = MKDEV(hba[ctlr]->major, i << 4);
466 xd->info = XD_TYPE_DISK; /* XXX should check properly */
467 xd->capacity = drv->nr_blocks; /* in terms of 512byte sectors */
468 xd->domain = 0;
470 xdi->count++;
471 xd++;
473 }
475 }
477 /*
478 * Open. Make sure the device is really there.
479 */
480 static int cciss_open(struct inode *inode, struct file *filep)
481 {
482 int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
483 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
485 #ifdef CCISS_DEBUG
486 printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
487 #endif /* CCISS_DEBUG */
489 if (ctlr > MAX_CTLR || hba[ctlr] == NULL || !CTLR_IS_ALIVE(hba[ctlr]))
490 return -ENXIO;
491 /*
492 * Root is allowed to open raw volume zero even if its not configured
493 * so array config can still work. Root is also allowed to open any
494 * volume that has a LUN ID, so it can issue IOCTL to reread the
495 * disk information. I don't think I really like this.
496 * but I'm already using way to many device nodes to claim another one
497 * for "raw controller".
498 */
499 if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) { /* not online? */
500 if (MINOR(inode->i_rdev) != 0) { /* not node 0? */
501 /* if not node 0 make sure it is a partition = 0 */
502 if (MINOR(inode->i_rdev) & 0x0f) {
503 return -ENXIO;
504 /* if it is, make sure we have a LUN ID */
505 } else if (hba[ctlr]->drv[MINOR(inode->i_rdev)
506 >> NWD_SHIFT].LunID == 0) {
507 return -ENXIO;
508 }
509 }
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
512 }
514 hba[ctlr]->drv[dsk].usage_count++;
515 hba[ctlr]->usage_count++;
516 return 0;
517 }
518 /*
519 * Close. Sync first.
520 */
521 static int cciss_release(struct inode *inode, struct file *filep)
522 {
523 int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
524 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
526 #ifdef CCISS_DEBUG
527 printk(KERN_DEBUG "cciss_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
528 #endif /* CCISS_DEBUG */
530 /* fsync_dev(inode->i_rdev); */
532 hba[ctlr]->drv[dsk].usage_count--;
533 hba[ctlr]->usage_count--;
534 return 0;
535 }
537 /*
538 * ioctl
539 */
540 static int cciss_ioctl(struct inode *inode, struct file *filep,
541 unsigned int cmd, unsigned long arg)
542 {
543 #if 0
544 //int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
546 //int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
547 #endif
549 printk(KERN_ALERT "cciss_ioctl: Called BUT NOT SUPPORTED cmd=%x %lx\n", cmd, arg);
551 return -EBADRQC;
553 #if 0
555 #ifdef CCISS_DEBUG
556 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
557 #endif /* CCISS_DEBUG */
559 switch(cmd) {
560 case HDIO_GETGEO:
561 {
562 struct hd_geometry driver_geo;
563 if (hba[ctlr]->drv[dsk].cylinders) {
564 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
565 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
566 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
567 } else {
568 driver_geo.heads = 0xff;
569 driver_geo.sectors = 0x3f;
570 driver_geo.cylinders =
571 hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
572 }
573 driver_geo.start=
574 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
575 if (copy_to_user((void *) arg, &driver_geo,
576 sizeof( struct hd_geometry)))
577 return -EFAULT;
578 return 0;
579 }
580 case HDIO_GETGEO_BIG:
581 {
582 struct hd_big_geometry driver_geo;
583 if (hba[ctlr]->drv[dsk].cylinders) {
584 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
585 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
586 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
587 } else {
588 driver_geo.heads = 0xff;
589 driver_geo.sectors = 0x3f;
590 driver_geo.cylinders =
591 hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
592 }
593 driver_geo.start=
594 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
595 if (copy_to_user((void *) arg, &driver_geo,
596 sizeof( struct hd_big_geometry)))
597 return -EFAULT;
598 return 0;
599 }
600 case BLKRRPART:
601 if (!capable(CAP_SYS_ADMIN))
602 return -EPERM;
603 return revalidate_logvol(inode->i_rdev, 1);
604 case BLKGETSIZE:
605 case BLKGETSIZE64:
606 case BLKFLSBUF:
607 case BLKBSZSET:
608 case BLKBSZGET:
609 case BLKROSET:
610 case BLKROGET:
611 case BLKRASET:
612 case BLKRAGET:
613 case BLKPG:
614 case BLKELVGET:
615 case BLKELVSET:
616 return blk_ioctl(inode->i_rdev, cmd, arg);
617 case CCISS_GETPCIINFO:
618 {
619 cciss_pci_info_struct pciinfo;
621 if (!arg)
622 return -EINVAL;
623 pciinfo.bus = hba[ctlr]->pdev->bus->number;
624 pciinfo.dev_fn = hba[ctlr]->pdev->devfn;
625 pciinfo.board_id = hba[ctlr]->board_id;
626 if (copy_to_user((void *) arg, &pciinfo, sizeof( cciss_pci_info_struct )))
627 return -EFAULT;
628 return 0;
629 }
630 case CCISS_GETINTINFO:
631 {
632 cciss_coalint_struct intinfo;
633 ctlr_info_t *c = hba[ctlr];
635 if (!arg)
636 return -EINVAL;
637 intinfo.delay = readl(&c->cfgtable->HostWrite.CoalIntDelay);
638 intinfo.count = readl(&c->cfgtable->HostWrite.CoalIntCount);
639 if (copy_to_user((void *) arg, &intinfo, sizeof( cciss_coalint_struct )))
640 return -EFAULT;
641 return 0;
642 }
643 case CCISS_SETINTINFO:
644 {
645 cciss_coalint_struct intinfo;
646 ctlr_info_t *c = hba[ctlr];
647 unsigned long flags;
648 int i;
650 if (!arg)
651 return -EINVAL;
652 if (!capable(CAP_SYS_ADMIN))
653 return -EPERM;
654 if (copy_from_user(&intinfo, (void *) arg, sizeof( cciss_coalint_struct)))
655 return -EFAULT;
656 if ( (intinfo.delay == 0 ) && (intinfo.count == 0)) {
657 return -EINVAL;
658 }
660 spin_lock_irqsave(&io_request_lock, flags);
661 /* Can only safely update if no commands outstanding */
662 if (c->commands_outstanding > 0 ) {
663 spin_unlock_irqrestore(&io_request_lock, flags);
664 return -EINVAL;
665 }
666 /* Update the field, and then ring the doorbell */
667 writel( intinfo.delay,
668 &(c->cfgtable->HostWrite.CoalIntDelay));
669 writel( intinfo.count,
670 &(c->cfgtable->HostWrite.CoalIntCount));
671 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
673 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
674 if (!(readl(c->vaddr + SA5_DOORBELL)
675 & CFGTBL_ChangeReq))
676 break;
677 /* delay and try again */
678 udelay(1000);
679 }
680 spin_unlock_irqrestore(&io_request_lock, flags);
681 if (i >= MAX_IOCTL_CONFIG_WAIT)
682 /* there is an unlikely case where this can happen,
683 * involving hot replacing a failed 144 GB drive in a
684 * RAID 5 set just as we attempt this ioctl. */
685 return -EAGAIN;
686 return 0;
687 }
688 case CCISS_GETNODENAME:
689 {
690 NodeName_type NodeName;
691 ctlr_info_t *c = hba[ctlr];
692 int i;
694 if (!arg)
695 return -EINVAL;
696 for(i=0;i<16;i++)
697 NodeName[i] = readb(&c->cfgtable->ServerName[i]);
698 if (copy_to_user((void *) arg, NodeName, sizeof( NodeName_type)))
699 return -EFAULT;
700 return 0;
701 }
702 case CCISS_SETNODENAME:
703 {
704 NodeName_type NodeName;
705 ctlr_info_t *c = hba[ctlr];
706 unsigned long flags;
707 int i;
709 if (!arg)
710 return -EINVAL;
711 if (!capable(CAP_SYS_ADMIN))
712 return -EPERM;
714 if (copy_from_user(NodeName, (void *) arg, sizeof( NodeName_type)))
715 return -EFAULT;
717 spin_lock_irqsave(&io_request_lock, flags);
719 /* Update the field, and then ring the doorbell */
720 for(i=0;i<16;i++)
721 writeb( NodeName[i], &c->cfgtable->ServerName[i]);
723 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
725 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
726 if (!(readl(c->vaddr + SA5_DOORBELL)
727 & CFGTBL_ChangeReq))
728 break;
729 /* delay and try again */
730 udelay(1000);
731 }
732 spin_unlock_irqrestore(&io_request_lock, flags);
733 if (i >= MAX_IOCTL_CONFIG_WAIT)
734 /* there is an unlikely case where this can happen,
735 * involving hot replacing a failed 144 GB drive in a
736 * RAID 5 set just as we attempt this ioctl. */
737 return -EAGAIN;
738 return 0;
739 }
741 case CCISS_GETHEARTBEAT:
742 {
743 Heartbeat_type heartbeat;
744 ctlr_info_t *c = hba[ctlr];
746 if (!arg)
747 return -EINVAL;
748 heartbeat = readl(&c->cfgtable->HeartBeat);
749 if (copy_to_user((void *) arg, &heartbeat, sizeof( Heartbeat_type)))
750 return -EFAULT;
751 return 0;
752 }
753 case CCISS_GETBUSTYPES:
754 {
755 BusTypes_type BusTypes;
756 ctlr_info_t *c = hba[ctlr];
758 if (!arg)
759 return -EINVAL;
760 BusTypes = readl(&c->cfgtable->BusTypes);
761 if (copy_to_user((void *) arg, &BusTypes, sizeof( BusTypes_type) ))
762 return -EFAULT;
763 return 0;
764 }
765 case CCISS_GETFIRMVER:
766 {
767 FirmwareVer_type firmware;
769 if (!arg)
770 return -EINVAL;
771 memcpy(firmware, hba[ctlr]->firm_ver, 4);
773 if (copy_to_user((void *) arg, firmware, sizeof( FirmwareVer_type)))
774 return -EFAULT;
775 return 0;
776 }
777 case CCISS_GETDRIVVER:
778 {
779 DriverVer_type DriverVer = DRIVER_VERSION;
781 if (!arg)
782 return -EINVAL;
784 if (copy_to_user((void *) arg, &DriverVer, sizeof( DriverVer_type) ))
785 return -EFAULT;
786 return 0;
787 }
788 case CCISS_RESCANDISK:
789 {
790 return cciss_rescan_disk(ctlr, dsk);
791 }
792 case CCISS_DEREGDISK:
793 return deregister_disk(ctlr,dsk);
795 case CCISS_REGNEWD:
796 return register_new_disk(ctlr, dsk, 0);
797 case CCISS_REGNEWDISK:
798 {
799 __u64 new_logvol;
801 if (!arg)
802 return -EINVAL;
803 if (copy_from_user(&new_logvol, (void *) arg,
804 sizeof( __u64)))
805 return -EFAULT;
806 return register_new_disk(ctlr, dsk, new_logvol);
807 }
808 case CCISS_GETLUNINFO:
809 {
810 LogvolInfo_struct luninfo;
811 int num_parts = 0;
812 int i, start;
814 luninfo.LunID = hba[ctlr]->drv[dsk].LunID;
815 luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;
817 /* count partitions 1 to 15 with sizes > 0 */
818 start = (dsk << NWD_SHIFT);
819 for(i=1; i <MAX_PART; i++) {
820 int minor = start+i;
821 if (hba[ctlr]->sizes[minor] != 0)
822 num_parts++;
823 }
824 luninfo.num_parts = num_parts;
825 if (copy_to_user((void *) arg, &luninfo,
826 sizeof( LogvolInfo_struct) ))
827 return -EFAULT;
828 return 0;
829 }
830 #if 0
831 case CCISS_PASSTHRU:
832 {
833 IOCTL_Command_struct iocommand;
834 ctlr_info_t *h = hba[ctlr];
835 CommandList_struct *c;
836 char *buff = NULL;
837 u64bit temp64;
838 unsigned long flags;
839 DECLARE_COMPLETION(wait);
841 if (!arg)
842 return -EINVAL;
844 if (!capable(CAP_SYS_RAWIO))
845 return -EPERM;
847 if (copy_from_user(&iocommand, (void *) arg, sizeof( IOCTL_Command_struct) ))
848 return -EFAULT;
849 if ((iocommand.buf_size < 1) &&
850 (iocommand.Request.Type.Direction
851 != XFER_NONE)) {
852 return -EINVAL;
853 }
854 /* Check kmalloc limits */
855 if (iocommand.buf_size > 128000)
856 return -EINVAL;
857 if (iocommand.buf_size > 0) {
858 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
859 if (buff == NULL)
860 return -ENOMEM;
861 }
862 if (iocommand.Request.Type.Direction == XFER_WRITE) {
863 /* Copy the data into the buffer we created */
864 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
865 {
866 kfree(buff);
867 return -EFAULT;
868 }
869 }
870 if ((c = cmd_alloc(h , 0)) == NULL) {
871 kfree(buff);
872 return -ENOMEM;
873 }
874 /* Fill in the command type */
875 c->cmd_type = CMD_IOCTL_PEND;
876 /* Fill in Command Header */
877 c->Header.ReplyQueue = 0; /* unused in simple mode */
878 if (iocommand.buf_size > 0) { /* buffer to fill */
879 c->Header.SGList = 1;
880 c->Header.SGTotal= 1;
881 } else { /* no buffers to fill */
882 c->Header.SGList = 0;
883 c->Header.SGTotal= 0;
884 }
885 c->Header.LUN = iocommand.LUN_info;
886 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
887 /* the cmd block for tag */
889 /* Fill in Request block */
890 c->Request = iocommand.Request;
892 /* Fill in the scatter gather information */
893 if (iocommand.buf_size > 0 ) {
894 temp64.val = pci_map_single( h->pdev, buff,
895 iocommand.buf_size,
896 PCI_DMA_BIDIRECTIONAL);
897 c->SG[0].Addr.lower = temp64.val32.lower;
898 c->SG[0].Addr.upper = temp64.val32.upper;
899 c->SG[0].Len = iocommand.buf_size;
900 c->SG[0].Ext = 0; /* we are not chaining */
901 }
902 c->waiting = &wait;
904 /* Put the request on the tail of the request queue */
905 spin_lock_irqsave(&io_request_lock, flags);
906 addQ(&h->reqQ, c);
907 h->Qdepth++;
908 start_io(h);
909 spin_unlock_irqrestore(&io_request_lock, flags);
911 wait_for_completion(&wait);
913 /* unlock the buffers from DMA */
914 temp64.val32.lower = c->SG[0].Addr.lower;
915 temp64.val32.upper = c->SG[0].Addr.upper;
916 pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
917 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
919 /* Copy the error information out */
920 iocommand.error_info = *(c->err_info);
921 if (copy_to_user((void *) arg, &iocommand,
922 sizeof( IOCTL_Command_struct) ) ) {
923 kfree(buff);
924 cmd_free(h, c, 0);
925 return( -EFAULT);
926 }
928 if (iocommand.Request.Type.Direction == XFER_READ) {
929 /* Copy the data out of the buffer we created */
930 if (copy_to_user(iocommand.buf, buff,
931 iocommand.buf_size)) {
932 kfree(buff);
933 cmd_free(h, c, 0);
934 return -EFAULT;
935 }
936 }
937 kfree(buff);
938 cmd_free(h, c, 0);
939 return 0;
940 }
941 case CCISS_BIG_PASSTHRU:
942 {
943 BIG_IOCTL_Command_struct iocommand;
944 ctlr_info_t *h = hba[ctlr];
945 CommandList_struct *c;
946 char *buff[MAXSGENTRIES] = {NULL,};
947 int buff_size[MAXSGENTRIES] = {0,};
948 u64bit temp64;
949 unsigned long flags;
950 BYTE sg_used = 0;
951 int status = 0;
952 int i;
953 DECLARE_COMPLETION(wait);
955 if (!arg)
956 return -EINVAL;
958 if (!capable(CAP_SYS_RAWIO))
959 return -EPERM;
961 if (copy_from_user(&iocommand, (void *) arg, sizeof( BIG_IOCTL_Command_struct) ))
962 return -EFAULT;
963 if ((iocommand.buf_size < 1) &&
964 (iocommand.Request.Type.Direction != XFER_NONE)) {
965 return -EINVAL;
966 }
967 /* Check kmalloc limits using all SGs */
968 if (iocommand.malloc_size > MAX_KMALLOC_SIZE)
969 return -EINVAL;
970 if (iocommand.buf_size > iocommand.malloc_size * MAXSGENTRIES)
971 return -EINVAL;
972 if (iocommand.buf_size > 0) {
973 __u32 size_left_alloc = iocommand.buf_size;
974 BYTE *data_ptr = (BYTE *) iocommand.buf;
975 while (size_left_alloc > 0) {
976 buff_size[sg_used] = (size_left_alloc
977 > iocommand.malloc_size)
978 ? iocommand.malloc_size : size_left_alloc;
979 buff[sg_used] = kmalloc( buff_size[sg_used],
980 GFP_KERNEL);
981 if (buff[sg_used] == NULL) {
982 status = -ENOMEM;
983 goto cleanup1;
984 }
985 if (iocommand.Request.Type.Direction ==
986 XFER_WRITE)
987 /* Copy the data into the buffer created */
988 if (copy_from_user(buff[sg_used], data_ptr,
989 buff_size[sg_used])) {
990 status = -ENOMEM;
991 goto cleanup1;
992 }
993 size_left_alloc -= buff_size[sg_used];
994 data_ptr += buff_size[sg_used];
995 sg_used++;
996 }
998 }
999 if ((c = cmd_alloc(h , 0)) == NULL) {
1000 status = -ENOMEM;
1001 goto cleanup1;
1003 /* Fill in the command type */
1004 c->cmd_type = CMD_IOCTL_PEND;
1005 /* Fill in Command Header */
1006 c->Header.ReplyQueue = 0; /* unused in simple mode */
1008 if (iocommand.buf_size > 0) { /* buffer to fill */
1009 c->Header.SGList = sg_used;
1010 c->Header.SGTotal= sg_used;
1011 } else { /* no buffers to fill */
1012 c->Header.SGList = 0;
1013 c->Header.SGTotal= 0;
1015 c->Header.LUN = iocommand.LUN_info;
1016 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
1017 /* the cmd block for tag */
1019 /* Fill in Request block */
1020 c->Request = iocommand.Request;
1021 /* Fill in the scatter gather information */
1022 if (iocommand.buf_size > 0 ) {
1023 int i;
1024 for(i=0; i< sg_used; i++) {
1025 temp64.val = pci_map_single( h->pdev, buff[i],
1026 buff_size[i],
1027 PCI_DMA_BIDIRECTIONAL);
1029 c->SG[i].Addr.lower = temp64.val32.lower;
1030 c->SG[i].Addr.upper = temp64.val32.upper;
1031 c->SG[i].Len = buff_size[i];
1032 c->SG[i].Ext = 0; /* we are not chaining */
1035 c->waiting = &wait;
1036 /* Put the request on the tail of the request queue */
1037 spin_lock_irqsave(&io_request_lock, flags);
1038 addQ(&h->reqQ, c);
1039 h->Qdepth++;
1040 start_io(h);
1041 spin_unlock_irqrestore(&io_request_lock, flags);
1042 wait_for_completion(&wait);
1043 /* unlock the buffers from DMA */
1044 for(i=0; i< sg_used; i++) {
1045 temp64.val32.lower = c->SG[i].Addr.lower;
1046 temp64.val32.upper = c->SG[i].Addr.upper;
1047 pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
1048 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1050 /* Copy the error information out */
1051 iocommand.error_info = *(c->err_info);
1052 if (copy_to_user((void *) arg, &iocommand,
1053 sizeof( IOCTL_Command_struct) ) ) {
1054 cmd_free(h, c, 0);
1055 status = -EFAULT;
1056 goto cleanup1;
1058 if (iocommand.Request.Type.Direction == XFER_READ) {
1059 /* Copy the data out of the buffer we created */
1060 BYTE *ptr = (BYTE *) iocommand.buf;
1061 for(i=0; i< sg_used; i++) {
1062 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1063 cmd_free(h, c, 0);
1064 status = -EFAULT;
1065 goto cleanup1;
1068 ptr += buff_size[i];
1071 cmd_free(h, c, 0);
1072 status = 0;
1075 cleanup1:
1076 for(i=0; i< sg_used; i++) {
1077 if (buff[i] != NULL)
1078 kfree(buff[i]);
1080 return status;
1082 #endif //PASSTHROUGH
1084 default:
1085 return -EBADRQC;
1088 #endif
1092 /* Borrowed and adapted from sd.c */
/*
 * revalidate_logvol - re-read the partition table of one logical volume.
 * Borrowed and adapted from sd.c.
 *
 * dev      - device number; the minor encodes the volume index (top bits,
 *            see NWD_SHIFT) and the major maps to a controller via
 *            map_major_to_ctlr[].
 * maxusage - highest usage_count at which revalidation is still allowed.
 *
 * Returns 0 on success, -EBUSY if the volume is busier than maxusage.
 * NOTE(review): hba[ctlr] is dereferenced without a NULL check -- callers
 * presumably only pass majors of registered controllers; confirm.
 */
1093 static int revalidate_logvol(kdev_t dev, int maxusage)
1095 int ctlr, target;
1096 struct gendisk *gdev;
1097 unsigned long flags;
1098 int max_p;
1099 int start;
1100 int i;
/* locate controller and logical volume from the device number */
1102 target = MINOR(dev) >> NWD_SHIFT;
1103 ctlr = map_major_to_ctlr[MAJOR(dev)];
1104 gdev = &(hba[ctlr]->gendisk);
/* refuse to revalidate while the volume is held open beyond maxusage */
1106 spin_lock_irqsave(&io_request_lock, flags);
1107 if (hba[ctlr]->drv[target].usage_count > maxusage) {
1108 spin_unlock_irqrestore(&io_request_lock, flags);
1109 printk(KERN_WARNING "cciss: Device busy for "
1110 "revalidation (usage=%d)\n",
1111 hba[ctlr]->drv[target].usage_count);
1112 return -EBUSY;
/* pin the volume while we rewrite its partition data */
1114 hba[ctlr]->drv[target].usage_count++;
1115 spin_unlock_irqrestore(&io_request_lock, flags);
1117 max_p = gdev->max_p;
1118 start = target << gdev->minor_shift;
/* invalidate and zero every partition minor of this volume */
1120 for(i=max_p-1; i>=0; i--) {
1121 int minor = start+i;
1122 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1123 gdev->part[minor].start_sect = 0;
1124 gdev->part[minor].nr_sects = 0;
1126 /* reset the blocksize so we can read the partition table */
1127 blksize_size[hba[ctlr]->major][minor] = 1024;
1129 /* setup partitions per disk */
1130 grok_partitions(gdev, target, MAX_PART,
1131 hba[ctlr]->drv[target].nr_blocks);
/* drop the pin taken above */
1132 hba[ctlr]->drv[target].usage_count--;
1133 return 0;
/*
 * frevalidate_logvol - block_device_operations revalidate hook.
 * Delegates to revalidate_logvol() with maxusage 0, i.e. the volume
 * must not be held open by anyone else.
 */
1136 static int frevalidate_logvol(kdev_t dev)
1138 #ifdef CCISS_DEBUG
1139 printk(KERN_DEBUG "cciss: frevalidate has been called\n");
1140 #endif /* CCISS_DEBUG */
1141 return revalidate_logvol(dev, 0);
1143 #if 0
/*
 * deregister_disk - remove a logical volume from the driver's tables.
 * (Currently compiled out under #if 0 in this Xen port.)
 *
 * Invalidates all partition minors of the volume, zeroes its size and
 * geometry records so open() fails and it vanishes from /proc/partitions,
 * and recomputes highest_lun / num_luns.
 *
 * Returns 0 on success, -EPERM without CAP_SYS_RAWIO, -EBUSY if the
 * volume is in use or another reconfiguration is in progress.
 */
1144 static int deregister_disk(int ctlr, int logvol)
1146 unsigned long flags;
1147 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1148 ctlr_info_t *h = hba[ctlr];
1149 int start, max_p, i;
1151 if (!capable(CAP_SYS_RAWIO))
1152 return -EPERM;
1154 spin_lock_irqsave(&io_request_lock, flags);
1155 /* make sure logical volume is NOT is use */
1156 if (h->drv[logvol].usage_count > 1 || h->busy_configuring) {
1157 spin_unlock_irqrestore(&io_request_lock, flags);
1158 return -EBUSY;
/* claim the configuration lock; cleared again before every return */
1160 h->busy_configuring = 1;
1161 spin_unlock_irqrestore(&io_request_lock, flags);
1163 /* invalidate the devices and deregister the disk */
1164 max_p = gdev->max_p;
1165 start = logvol << gdev->minor_shift;
1166 for (i=max_p-1; i>=0; i--) {
1167 int minor = start+i;
1168 /* printk("invalidating( %d %d)\n", ctlr, minor); */
1169 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1170 /* so open will now fail */
1171 h->sizes[minor] = 0;
1172 /* so it will no longer appear in /proc/partitions */
1173 gdev->part[minor].start_sect = 0;
1174 gdev->part[minor].nr_sects = 0;
1176 /* check to see if it was the last disk */
1177 if (logvol == h->highest_lun) {
1178 /* if so, find the new hightest lun */
1179 int i, newhighest =-1;
1180 for(i=0; i<h->highest_lun; i++) {
1181 /* if the disk has size > 0, it is available */
1182 if (h->sizes[i << gdev->minor_shift] != 0)
1183 newhighest = i;
/* newhighest stays -1 when no other volume remains */
1185 h->highest_lun = newhighest;
1188 --h->num_luns;
1189 gdev->nr_real = h->highest_lun+1;
1190 /* zero out the disk size info */
1191 h->drv[logvol].nr_blocks = 0;
1192 h->drv[logvol].block_size = 0;
1193 h->drv[logvol].cylinders = 0;
/* LunID == 0 marks this drv[] slot as free for register_new_disk() */
1194 h->drv[logvol].LunID = 0;
1195 h->busy_configuring = 0;
1196 return 0;
1198 static int sendcmd_withirq(__u8 cmd,
1199 int ctlr,
1200 void *buff,
1201 size_t size,
1202 unsigned int use_unit_num,
1203 unsigned int log_unit,
1204 __u8 page_code,
1205 __u8 cmdtype)
1207 ctlr_info_t *h = hba[ctlr];
1208 CommandList_struct *c;
1209 u64bit buff_dma_handle;
1210 unsigned long flags;
1211 int return_status = IO_OK;
1212 #if 0
1213 DECLARE_COMPLETION(wait);
1214 #else
1215 /* XXX SMH: no waiting for us ... spin instead */
1216 int wait = 1;
1217 int usecs = 0;
1218 #endif
1220 if ((c = cmd_alloc(h , 0)) == NULL)
1221 return -ENOMEM;
1222 c->cmd_type = CMD_IOCTL_PEND;
1223 /* Fill in Command Header */
1224 c->Header.ReplyQueue = 0; /* unused in simple mode */
1225 if (buff != NULL) { /* buffer to fill */
1226 c->Header.SGList = 1;
1227 c->Header.SGTotal= 1;
1228 } else {
1229 /* no buffers to fill */
1230 c->Header.SGList = 0;
1231 c->Header.SGTotal= 0;
1233 c->Header.Tag.lower = c->busaddr; /* tag is phys addr of cmd */
1234 /* Fill in Request block */
1235 c->Request.CDB[0] = cmd;
1236 c->Request.Type.Type = cmdtype;
1237 if (cmdtype == TYPE_CMD) {
1238 switch (cmd) {
1239 case CISS_INQUIRY:
1240 /* If the logical unit number is 0 then, this is going
1241 to controller so It's a physical command
1242 mode = 0 target = 0.
1243 So we have nothing to write.
1244 Otherwise
1245 mode = 1 target = LUNID
1246 */
1247 if (use_unit_num != 0) {
1248 c->Header.LUN.LogDev.VolId =
1249 hba[ctlr]->drv[log_unit].LunID;
1250 c->Header.LUN.LogDev.Mode = 1;
1252 if (page_code != 0) {
1253 c->Request.CDB[1] = 0x01;
1254 c->Request.CDB[2] = page_code;
1256 c->Request.CDBLen = 6;
1257 c->Request.Type.Attribute = ATTR_SIMPLE;
1258 c->Request.Type.Direction = XFER_READ; /* Read */
1259 c->Request.Timeout = 0; /* Don't time out */
1260 c->Request.CDB[4] = size & 0xFF;
1261 break;
1262 case CISS_REPORT_LOG:
1263 /* Talking to controller so It's a physical command
1264 mode = 00 target = 0.
1265 So we have nothing to write.
1266 */
1267 c->Request.CDBLen = 12;
1268 c->Request.Type.Attribute = ATTR_SIMPLE;
1269 c->Request.Type.Direction = XFER_READ; /* Read */
1270 c->Request.Timeout = 0; /* Don't time out */
1271 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
1272 c->Request.CDB[7] = (size >> 16) & 0xFF;
1273 c->Request.CDB[8] = (size >> 8) & 0xFF;
1274 c->Request.CDB[9] = size & 0xFF;
1275 break;
1276 case CCISS_READ_CAPACITY:
1277 c->Header.LUN.LogDev.VolId=
1278 hba[ctlr]->drv[log_unit].LunID;
1279 c->Header.LUN.LogDev.Mode = 1;
1280 c->Request.CDBLen = 10;
1281 c->Request.Type.Attribute = ATTR_SIMPLE;
1282 c->Request.Type.Direction = XFER_READ; /* Read */
1283 c->Request.Timeout = 0; /* Don't time out */
1284 break;
1285 default:
1286 printk(KERN_WARNING
1287 "cciss: Unknown Command 0x%x sent attempted\n", cmd);
1288 cmd_free(h, c, 1);
1289 return IO_ERROR;
1291 } else if (cmdtype == TYPE_MSG) {
1292 switch (cmd) {
1293 case 3: /* No-Op message */
1294 c->Request.CDBLen = 1;
1295 c->Request.Type.Attribute = ATTR_SIMPLE;
1296 c->Request.Type.Direction = XFER_WRITE;
1297 c->Request.Timeout = 0;
1298 c->Request.CDB[0] = cmd;
1299 break;
1300 default:
1301 printk(KERN_WARNING
1302 "cciss%d: unknown message type %d\n",
1303 ctlr, cmd);
1304 cmd_free(h, c, 1);
1305 return IO_ERROR;
1307 } else {
1308 printk(KERN_WARNING
1309 "cciss%d: unknown command type %d\n", ctlr, cmdtype);
1310 cmd_free(h, c, 1);
1311 return IO_ERROR;
1314 /* Fill in the scatter gather information */
1315 if (size > 0) {
1316 buff_dma_handle.val = (__u64) pci_map_single( h->pdev,
1317 buff, size, PCI_DMA_BIDIRECTIONAL);
1318 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1319 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1320 c->SG[0].Len = size;
1321 c->SG[0].Ext = 0; /* we are not chaining */
1324 resend_cmd2:
1326 #if 0
1327 c->waiting = &wait;
1328 #else
1329 /* XXX SMH: we spin instead of waiting... */
1330 c->waiting = (void *)&wait;
1331 #endif
1333 /* Put the request on the tail of the queue and send it */
1334 spin_lock_irqsave(&io_request_lock, flags);
1335 addQ(&h->reqQ, c);
1336 h->Qdepth++;
1337 start_io(h);
1338 spin_unlock_irqrestore(&io_request_lock, flags);
1340 #if 0
1341 wait_for_completion(&wait);
1342 #else
1343 /* XXX SMH: spin instead of waiting on wait queue */
1344 while(wait) {
1345 do_softirq();
1346 udelay(500);
1347 usecs += 500;
1348 if(usecs > 1000000) {
1349 printk("cciss: still waiting...!\n");
1350 usecs = 0;
1353 #endif
1356 if (c->err_info->CommandStatus != 0) {
1357 /* an error has occurred */
1358 switch (c->err_info->CommandStatus) {
1359 case CMD_TARGET_STATUS:
1360 printk(KERN_WARNING "cciss: cmd %p has "
1361 " completed with errors\n", c);
1362 if (c->err_info->ScsiStatus) {
1363 printk(KERN_WARNING "cciss: cmd %p "
1364 "has SCSI Status = %x\n", c,
1365 c->err_info->ScsiStatus);
1367 break;
1368 case CMD_DATA_UNDERRUN:
1369 case CMD_DATA_OVERRUN:
1370 /* expected for inquire and report lun commands */
1371 break;
1372 case CMD_INVALID:
1373 printk(KERN_WARNING "cciss: cmd %p is "
1374 "reported invalid\n", c);
1375 return_status = IO_ERROR;
1376 break;
1377 case CMD_PROTOCOL_ERR:
1378 printk(KERN_WARNING "cciss: cmd %p has "
1379 "protocol error \n", c);
1380 return_status = IO_ERROR;
1381 break;
1382 case CMD_HARDWARE_ERR:
1383 printk(KERN_WARNING "cciss: cmd %p had "
1384 " hardware error\n", c);
1385 return_status = IO_ERROR;
1386 break;
1387 case CMD_CONNECTION_LOST:
1388 printk(KERN_WARNING "cciss: cmd %p had "
1389 "connection lost\n", c);
1390 return_status = IO_ERROR;
1391 break;
1392 case CMD_ABORTED:
1393 printk(KERN_WARNING "cciss: cmd %p was "
1394 "aborted\n", c);
1395 return_status = IO_ERROR;
1396 break;
1397 case CMD_ABORT_FAILED:
1398 printk(KERN_WARNING "cciss: cmd %p reports "
1399 "abort failed\n", c);
1400 return_status = IO_ERROR;
1401 break;
1402 case CMD_UNSOLICITED_ABORT:
1403 printk(KERN_WARNING "cciss: cmd %p aborted "
1404 "do to an unsolicited abort\n", c);
1405 if (c->retry_count < MAX_CMD_RETRIES)
1407 printk(KERN_WARNING "retrying cmd\n");
1408 c->retry_count++;
1409 /* erase the old error */
1410 /* information */
1411 memset(c->err_info, 0,
1412 sizeof(ErrorInfo_struct));
1413 return_status = IO_OK;
1414 #if 0
1415 INIT_COMPLETION(wait);
1416 #else
1417 /* XXX SMH: spin instead of waiting. */
1418 wait = 0;
1419 #endif
1420 goto resend_cmd2;
1423 return_status = IO_ERROR;
1424 break;
1425 default:
1426 printk(KERN_WARNING "cciss: cmd %p returned "
1427 "unknown status %x\n", c,
1428 c->err_info->CommandStatus);
1429 return_status = IO_ERROR;
1433 /* unlock the buffers from DMA */
1434 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1435 size, PCI_DMA_BIDIRECTIONAL);
1436 cmd_free(h, c, 0);
1437 return return_status;
1439 static int register_new_disk(int ctlr, int opened_vol, __u64 requested_lun)
1441 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1442 ctlr_info_t *h = hba[ctlr];
1443 int start, max_p, i;
1444 int num_luns;
1445 int logvol;
1446 int new_lun_found = 0;
1447 int new_lun_index = 0;
1448 int free_index_found = 0;
1449 int free_index = 0;
1450 ReportLunData_struct *ld_buff;
1451 ReadCapdata_struct *size_buff;
1452 InquiryData_struct *inq_buff;
1453 int return_code;
1454 int listlength = 0;
1455 __u32 lunid = 0;
1456 unsigned int block_size;
1457 unsigned int total_size;
1458 unsigned long flags;
1459 int req_lunid = (int) (requested_lun & (__u64) 0xffffffff);
1461 if (!capable(CAP_SYS_RAWIO))
1462 return -EPERM;
1463 /* if we have no space in our disk array left to add anything */
1464 spin_lock_irqsave(&io_request_lock, flags);
1465 if (h->num_luns >= CISS_MAX_LUN) {
1466 spin_unlock_irqrestore(&io_request_lock, flags);
1467 return -EINVAL;
1469 if (h->busy_configuring) {
1470 spin_unlock_irqrestore(&io_request_lock, flags);
1471 return -EBUSY;
1473 h->busy_configuring = 1;
1474 spin_unlock_irqrestore(&io_request_lock, flags);
1476 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1477 if (ld_buff == NULL) {
1478 printk(KERN_ERR "cciss: out of memory\n");
1479 h->busy_configuring = 0;
1480 return -ENOMEM;
1482 memset(ld_buff, 0, sizeof(ReportLunData_struct));
1483 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1484 if (size_buff == NULL) {
1485 printk(KERN_ERR "cciss: out of memory\n");
1486 kfree(ld_buff);
1487 h->busy_configuring = 0;
1488 return -ENOMEM;
1490 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1491 if (inq_buff == NULL) {
1492 printk(KERN_ERR "cciss: out of memory\n");
1493 kfree(ld_buff);
1494 kfree(size_buff);
1495 h->busy_configuring = 0;
1496 return -ENOMEM;
1499 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1500 sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
1502 if (return_code == IO_OK) {
1503 listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
1504 } else {
1505 /* reading number of logical volumes failed */
1506 printk(KERN_WARNING "cciss: report logical volume"
1507 " command failed\n");
1508 listlength = 0;
1509 h->busy_configuring = 0;
1510 return -1;
1512 num_luns = listlength / 8; /* 8 bytes pre entry */
1513 if (num_luns > CISS_MAX_LUN)
1514 num_luns = CISS_MAX_LUN;
1516 #ifdef CCISS_DEBUG
1517 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
1518 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
1519 ld_buff->LUNListLength[3], num_luns);
1520 #endif
1521 for(i=0; i< num_luns; i++) {
1522 int j;
1523 int lunID_found = 0;
1525 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
1526 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
1527 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
1528 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1530 /* check to see if this is a new lun */
1531 for(j=0; j <= h->highest_lun; j++) {
1532 #ifdef CCISS_DEBUG
1533 printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
1534 lunid);
1535 #endif /* CCISS_DEBUG */
1536 if (h->drv[j].LunID == lunid) {
1537 lunID_found = 1;
1538 break;
1542 if (lunID_found == 1)
1543 continue;
1544 else { /* new lun found */
1546 #ifdef CCISS_DEBUG
1547 printk("new lun found at %d\n", i);
1548 #endif /* CCISS_DEBUG */
1549 if (req_lunid) /* we are looking for a specific lun */
1551 if (lunid != req_lunid)
1553 #ifdef CCISS_DEBUG
1554 printk("new lun %x is not %x\n",
1555 lunid, req_lunid);
1556 #endif /* CCISS_DEBUG */
1557 continue;
1560 new_lun_index = i;
1561 new_lun_found = 1;
1562 break;
1565 if (!new_lun_found) {
1566 printk(KERN_DEBUG "cciss: New Logical Volume not found\n");
1567 h->busy_configuring = 0;
1568 return -1;
1570 /* Now find the free index */
1571 for(i=0; i <CISS_MAX_LUN; i++) {
1572 #ifdef CCISS_DEBUG
1573 printk("Checking Index %d\n", i);
1574 #endif /* CCISS_DEBUG */
1575 if (hba[ctlr]->drv[i].LunID == 0) {
1576 #ifdef CCISS_DEBUG
1577 printk("free index found at %d\n", i);
1578 #endif /* CCISS_DEBUG */
1579 free_index_found = 1;
1580 free_index = i;
1581 break;
1584 if (!free_index_found) {
1585 printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
1586 h->busy_configuring = 0;
1587 return -1;
1590 logvol = free_index;
1591 hba[ctlr]->drv[logvol].LunID = lunid;
1592 /* there could be gaps in lun numbers, track hightest */
1593 if (hba[ctlr]->highest_lun < logvol)
1594 hba[ctlr]->highest_lun = logvol;
1596 memset(size_buff, 0, sizeof(ReadCapdata_struct));
1597 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr,
1598 size_buff, sizeof(ReadCapdata_struct), 1,
1599 logvol, 0, TYPE_CMD);
1600 if (return_code == IO_OK) {
1601 total_size = (0xff &
1602 (unsigned int) size_buff->total_size[0]) << 24;
1603 total_size |= (0xff &
1604 (unsigned int) size_buff->total_size[1]) << 16;
1605 total_size |= (0xff &
1606 (unsigned int) size_buff->total_size[2]) << 8;
1607 total_size |= (0xff &
1608 (unsigned int) size_buff->total_size[3]);
1609 total_size++; /* command returns highest block address */
1611 block_size = (0xff &
1612 (unsigned int) size_buff->block_size[0]) << 24;
1613 block_size |= (0xff &
1614 (unsigned int) size_buff->block_size[1]) << 16;
1615 block_size |= (0xff &
1616 (unsigned int) size_buff->block_size[2]) << 8;
1617 block_size |= (0xff &
1618 (unsigned int) size_buff->block_size[3]);
1619 } else {
1620 /* read capacity command failed */
1621 printk(KERN_WARNING "cciss: read capacity failed\n");
1622 total_size = 0;
1623 block_size = BLOCK_SIZE;
1625 printk(KERN_INFO " blocks= %d block_size= %d\n",
1626 total_size, block_size);
1627 /* Execute the command to read the disk geometry */
1628 memset(inq_buff, 0, sizeof(InquiryData_struct));
1629 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
1630 sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
1631 if (return_code == IO_OK) {
1632 if (inq_buff->data_byte[8] == 0xFF) {
1633 printk(KERN_WARNING
1634 "cciss: reading geometry failed, "
1635 "volume does not support reading geometry\n");
1637 hba[ctlr]->drv[logvol].block_size = block_size;
1638 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1639 hba[ctlr]->drv[logvol].heads = 255;
1640 hba[ctlr]->drv[logvol].sectors = 32; /* secs/trk */
1641 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1642 hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
1643 } else {
1644 hba[ctlr]->drv[logvol].block_size = block_size;
1645 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1646 hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
1647 hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
1648 hba[ctlr]->drv[logvol].cylinders =
1649 (inq_buff->data_byte[4] & 0xff) << 8;
1650 hba[ctlr]->drv[logvol].cylinders +=
1651 inq_buff->data_byte[5];
1652 hba[ctlr]->drv[logvol].raid_level =
1653 inq_buff->data_byte[8];
1655 } else {
1656 /* Get geometry failed */
1657 printk(KERN_WARNING "cciss: reading geometry failed, "
1658 "continuing with default geometry\n");
1660 hba[ctlr]->drv[logvol].block_size = block_size;
1661 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1662 hba[ctlr]->drv[logvol].heads = 255;
1663 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors per track */
1664 hba[ctlr]->drv[logvol].cylinders = total_size / 255 / 32;
1666 if (hba[ctlr]->drv[logvol].raid_level > 5)
1667 hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
1669 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
1670 hba[ctlr]->drv[logvol].heads,
1671 hba[ctlr]->drv[logvol].sectors,
1672 hba[ctlr]->drv[logvol].cylinders,
1673 raid_label[hba[ctlr]->drv[logvol].raid_level]);
1676 /* special case for c?d0, which may be opened even when
1677 it does not "exist". In that case, don't mess with usage count.
1678 Also, /dev/c1d1 could be used to re-add c0d0 so we can't just
1679 check whether logvol == 0, must check logvol != opened_vol */
1680 if (logvol != opened_vol)
1681 hba[ctlr]->drv[logvol].usage_count = 0;
1683 max_p = gdev->max_p;
1684 start = logvol<< gdev->minor_shift;
1685 hba[ctlr]->hd[start].nr_sects = total_size;
1686 hba[ctlr]->sizes[start] = total_size;
1688 for(i=max_p-1; i>=0; i--) {
1689 int minor = start+i;
1691 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1692 gdev->part[minor].start_sect = 0;
1693 gdev->part[minor].nr_sects = 0;
1695 /* reset the blocksize so we can read the partition table */
1696 blksize_size[hba[ctlr]->major][minor] = block_size;
1697 hba[ctlr]->hardsizes[minor] = block_size;
1702 ++hba[ctlr]->num_luns;
1703 gdev->nr_real = hba[ctlr]->highest_lun + 1;
1706 /* setup partitions per disk */
1707 grok_partitions(gdev, logvol, MAX_PART,
1708 hba[ctlr]->drv[logvol].nr_blocks);
1709 kfree(ld_buff);
1710 kfree(size_buff);
1711 kfree(inq_buff);
1712 h->busy_configuring = 0;
1713 return logvol;
1716 static int cciss_rescan_disk(int ctlr, int logvol)
1718 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1719 int start, max_p, i;
1720 ReadCapdata_struct *size_buff;
1721 InquiryData_struct *inq_buff;
1722 int return_code;
1723 unsigned int block_size;
1724 unsigned int total_size;
1726 if (!capable(CAP_SYS_RAWIO))
1727 return -EPERM;
1728 if (hba[ctlr]->sizes[logvol << NWD_SHIFT] != 0) {
1729 /* disk is possible on line, return just a warning */
1730 return 1;
1732 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1733 if (size_buff == NULL) {
1734 printk(KERN_ERR "cciss: out of memory\n");
1735 return -1;
1737 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1738 if (inq_buff == NULL) {
1739 printk(KERN_ERR "cciss: out of memory\n");
1740 kfree(size_buff);
1741 return -1;
1743 memset(size_buff, 0, sizeof(ReadCapdata_struct));
1744 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff,
1745 sizeof( ReadCapdata_struct), 1, logvol, 0,
1746 TYPE_CMD);
1747 if (return_code == IO_OK) {
1748 total_size = (0xff &
1749 (unsigned int)(size_buff->total_size[0])) << 24;
1750 total_size |= (0xff &
1751 (unsigned int)(size_buff->total_size[1])) << 16;
1752 total_size |= (0xff &
1753 (unsigned int)(size_buff->total_size[2])) << 8;
1754 total_size |= (0xff & (unsigned int)
1755 (size_buff->total_size[3]));
1756 total_size++; /* command returns highest block address */
1758 block_size = (0xff &
1759 (unsigned int)(size_buff->block_size[0])) << 24;
1760 block_size |= (0xff &
1761 (unsigned int)(size_buff->block_size[1])) << 16;
1762 block_size |= (0xff &
1763 (unsigned int)(size_buff->block_size[2])) << 8;
1764 block_size |= (0xff &
1765 (unsigned int)(size_buff->block_size[3]));
1766 } else { /* read capacity command failed */
1767 printk(KERN_WARNING "cciss: read capacity failed\n");
1768 total_size = block_size = 0;
1770 printk(KERN_INFO " blocks= %d block_size= %d\n",
1771 total_size, block_size);
1772 /* Execute the command to read the disk geometry */
1773 memset(inq_buff, 0, sizeof(InquiryData_struct));
1774 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
1775 sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
1776 if (return_code == IO_OK) {
1777 if (inq_buff->data_byte[8] == 0xFF) {
1778 printk(KERN_WARNING "cciss: reading geometry failed, "
1779 "volume does not support reading geometry\n");
1781 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1782 hba[ctlr]->drv[logvol].heads = 255;
1783 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors/track */
1784 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1785 } else {
1786 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1787 hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
1788 hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
1789 hba[ctlr]->drv[logvol].cylinders =
1790 (inq_buff->data_byte[4] & 0xff) << 8;
1791 hba[ctlr]->drv[logvol].cylinders +=
1792 inq_buff->data_byte[5];
1794 } else { /* Get geometry failed */
1795 printk(KERN_WARNING "cciss: reading geometry failed, "
1796 "continuing with default geometry\n");
1798 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1799 hba[ctlr]->drv[logvol].heads = 255;
1800 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors / track */
1801 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1804 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d \n\n",
1805 hba[ctlr]->drv[logvol].heads,
1806 hba[ctlr]->drv[logvol].sectors,
1807 hba[ctlr]->drv[logvol].cylinders);
1808 max_p = gdev->max_p;
1809 start = logvol<< gdev->minor_shift;
1810 hba[ctlr]->hd[start].nr_sects = hba[ctlr]->sizes[start]= total_size;
1812 for (i=max_p-1; i>=0; i--) {
1813 int minor = start+i;
1814 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1815 gdev->part[minor].start_sect = 0;
1816 gdev->part[minor].nr_sects = 0;
1818 /* reset the blocksize so we can read the partition table */
1819 blksize_size[hba[ctlr]->major][minor] = block_size;
1820 hba[ctlr]->hardsizes[minor] = block_size;
1823 /* setup partitions per disk */
1824 grok_partitions(gdev, logvol, MAX_PART,
1825 hba[ctlr]->drv[logvol].nr_blocks );
1827 kfree(size_buff);
1828 kfree(inq_buff);
1829 return 0;
1831 #endif
1833 /*
1834 * Wait polling for a command to complete.
1835 * The memory mapped FIFO is polled for the completion.
1836 * Used only at init time, interrupts disabled.
1837 */
1838 static unsigned long pollcomplete(int ctlr)
1840 unsigned long done;
1841 int i;
1843 /* Wait (up to 20 seconds) for a command to complete */
1845 for (i = 20 * HZ; i > 0; i--) {
1846 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1847 if (done == FIFO_EMPTY) {
1848 set_current_state(TASK_UNINTERRUPTIBLE);
1849 schedule_timeout(1);
1850 } else
1851 return done;
1853 /* Invalid address to tell caller we ran out of time */
1854 return 1;
1856 /*
1857 * Send a command to the controller, and wait for it to complete.
1858 * Only used at init time.
1859 */
1860 static int sendcmd(
1861 __u8 cmd,
1862 int ctlr,
1863 void *buff,
1864 size_t size,
1865 unsigned int use_unit_num, /* 0: address the controller,
1866 1: address logical volume log_unit,
1867 2: periph device address is scsi3addr */
1868 unsigned int log_unit,
1869 __u8 page_code,
1870 unsigned char *scsi3addr)
1872 CommandList_struct *c;
1873 int i;
1874 unsigned long complete;
1875 ctlr_info_t *info_p= hba[ctlr];
1876 u64bit buff_dma_handle;
1877 int status = IO_OK;
1879 c = cmd_alloc(info_p, 1);
1880 if (c == NULL) {
1881 printk(KERN_WARNING "cciss: unable to get memory");
1882 return IO_ERROR;
1884 /* Fill in Command Header */
1885 c->Header.ReplyQueue = 0; /* unused in simple mode */
1886 if (buff != NULL) { /* buffer to fill */
1887 c->Header.SGList = 1;
1888 c->Header.SGTotal= 1;
1889 } else { /* no buffers to fill */
1890 c->Header.SGList = 0;
1891 c->Header.SGTotal= 0;
1893 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
1894 /* the cmd block for tag */
1895 /* Fill in Request block */
1896 switch (cmd) {
1897 case CISS_INQUIRY:
1898 /* If the logical unit number is 0 then, this is going
1899 to controller so It's a physical command
1900 mode = 0 target = 0.
1901 So we have nothing to write.
1902 otherwise, if use_unit_num == 1,
1903 mode = 1(volume set addressing) target = LUNID
1904 otherwise, if use_unit_num == 2,
1905 mode = 0(periph dev addr) target = scsi3addr
1906 */
1907 if (use_unit_num == 1) {
1908 c->Header.LUN.LogDev.VolId=
1909 hba[ctlr]->drv[log_unit].LunID;
1910 c->Header.LUN.LogDev.Mode = 1;
1912 else if (use_unit_num == 2) {
1913 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1914 c->Header.LUN.LogDev.Mode = 0;
1915 /* phys dev addr */
1918 /* are we trying to read a vital product page */
1919 if (page_code != 0) {
1920 c->Request.CDB[1] = 0x01;
1921 c->Request.CDB[2] = page_code;
1923 c->Request.CDBLen = 6;
1924 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1925 c->Request.Type.Attribute = ATTR_SIMPLE;
1926 c->Request.Type.Direction = XFER_READ; /* Read */
1927 c->Request.Timeout = 0; /* Don't time out */
1928 c->Request.CDB[0] = CISS_INQUIRY;
1929 c->Request.CDB[4] = size & 0xFF;
1930 break;
1931 case CISS_REPORT_LOG:
1932 case CISS_REPORT_PHYS:
1933 /* Talking to controller so It's a physical command
1934 mode = 00 target = 0.
1935 So we have nothing to write.
1936 */
1937 c->Request.CDBLen = 12;
1938 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1939 c->Request.Type.Attribute = ATTR_SIMPLE;
1940 c->Request.Type.Direction = XFER_READ; /* Read */
1941 c->Request.Timeout = 0; /* Don't time out */
1942 c->Request.CDB[0] = cmd;
1943 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
1944 c->Request.CDB[7] = (size >> 16) & 0xFF;
1945 c->Request.CDB[8] = (size >> 8) & 0xFF;
1946 c->Request.CDB[9] = size & 0xFF;
1947 break;
1949 case CCISS_READ_CAPACITY:
1950 c->Header.LUN.LogDev.VolId=
1951 hba[ctlr]->drv[log_unit].LunID;
1952 c->Header.LUN.LogDev.Mode = 1;
1953 c->Request.CDBLen = 10;
1954 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1955 c->Request.Type.Attribute = ATTR_SIMPLE;
1956 c->Request.Type.Direction = XFER_READ; /* Read */
1957 c->Request.Timeout = 0; /* Don't time out */
1958 c->Request.CDB[0] = CCISS_READ_CAPACITY;
1959 break;
1960 case CCISS_CACHE_FLUSH:
1961 c->Request.CDBLen = 12;
1962 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1963 c->Request.Type.Attribute = ATTR_SIMPLE;
1964 c->Request.Type.Direction = XFER_WRITE; /* No data */
1965 c->Request.Timeout = 0; /* Don't time out */
1966 c->Request.CDB[0] = BMIC_WRITE; /* BMIC Passthru */
1967 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1968 break;
1969 default:
1970 printk(KERN_WARNING
1971 "cciss: Unknown Command 0x%x sent attempted\n",
1972 cmd);
1973 cmd_free(info_p, c, 1);
1974 return IO_ERROR;
1975 };
1976 /* Fill in the scatter gather information */
1977 if (size > 0) {
1978 buff_dma_handle.val = (__u64) pci_map_single( info_p->pdev,
1979 buff, size, PCI_DMA_BIDIRECTIONAL);
1980 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1981 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1982 c->SG[0].Len = size;
1983 c->SG[0].Ext = 0; /* we are not chaining */
1985 resend_cmd1:
1986 /*
1987 * Disable interrupt
1988 */
1989 #ifdef CCISS_DEBUG
1990 printk(KERN_DEBUG "cciss: turning intr off\n");
1991 #endif /* CCISS_DEBUG */
1992 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1994 /* Make sure there is room in the command FIFO */
1995 /* Actually it should be completely empty at this time. */
1996 for (i = 200000; i > 0; i--) {
1997 /* if fifo isn't full go */
1998 if (!(info_p->access.fifo_full(info_p))) {
2000 break;
2002 udelay(10);
2003 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2004 " waiting!\n", ctlr);
2006 /*
2007 * Send the cmd
2008 */
2009 info_p->access.submit_command(info_p, c);
2010 complete = pollcomplete(ctlr);
2012 #ifdef CCISS_DEBUG
2013 printk(KERN_DEBUG "cciss: command completed\n");
2014 #endif /* CCISS_DEBUG */
2016 if (complete != 1) {
2017 if ( (complete & CISS_ERROR_BIT)
2018 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2019 /* if data overrun or underun on Report command
2020 ignore it
2021 */
2022 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2023 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2024 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2025 ((c->err_info->CommandStatus ==
2026 CMD_DATA_OVERRUN) ||
2027 (c->err_info->CommandStatus ==
2028 CMD_DATA_UNDERRUN)
2029 )) {
2030 complete = c->busaddr;
2031 } else {
2032 if (c->err_info->CommandStatus ==
2033 CMD_UNSOLICITED_ABORT) {
2034 printk(KERN_WARNING "cciss: "
2035 "cmd %p aborted do "
2036 "to an unsolicited abort \n", c);
2037 if (c->retry_count < MAX_CMD_RETRIES) {
2038 printk(KERN_WARNING
2039 "retrying cmd\n");
2040 c->retry_count++;
2041 /* erase the old error */
2042 /* information */
2043 memset(c->err_info, 0,
2044 sizeof(ErrorInfo_struct));
2045 goto resend_cmd1;
2046 } else {
2047 printk(KERN_WARNING
2048 "retried to many times\n");
2049 status = IO_ERROR;
2050 goto cleanup1;
2053 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2054 " Error %x \n", ctlr,
2055 c->err_info->CommandStatus);
2056 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2057 " offensive info\n"
2058 " size %x\n num %x value %x\n", ctlr,
2059 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2060 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2061 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2062 status = IO_ERROR;
2063 goto cleanup1;
2066 if (complete != c->busaddr) {
2067 printk( KERN_WARNING "cciss cciss%d: SendCmd "
2068 "Invalid command list address returned! (%lx)\n",
2069 ctlr, complete);
2070 status = IO_ERROR;
2071 goto cleanup1;
2073 } else {
2074 printk( KERN_WARNING
2075 "cciss cciss%d: SendCmd Timeout out, "
2076 "No command list address returned!\n",
2077 ctlr);
2078 status = IO_ERROR;
2081 cleanup1:
2082 /* unlock the data buffer from DMA */
2083 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2084 size, PCI_DMA_BIDIRECTIONAL);
2085 cmd_free(info_p, c, 1);
2086 return status;
2088 /*
2089 * Map (physical) PCI mem into (virtual) kernel space
2090 */
2091 static ulong remap_pci_mem(ulong base, ulong size)
2093 ulong page_base = ((ulong) base) & PAGE_MASK;
2094 ulong page_offs = ((ulong) base) - page_base;
2095 ulong page_remapped = (ulong) ioremap(page_base, page_offs+size);
2097 return (ulong) (page_remapped ? (page_remapped + page_offs) : 0UL);
2100 /*
2101 * Enqueuing and dequeuing functions for cmdlists.
2102 */
2103 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
2105 if (*Qptr == NULL) {
2106 *Qptr = c;
2107 c->next = c->prev = c;
2108 } else {
2109 c->prev = (*Qptr)->prev;
2110 c->next = (*Qptr);
2111 (*Qptr)->prev->next = c;
2112 (*Qptr)->prev = c;
2116 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
2117 CommandList_struct *c)
2119 if (c && c->next != c) {
2120 if (*Qptr == c) *Qptr = c->next;
2121 c->prev->next = c->next;
2122 c->next->prev = c->prev;
2123 } else {
2124 *Qptr = NULL;
2126 return c;
2129 /*
2130 * Takes jobs of the Q and sends them to the hardware, then puts it on
2131 * the Q to wait for completion.
2132 */
2133 static void start_io( ctlr_info_t *h)
2135 CommandList_struct *c;
2137 while(( c = h->reqQ) != NULL ) {
2138 /* can't do anything if fifo is full */
2139 if ((h->access.fifo_full(h))) {
2140 printk(KERN_WARNING "cciss: fifo full \n");
2141 return;
2143 /* Get the frist entry from the Request Q */
2144 removeQ(&(h->reqQ), c);
2145 h->Qdepth--;
2147 /* Tell the controller execute command */
2148 h->access.submit_command(h, c);
2150 /* Put job onto the completed Q */
2151 addQ (&(h->cmpQ), c);
2155 static inline void complete_buffers( struct buffer_head *bh, int status)
2157 struct buffer_head *xbh;
2159 while(bh) {
2160 xbh = bh->b_reqnext;
2161 bh->b_reqnext = NULL;
2162 blk_finished_io(bh->b_size >> 9);
2163 bh->b_end_io(bh, status);
2164 bh = xbh;
2167 /* This code assumes io_request_lock is already held */
2168 /* Zeros out the error record and then resends the command back */
2169 /* to the controller */
2170 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2172 /* erase the old error information */
2173 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2175 /* add it to software queue and then send it to the controller */
2176 addQ(&(h->reqQ),c);
2177 h->Qdepth++;
2178 if (h->Qdepth > h->maxQsinceinit)
2179 h->maxQsinceinit = h->Qdepth;
2181 start_io(h);
/* checks the status of the job and calls complete buffers to mark all
 * buffers for the completed job.
 *
 * Decodes the controller's error record into a pass/fail status,
 * resends commands that were aborted unsolicited (up to
 * MAX_CMD_RETRIES), then unmaps the scatter-gather DMA pages and
 * completes the block-layer request.  'timeout' nonzero marks the
 * command as already failed by timeout.
 */
static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
	int timeout)
{
	int status = 1;		/* 1 = success, 0 = failure (buffer_head status) */
	int retry_cmd = 0;	/* set when the command should be resent instead */
	int i, ddir;
	u64bit temp64;

	if (timeout)
		status = 0;

	if (cmd->err_info->CommandStatus != 0) {
		/* an error has occurred */
		switch (cmd->err_info->CommandStatus) {
			unsigned char sense_key;
			case CMD_TARGET_STATUS:
				status = 0;

				if (cmd->err_info->ScsiStatus == 0x02) {
					printk(KERN_WARNING "cciss: cmd %p "
						"has CHECK CONDITION,"
						" sense key = 0x%x\n", cmd,
						cmd->err_info->SenseInfo[2]);
					/* check the sense key */
					sense_key = 0xf &
						cmd->err_info->SenseInfo[2];
					/* recovered error: the command actually
					 * succeeded, so report success */
					if ( sense_key == 0x1)
						status = 1;
				} else {
					printk(KERN_WARNING "cciss: cmd %p "
						"has SCSI Status 0x%x\n",
						cmd, cmd->err_info->ScsiStatus);
				}
				break;
			case CMD_DATA_UNDERRUN:
				/* under/overrun are reported but not fatal */
				printk(KERN_WARNING "cciss: cmd %p has"
					" completed with data underrun "
					"reported\n", cmd);
				break;
			case CMD_DATA_OVERRUN:
				printk(KERN_WARNING "cciss: cmd %p has"
					" completed with data overrun "
					"reported\n", cmd);
				break;
			case CMD_INVALID:
				printk(KERN_WARNING "cciss: cmd %p is "
					"reported invalid\n", cmd);
				status = 0;
				break;
			case CMD_PROTOCOL_ERR:
				printk(KERN_WARNING "cciss: cmd %p has "
					"protocol error \n", cmd);
				status = 0;
				break;
			case CMD_HARDWARE_ERR:
				printk(KERN_WARNING "cciss: cmd %p had "
					" hardware error\n", cmd);
				status = 0;
				break;
			case CMD_CONNECTION_LOST:
				printk(KERN_WARNING "cciss: cmd %p had "
					"connection lost\n", cmd);
				status=0;
				break;
			case CMD_ABORTED:
				printk(KERN_WARNING "cciss: cmd %p was "
					"aborted\n", cmd);
				status=0;
				break;
			case CMD_ABORT_FAILED:
				printk(KERN_WARNING "cciss: cmd %p reports "
					"abort failed\n", cmd);
				status=0;
				break;
			case CMD_UNSOLICITED_ABORT:
				printk(KERN_WARNING "cciss: cmd %p aborted do "
					"to an unsolicited abort \n",
					cmd);
				/* unsolicited aborts are transient: retry a
				 * bounded number of times before giving up */
				if (cmd->retry_count < MAX_CMD_RETRIES) {
					retry_cmd=1;
					printk(KERN_WARNING
						"retrying cmd\n");
					cmd->retry_count++;
				} else {
					printk(KERN_WARNING
						"retried to many times\n");
				}
				status=0;
				break;
			case CMD_TIMEOUT:
				printk(KERN_WARNING "cciss: cmd %p timedout\n",
					cmd);
				status=0;
				break;
			default:
				printk(KERN_WARNING "cciss: cmd %p returned "
					"unknown status %x\n", cmd,
					cmd->err_info->CommandStatus);
				status=0;
		}
	}
	/* We need to return this command */
	if (retry_cmd) {
		resend_cciss_cmd(h,cmd);
		return;
	}
	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;
	for(i=0; i<cmd->Header.SGList; i++) {
		/* reassemble the 64-bit bus address from its two halves */
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(hba[cmd->ctlr]->pdev,
			temp64.val, cmd->SG[i].Len, ddir);
	}
	complete_buffers(cmd->rq->bh, status);
#ifdef CCISS_DEBUG
	printk("Done with %p\n", cmd->rq);
#endif /* CCISS_DEBUG */
	end_that_request_last(cmd->rq);
	cmd_free(h,cmd,1);
}
2314 static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
2315 int max_segments)
2317 if (rq->nr_segments < MAXSGENTRIES) {
2318 rq->nr_segments++;
2319 return 1;
2321 return 0;
2324 static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
2325 struct buffer_head *bh, int max_segments)
2327 if (blk_seg_merge_ok(rq->bhtail, bh))
2328 return 1;
2329 return cpq_new_segment(q, rq, max_segments);
2332 static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
2333 struct buffer_head *bh, int max_segments)
2335 if (blk_seg_merge_ok(bh, rq->bh))
2336 return 1;
2337 return cpq_new_segment(q, rq, max_segments);
2340 static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
2341 struct request *nxt, int max_segments)
2343 int total_segments = rq->nr_segments + nxt->nr_segments;
2345 if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
2346 total_segments--;
2348 if (total_segments > MAXSGENTRIES)
2349 return 0;
2351 rq->nr_segments = total_segments;
2352 return 1;
/*
 * Get a request and submit it to the controller.
 * Currently we do one request at a time.  Ideally we would like to send
 * everything to the controller on the first call, but there is a danger
 * of holding the io_request_lock for to long.
 *
 * Entered with io_request_lock held.  The lock is dropped while the
 * scatter-gather list is built and DMA-mapped, then retaken before the
 * command is queued — the command is not visible to the hardware or
 * the interrupt handler during the unlocked window.
 */
static void do_cciss_request(request_queue_t *q)
{
	ctlr_info_t *h= q->queuedata;
	CommandList_struct *c;
	int log_unit, start_blk, seg;
	unsigned long long lastdataend;	/* phys addr just past previous segment */
	struct buffer_head *bh;
	struct list_head *queue_head = &q->queue_head;
	struct request *creq;
	u64bit temp64;
	struct scatterlist tmp_sg[MAXSGENTRIES];
	int i, ddir;

	if (q->plugged)
		goto startio;

next:
	if (list_empty(queue_head))
		goto startio;

	creq = blkdev_entry_next_request(queue_head);
	if (creq->nr_segments > MAXSGENTRIES)
		BUG();

	/* a request routed to the wrong controller is failed outright */
	if( h->ctlr != map_major_to_ctlr[MAJOR(creq->rq_dev)] ) {
		printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
			h->ctlr, creq->rq_dev, creq);
		blkdev_dequeue_request(creq);
		complete_buffers(creq->bh, 0);
		end_that_request_last(creq);
		goto startio;
	}

	/* make sure controller is alive. */
	if (!CTLR_IS_ALIVE(h)) {
		printk(KERN_WARNING "cciss%d: I/O quit ", h->ctlr);
		blkdev_dequeue_request(creq);
		complete_buffers(creq->bh, 0);
		end_that_request_last(creq);
		return;
	}

	/* no free command block: leave the request queued and try later */
	if (( c = cmd_alloc(h, 1)) == NULL)
		goto startio;

	blkdev_dequeue_request(creq);

	/* drop the lock while we build and DMA-map the SG list */
	spin_unlock_irq(&io_request_lock);

	c->cmd_type = CMD_RWREQ;
	c->rq = creq;
	bh = creq->bh;

	/* fill in the request */
	log_unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
	c->Header.ReplyQueue = 0;		/* unused in simple mode */
	c->Header.Tag.lower = c->busaddr;	/* use the physical address */
						/* the cmd block for tag */
	c->Header.LUN.LogDev.VolId= hba[h->ctlr]->drv[log_unit].LunID;
	c->Header.LUN.LogDev.Mode = 1;
	c->Request.CDBLen = 10;			/* 12 byte commands not in FW yet. */
	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction =
		(creq->cmd == READ) ? XFER_READ: XFER_WRITE;
	c->Request.Timeout = 0;			/* Don't time out */
	c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE;
	/* translate the partition-relative sector to an absolute block */
	start_blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect + creq->sector;
#ifdef CCISS_DEBUG
	if (bh == NULL)
		panic("cciss: bh== NULL?");
	printk(KERN_DEBUG "cciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
		(int) creq->nr_sectors);
#endif /* CCISS_DEBUG */
	/* coalesce physically contiguous buffer_heads into SG segments */
	seg = 0;
	lastdataend = ~0ULL;
	while(bh) {
		if (bh_phys(bh) == lastdataend)
		{ /* tack it on to the last segment */
			tmp_sg[seg-1].length +=bh->b_size;
			lastdataend += bh->b_size;
		} else {
			if (seg == MAXSGENTRIES)
				BUG();
			tmp_sg[seg].page = bh->b_page;
			tmp_sg[seg].length = bh->b_size;
			tmp_sg[seg].offset = bh_offset(bh);
			lastdataend = bh_phys(bh) + bh->b_size;
			seg++;
		}
		bh = bh->b_reqnext;
	}

	/* get the DMA records for the setup */
	if (c->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;
	for (i=0; i<seg; i++) {
		c->SG[i].Len = tmp_sg[i].length;
		temp64.val = pci_map_page(h->pdev, tmp_sg[i].page,
			tmp_sg[i].offset, tmp_sg[i].length, ddir);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Ext = 0;	/* we are not chaining */
	}
	/* track how many SG entries we are using */
	if (seg > h->maxSG)
		h->maxSG = seg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
#endif /* CCISS_DEBUG */

	c->Header.SGList = c->Header.SGTotal = seg;
	/* 10-byte CDB: 32-bit big-endian LBA, 16-bit big-endian count */
	c->Request.CDB[1]= 0;
	c->Request.CDB[2]= (start_blk >> 24) & 0xff;	/* MSB */
	c->Request.CDB[3]= (start_blk >> 16) & 0xff;
	c->Request.CDB[4]= (start_blk >> 8) & 0xff;
	c->Request.CDB[5]= start_blk & 0xff;
	c->Request.CDB[6]= 0;	/* (sect >> 24) & 0xff; MSB */
	c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
	c->Request.CDB[8]= creq->nr_sectors & 0xff;
	c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;

	/* retake the lock before making the command visible */
	spin_lock_irq(&io_request_lock);

	addQ(&(h->reqQ),c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto next;

startio:
	start_io(h);
}
/*
 * Interrupt handler: drain the controller's completion FIFO, match each
 * returned tag against the commands parked on cmpQ, and complete them.
 * Finishes by restarting any I/O still queued for this controller.
 */
static void do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	ctlr_info_t *h = dev_id;
	CommandList_struct *c;
	unsigned long flags;
	__u32 a, a1;

	/* Is this interrupt for us? */
	if ((h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
		return;

	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(&io_request_lock, flags);
	while( h->access.intr_pending(h)) {
		while((a = h->access.command_completed(h)) != FIFO_EMPTY) {
			a1 = a;
			/* low two bits of the tag are status flags, not
			 * part of the command's bus address */
			a &= ~3;
			if ((c = h->cmpQ) == NULL) {
				printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
				continue;
			}
			/* walk the circular completion queue for the tag;
			 * stop if we wrap back to the head */
			while(c->busaddr != a) {
				c = c->next;
				if (c == h->cmpQ)
					break;
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it
			 */
			if (c->busaddr == a) {
				removeQ(&h->cmpQ, c);
				if (c->cmd_type == CMD_RWREQ) {
					complete_command(h, c, 0);
				} else if (c->cmd_type == CMD_IOCTL_PEND) {
#if 0
					complete(c->waiting);
#else
					/* XXX SMH: use a flag to signal */
					if(*(int *)(c->waiting) != 0)
						*(int *)(c->waiting) = 0;
#endif
				}
# ifdef CONFIG_CISS_SCSI_TAPE
				else if (c->cmd_type == CMD_SCSI) {
					complete_scsi_command(c, 0, a1);
				}
# endif
				continue;
			}
		}
	}

	/*
	 * See if we can queue up some more IO
	 */
	do_cciss_request(BLK_DEFAULT_QUEUE(h->major));
	spin_unlock_irqrestore(&io_request_lock, flags);
}
2562 /*
2563 * We cannot read the structure directly, for portablity we must use
2564 * the io functions.
2565 * This is for debug only.
2566 */
2567 #ifdef CCISS_DEBUG
2568 static void print_cfg_table( CfgTable_struct *tb)
2570 int i;
2571 char temp_name[17];
2573 printk("Controller Configuration information\n");
2574 printk("------------------------------------\n");
2575 for(i=0;i<4;i++)
2576 temp_name[i] = readb(&(tb->Signature[i]));
2577 temp_name[4]='\0';
2578 printk(" Signature = %s\n", temp_name);
2579 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2580 printk(" Transport methods supported = 0x%x\n",
2581 readl(&(tb-> TransportSupport)));
2582 printk(" Transport methods active = 0x%x\n",
2583 readl(&(tb->TransportActive)));
2584 printk(" Requested transport Method = 0x%x\n",
2585 readl(&(tb->HostWrite.TransportRequest)));
2586 printk(" Coalese Interrupt Delay = 0x%x\n",
2587 readl(&(tb->HostWrite.CoalIntDelay)));
2588 printk(" Coalese Interrupt Count = 0x%x\n",
2589 readl(&(tb->HostWrite.CoalIntCount)));
2590 printk(" Max outstanding commands = 0x%d\n",
2591 readl(&(tb->CmdsOutMax)));
2592 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2593 for(i=0;i<16;i++)
2594 temp_name[i] = readb(&(tb->ServerName[i]));
2595 temp_name[16] = '\0';
2596 printk(" Server Name = %s\n", temp_name);
2597 printk(" Heartbeat Counter = 0x%x\n\n\n",
2598 readl(&(tb->HeartBeat)));
2600 #endif /* CCISS_DEBUG */
2602 static void release_io_mem(ctlr_info_t *c)
2604 /* if IO mem was not protected do nothing */
2605 if (c->io_mem_addr == 0)
2606 return;
2607 release_region(c->io_mem_addr, c->io_mem_length);
2608 c->io_mem_addr = 0;
2609 c->io_mem_length = 0;
2611 static int find_PCI_BAR_index(struct pci_dev *pdev,
2612 unsigned long pci_bar_addr)
2614 int i, offset, mem_type, bar_type;
2615 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2616 return 0;
2617 offset = 0;
2618 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2619 bar_type = pci_resource_flags(pdev, i) &
2620 PCI_BASE_ADDRESS_SPACE;
2621 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2622 offset += 4;
2623 else {
2624 mem_type = pci_resource_flags(pdev, i) &
2625 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2626 switch (mem_type) {
2627 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2628 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2629 offset += 4; /* 32 bit */
2630 break;
2631 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2632 offset += 8;
2633 break;
2634 default: /* reserved in PCI 2.2 */
2635 printk(KERN_WARNING "Base address is invalid\n");
2636 return -1;
2637 break;
2640 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2641 return i+1;
2643 return -1;
2646 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2648 ushort subsystem_vendor_id, subsystem_device_id, command;
2649 unchar irq = pdev->irq;
2650 __u32 board_id;
2651 __u64 cfg_offset;
2652 __u32 cfg_base_addr;
2653 __u64 cfg_base_addr_index;
2654 int i;
2656 /* check to see if controller has been disabled */
2657 /* BEFORE we try to enable it */
2658 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2659 if (!(command & 0x02)) {
2660 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2661 return -1;
2663 if (pci_enable_device(pdev)) {
2664 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2665 return -1;
2667 if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0) {
2668 printk(KERN_ERR "cciss: Unable to set DMA mask\n");
2669 return -1;
2672 subsystem_vendor_id = pdev->subsystem_vendor;
2673 subsystem_device_id = pdev->subsystem_device;
2674 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2675 subsystem_vendor_id );
2678 /* search for our IO range so we can protect it */
2679 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2680 /* is this an IO range */
2681 if (pci_resource_flags(pdev, i) & 0x01) {
2682 c->io_mem_addr = pci_resource_start(pdev, i);
2683 c->io_mem_length = pci_resource_end(pdev, i) -
2684 pci_resource_start(pdev, i) + 1;
2685 #ifdef CCISS_DEBUG
2686 printk("IO value found base_addr[%d] %lx %lx\n", i,
2687 c->io_mem_addr, c->io_mem_length);
2688 #endif /* CCISS_DEBUG */
2689 /* register the IO range */
2690 if (!request_region( c->io_mem_addr,
2691 c->io_mem_length, "cciss")) {
2692 printk(KERN_WARNING
2693 "cciss I/O memory range already in "
2694 "use addr=%lx length=%ld\n",
2695 c->io_mem_addr, c->io_mem_length);
2696 c->io_mem_addr= 0;
2697 c->io_mem_length = 0;
2699 break;
2703 #ifdef CCISS_DEBUG
2704 printk("command = %x\n", command);
2705 printk("irq = %x\n", irq);
2706 printk("board_id = %x\n", board_id);
2707 #endif /* CCISS_DEBUG */
2709 c->intr = irq;
2711 /*
2712 * Memory base addr is first addr , the second points to the config
2713 * table
2714 */
2716 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2717 #ifdef CCISS_DEBUG
2718 printk("address 0 = %x\n", c->paddr);
2719 #endif /* CCISS_DEBUG */
2720 c->vaddr = remap_pci_mem(c->paddr, 200);
2722 /* get the address index number */
2723 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2724 /* I am not prepared to deal with a 64 bit address value */
2725 cfg_base_addr &= (__u32) 0x0000ffff;
2726 #ifdef CCISS_DEBUG
2727 printk("cfg base address = %x\n", cfg_base_addr);
2728 #endif /* CCISS_DEBUG */
2729 cfg_base_addr_index =
2730 find_PCI_BAR_index(pdev, cfg_base_addr);
2731 #ifdef CCISS_DEBUG
2732 printk("cfg base address index = %x\n", cfg_base_addr_index);
2733 #endif /* CCISS_DEBUG */
2734 if (cfg_base_addr_index == -1) {
2735 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2736 release_io_mem(hba[i]);
2737 return -1;
2740 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2741 #ifdef CCISS_DEBUG
2742 printk("cfg offset = %x\n", cfg_offset);
2743 #endif /* CCISS_DEBUG */
2744 c->cfgtable = (CfgTable_struct *)
2745 remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index)
2746 + cfg_offset, sizeof(CfgTable_struct));
2747 c->board_id = board_id;
2749 #ifdef CCISS_DEBUG
2750 print_cfg_table(c->cfgtable);
2751 #endif /* CCISS_DEBUG */
2753 for(i=0; i<NR_PRODUCTS; i++) {
2754 if (board_id == products[i].board_id) {
2755 c->product_name = products[i].product_name;
2756 c->access = *(products[i].access);
2757 break;
2760 if (i == NR_PRODUCTS) {
2761 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2762 " to access the Smart Array controller %08lx\n",
2763 (unsigned long)board_id);
2764 return -1;
2766 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2767 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2768 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2769 (readb(&c->cfgtable->Signature[3]) != 'S') ) {
2770 printk("Does not appear to be a valid CISS config table\n");
2771 return -1;
2773 #ifdef CCISS_DEBUG
2774 printk("Trying to put board into Simple mode\n");
2775 #endif /* CCISS_DEBUG */
2776 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2777 /* Update the field, and then ring the doorbell */
2778 writel( CFGTBL_Trans_Simple,
2779 &(c->cfgtable->HostWrite.TransportRequest));
2780 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2782 /* Here, we wait, possibly for a long time, (4 secs or more).
2783 * In some unlikely cases, (e.g. A failed 144 GB drive in a
2784 * RAID 5 set was hot replaced just as we're coming in here) it
2785 * can take that long. Normally (almost always) we will wait
2786 * less than 1 sec. */
2787 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2788 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2789 break;
2790 /* delay and try again */
2791 set_current_state(TASK_INTERRUPTIBLE);
2792 schedule_timeout(1);
2795 #ifdef CCISS_DEBUG
2796 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2797 #endif /* CCISS_DEBUG */
2798 #ifdef CCISS_DEBUG
2799 print_cfg_table(c->cfgtable);
2800 #endif /* CCISS_DEBUG */
2802 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
2803 printk(KERN_WARNING "cciss: unable to get board into"
2804 " simple mode\n");
2805 return -1;
2807 return 0;
/*
 * Gets information about the local volumes attached to the controller.
 *
 * Queries firmware version, enumerates the logical volumes, and for
 * each one reads its capacity and geometry, filling in
 * hba[cntl_num]->drv[].  Falls back to a default 255-head / 32-sector
 * geometry when a volume does not report one.
 */
static void cciss_getgeometry(int cntl_num)
{
	ReportLunData_struct *ld_buff;
	ReadCapdata_struct *size_buff;
	InquiryData_struct *inq_buff;
	int return_code;
	int i;
	int listlength = 0;
	__u32 lunid = 0;
	int block_size;
	int total_size;

	ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
	if (ld_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		return;
	}
	memset(ld_buff, 0, sizeof(ReportLunData_struct));
	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
	if (size_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		kfree(ld_buff);
		return;
	}
	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		kfree(ld_buff);
		kfree(size_buff);
		return;
	}

	/* Get the firmware version */
	return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
		sizeof(InquiryData_struct), 0, 0 ,0, NULL);
	if (return_code == IO_OK) {
		hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
		hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
		hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
		hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
	} else { /* send command failed */
		printk(KERN_WARNING "cciss: unable to determine firmware"
			" version of controller\n");
	}

	/* Get the number of logical volumes */
	return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
		sizeof(ReportLunData_struct), 0, 0, 0, NULL);

	if (return_code == IO_OK) {
#ifdef CCISS_DEBUG
		printk("LUN Data\n--------------------------\n");
#endif /* CCISS_DEBUG */
		/* list length is returned big-endian */
		listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
	} else { /* reading number of logical volumes failed */
		printk(KERN_WARNING "cciss: report logical volume"
			" command failed\n");
		listlength = 0;
	}
	hba[cntl_num]->num_luns = listlength / 8; /* 8 bytes pre entry */
	if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
		printk(KERN_ERR "cciss:  only %d number of logical volumes supported\n",
			CISS_MAX_LUN);
		hba[cntl_num]->num_luns = CISS_MAX_LUN;
	}
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
		ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
		ld_buff->LUNListLength[3],  hba[cntl_num]->num_luns);
#endif /* CCISS_DEBUG */

	hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
	for(i=0; i< hba[cntl_num]->num_luns; i++) {
		/* assemble the 32-bit LUN id from its four bytes */
		lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
		lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
		hba[cntl_num]->drv[i].LunID = lunid;

#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "LUN[%d]:  %x %x %x %x = %x\n", i,
			ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2],
			ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
#endif /* CCISS_DEBUG */

		memset(size_buff, 0, sizeof(ReadCapdata_struct));
		return_code = sendcmd(CCISS_READ_CAPACITY, cntl_num, size_buff,
			sizeof( ReadCapdata_struct), 1, i, 0, NULL);
		if (return_code == IO_OK) {
			/* capacity fields are returned big-endian */
			total_size = (0xff &
				(unsigned int)(size_buff->total_size[0])) << 24;
			total_size |= (0xff &
				(unsigned int)(size_buff->total_size[1])) << 16;
			total_size |= (0xff &
				(unsigned int)(size_buff->total_size[2])) << 8;
			total_size |= (0xff & (unsigned int)
				(size_buff->total_size[3]));
			total_size++; 	/* command returns highest */
					/* block address */

			block_size = (0xff &
				(unsigned int)(size_buff->block_size[0])) << 24;
			block_size |= (0xff &
				(unsigned int)(size_buff->block_size[1])) << 16;
			block_size |= (0xff &
				(unsigned int)(size_buff->block_size[2])) << 8;
			block_size |= (0xff &
				(unsigned int)(size_buff->block_size[3]));
		} else { /* read capacity command failed */
			printk(KERN_WARNING "cciss: read capacity failed\n");
			total_size = block_size = 0;
		}
		printk(KERN_INFO "      blocks= %d block_size= %d\n",
			total_size, block_size);

		/* Execute the command to read the disk geometry */
		memset(inq_buff, 0, sizeof(InquiryData_struct));
		return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
			sizeof(InquiryData_struct), 1, i ,0xC1, NULL );
		if (return_code == IO_OK) {
			if (inq_buff->data_byte[8] == 0xFF) {
				/* volume cannot report geometry: use default */
				printk(KERN_WARNING "cciss: reading geometry failed, volume does not support reading geometry\n");

				hba[cntl_num]->drv[i].block_size = block_size;
				hba[cntl_num]->drv[i].nr_blocks = total_size;
				hba[cntl_num]->drv[i].heads = 255;
				hba[cntl_num]->drv[i].sectors = 32; 	/* Sectors */
									/* per track */
				hba[cntl_num]->drv[i].cylinders = total_size
					/ 255 / 32;
			} else {
				hba[cntl_num]->drv[i].block_size = block_size;
				hba[cntl_num]->drv[i].nr_blocks = total_size;
				hba[cntl_num]->drv[i].heads =
					inq_buff->data_byte[6];
				hba[cntl_num]->drv[i].sectors =
					inq_buff->data_byte[7];
				/* cylinder count is a big-endian 16-bit pair */
				hba[cntl_num]->drv[i].cylinders =
					(inq_buff->data_byte[4] & 0xff) << 8;
				hba[cntl_num]->drv[i].cylinders +=
					inq_buff->data_byte[5];
				hba[cntl_num]->drv[i].raid_level =
					inq_buff->data_byte[8];
			}
		}
		else { /* Get geometry failed */
			printk(KERN_WARNING "cciss: reading geometry failed, continuing with default geometry\n");

			hba[cntl_num]->drv[i].block_size = block_size;
			hba[cntl_num]->drv[i].nr_blocks = total_size;
			hba[cntl_num]->drv[i].heads = 255;
			hba[cntl_num]->drv[i].sectors = 32; 	/* Sectors */
								/* per track */
			hba[cntl_num]->drv[i].cylinders = total_size / 255 / 32;
		}
		if (hba[cntl_num]->drv[i].raid_level > 5)
			hba[cntl_num]->drv[i].raid_level = RAID_UNKNOWN;
		printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
			hba[cntl_num]->drv[i].heads,
			hba[cntl_num]->drv[i].sectors,
			hba[cntl_num]->drv[i].cylinders,
			raid_label[hba[cntl_num]->drv[i].raid_level]);
	}
	kfree(ld_buff);
	kfree(size_buff);
	kfree(inq_buff);
}
2982 /* Function to find the first free pointer into our hba[] array */
2983 /* Returns -1 if no free entries are left. */
2984 static int alloc_cciss_hba(void)
2986 int i;
2987 for(i=0; i< MAX_CTLR; i++) {
2988 if (hba[i] == NULL) {
2989 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2990 if (hba[i]==NULL) {
2991 printk(KERN_ERR "cciss: out of memory.\n");
2992 return -1;
2994 return i;
2997 printk(KERN_WARNING
2998 "cciss: This driver supports a maximum of %d controllers.\n"
2999 "You can change this value in cciss.c and recompile.\n",
3000 MAX_CTLR);
3001 return -1;
3004 static void free_hba(int i)
3006 kfree(hba[i]);
3007 hba[i]=NULL;
3009 #ifdef CONFIG_CISS_MONITOR_THREAD
/*
 * Called when the controller is believed dead (also serves as the
 * monitor watchdog timer handler): disable the PCI device, then fail
 * every outstanding command — queued or in flight — with a hardware
 * error so upper layers (e.g. multipath) can fail over.
 */
static void fail_all_cmds(unsigned long ctlr)
{
	/* If we get here, the board is apparently dead. */
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	unsigned long flags;

	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
	h->alive = 0;	/* the controller apparently died... */

	spin_lock_irqsave(&io_request_lock, flags);

	pci_disable_device(h->pdev); /* Make sure it is really dead. */

	/* move everything off the request queue onto the completed queue */
	while( (c = h->reqQ) != NULL ) {
		removeQ(&(h->reqQ), c);
		h->Qdepth--;
		addQ (&(h->cmpQ), c);
	}

	/* Now, fail everything on the completed queue with a HW error */
	while( (c = h->cmpQ) != NULL ) {
		removeQ(&h->cmpQ, c);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		if (c->cmd_type == CMD_RWREQ) {
			complete_command(h, c, 0);
		} else if (c->cmd_type == CMD_IOCTL_PEND)
			complete(c->waiting);
# ifdef CONFIG_CISS_SCSI_TAPE
		else if (c->cmd_type == CMD_SCSI)
			complete_scsi_command(c, 0, 0);
# endif
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
	return;
}
/*
 * Watchdog kernel thread.
 *
 * If the board fails, we ought to detect that.  So we periodically
 * send down a No-Op message and expect it to complete quickly.  If it
 * doesn't, then we assume the board is dead, and fail all commands.
 * This is useful mostly in a multipath configuration, so that failover
 * will happen.
 */
static int cciss_monitor(void *ctlr)
{
	int rc;
	ctlr_info_t *h = (ctlr_info_t *) ctlr;
	unsigned long flags;
	u32 current_timer;

	daemonize();
	exit_files(current);
	reparent_to_init();

	printk("cciss%d: Monitor thread starting.\n", h->ctlr);

	/* only listen to signals if the HA was loaded as a module.  */
#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
	siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
	sprintf(current->comm, "ccissmon%d", h->ctlr);
	h->monitor_thread = current;

	init_timer(&h->watchdog);
	h->watchdog.function = fail_all_cmds;
	h->watchdog.data = (unsigned long) h->ctlr;
	while (1) {
		/* check heartbeat timer: if the firmware heartbeat counter
		 * has not advanced since our last look, the board is dead */
		current_timer = readl(&h->cfgtable->HeartBeat);
		current_timer &= 0x0fffffff;
		if (heartbeat_timer == current_timer) {
			fail_all_cmds(h->ctlr);
			break;
		}
		else
			heartbeat_timer = current_timer;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(h->monitor_period * HZ);
		/* arm the watchdog: if the no-op below has not completed
		 * by the deadline, fail_all_cmds() fires from the timer */
		h->watchdog.expires = jiffies + HZ * h->monitor_deadline;
		add_timer(&h->watchdog);
		/* send down a trivial command (no op message) to ctlr */
		rc = sendcmd_withirq(3, h->ctlr, NULL, 0, 0, 0, 0, TYPE_MSG);
		del_timer(&h->watchdog);
		if (!CTLR_IS_ALIVE(h))
			break;
		if (signal_pending(current)) {
			printk(KERN_WARNING "%s received signal.\n",
				current->comm);
			break;
		}
		if (h->monitor_period == 0) /* zero period means exit thread */
			break;
	}
	printk(KERN_INFO "%s exiting.\n", current->comm);
	spin_lock_irqsave(&io_request_lock, flags);
	h->monitor_started = 0;
	h->monitor_thread = NULL;
	spin_unlock_irqrestore(&io_request_lock, flags);
	return 0;
}
/*
 * Parse a "monitor <seconds>" or "deadline <seconds>" command string
 * and (re)configure the monitor thread, starting it if it is not
 * already running.  On success *rc is set to 'count' and 0 is
 * returned; -1 means the command string was not recognized.
 */
static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
	unsigned long count, int (*cciss_monitor)(void *), int *rc)
{
	unsigned long flags;
	unsigned int new_period, old_period, new_deadline, old_deadline;

	if (strncmp("monitor", cmd, 7) == 0) {
		new_period = simple_strtol(cmd + 8, NULL, 10);
		/* take the current deadline under the lock */
		spin_lock_irqsave(&io_request_lock, flags);
		new_deadline = h->monitor_deadline;
		spin_unlock_irqrestore(&io_request_lock, flags);
	} else if (strncmp("deadline", cmd, 8) == 0) {
		new_deadline = simple_strtol(cmd + 9, NULL, 10);
		/* take the current period under the lock */
		spin_lock_irqsave(&io_request_lock, flags);
		new_period = h->monitor_period;
		spin_unlock_irqrestore(&io_request_lock, flags);
	} else
		return -1;
	/* clamp the period; zero is special and means "stop the thread" */
	if (new_period != 0 && new_period < CCISS_MIN_PERIOD)
		new_period = CCISS_MIN_PERIOD;
	if (new_period > CCISS_MAX_PERIOD)
		new_period = CCISS_MAX_PERIOD;
	/* the watchdog deadline must fall within one period */
	if (new_deadline >= new_period) {
		new_deadline = new_period - 5;
		printk(KERN_INFO "setting deadline to %d\n", new_deadline);
	}

	spin_lock_irqsave(&io_request_lock, flags);
	if (h->monitor_started != 0) {
		/* thread already running: just update its parameters */
		old_period = h->monitor_period;
		old_deadline = h->monitor_deadline;
		h->monitor_period = new_period;
		h->monitor_deadline = new_deadline;
		spin_unlock_irqrestore(&io_request_lock, flags);
		if (new_period == 0) {
			printk(KERN_INFO "cciss%d: stopping monitor thread\n",
				h->ctlr);
			*rc = count;
			return 0;
		}
		if (new_period != old_period)
			printk(KERN_INFO "cciss%d: adjusting monitor thread "
				"period from %d to %d seconds\n",
				h->ctlr, old_period, new_period);
		if (new_deadline != old_deadline)
			printk(KERN_INFO "cciss%d: adjusting monitor thread "
				"deadline from %d to %d seconds\n",
				h->ctlr, old_deadline, new_deadline);
		*rc = count;
		return 0;
	}
	/* first invocation: record parameters and spawn the thread */
	h->monitor_started = 1;
	h->monitor_period = new_period;
	h->monitor_deadline = new_deadline;
	spin_unlock_irqrestore(&io_request_lock, flags);
	kernel_thread(cciss_monitor, h, 0);
	*rc = count;
	return 0;
}
3169 static void kill_monitor_thread(ctlr_info_t *h)
3171 if (h->monitor_thread)
3172 send_sig(SIGKILL, h->monitor_thread, 1);
3174 #else
3175 #define kill_monitor_thread(h)
3176 #endif
3177 /*
3178 * This is it. Find all the controllers and register them. I really hate
3179 * stealing all these major device numbers.
3180 * returns the number of block devices registered.
3181 */
/*
 * PCI probe callback: bring one Smart Array controller online.
 * Allocates an hba[] slot, performs PCI-level init, hooks the board's
 * interrupt, allocates the command/error-info DMA pools, scans for
 * logical drives, and registers the block-device / gendisk (and SCSI
 * tape) interfaces.  Returns 1 on success, -1 on failure; every failure
 * path unwinds all setup performed so far.
 * NOTE(review): this listing appears to have dropped brace-only lines
 * (see the gaps in the embedded original numbering).
 */
3182 static int __init cciss_init_one(struct pci_dev *pdev,
3183 const struct pci_device_id *ent)
3185 request_queue_t *q;
3186 int i;
3187 int j;
3188 #if 0
3189 int rc;
3190 #endif
3192 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3193 " bus %d dev %d func %d\n",
3194 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3195 PCI_FUNC(pdev->devfn));
/* Reserve a controller slot; i is this controller's index into hba[]. */
3196 i = alloc_cciss_hba();
3197 if (i < 0 )
3198 return -1;
3199 memset(hba[i], 0, sizeof(ctlr_info_t));
/* PCI-level bring-up (BARs, config); on failure release the slot. */
3200 if (cciss_pci_init(hba[i], pdev) != 0) {
3201 free_hba(i);
3202 return -1;
3204 sprintf(hba[i]->devname, "cciss%d", i);
3205 hba[i]->ctlr = i;
3207 /* register with the major number, or get a dynamic major number */
3208 /* by passing 0 as argument */
/* Only static majors are used here; the dynamic-major path is the
   disabled #if 0 block below. */
3210 if (i < MAX_CTLR_ORIG)
3211 hba[i]->major = MAJOR_NR + i;
3213 hba[i]->pdev = pdev;
3214 ASSERT_CTLR_ALIVE(hba[i]);
3216 #if 0
3217 rc = (register_blkdev(hba[i]->major, hba[i]->devname, &cciss_fops));
3218 if (rc < 0) {
3219 printk(KERN_ERR "cciss: Unable to get major number "
3220 "%d for %s\n", hba[i]->major, hba[i]->devname);
3221 release_io_mem(hba[i]);
3222 free_hba(i);
3223 return -1;
3224 } else
3226 if (i < MAX_CTLR_ORIG) {
3227 hba[i]->major = MAJOR_NR + i;
3228 map_major_to_ctlr[MAJOR_NR + i] = i;
3229 } else {
3230 hba[i]->major = rc;
3231 map_major_to_ctlr[rc] = i;
3235 XXXX Need to register this...
3237 #endif
3239 /* make sure the board interrupts are off */
3240 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3241 if (request_irq(hba[i]->intr, do_cciss_intr,
3242 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3243 hba[i]->devname, hba[i])) {
3245 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3246 hba[i]->intr, hba[i]->devname);
3247 unregister_blkdev( hba[i]->major, hba[i]->devname);
3248 map_major_to_ctlr[hba[i]->major] = 0;
3249 release_io_mem(hba[i]);
3250 free_hba(i);
3251 return -1;
/* Per-controller resources: a free-slot bitmap plus DMA-coherent pools
   of NR_CMDS command blocks and matching error-info records. */
3253 hba[i]->cmd_pool_bits = (__u32*)kmalloc(
3254 ((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
3255 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3256 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3257 &(hba[i]->cmd_pool_dhandle));
3258 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3259 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3260 &(hba[i]->errinfo_pool_dhandle));
/* If any of the three allocations failed, free whichever succeeded
   and unwind the irq / blkdev / io-mem / hba-slot setup above. */
3261 if ((hba[i]->cmd_pool_bits == NULL)
3262 || (hba[i]->cmd_pool == NULL)
3263 || (hba[i]->errinfo_pool == NULL)) {
3265 if (hba[i]->cmd_pool_bits)
3266 kfree(hba[i]->cmd_pool_bits);
3267 if (hba[i]->cmd_pool)
3268 pci_free_consistent(hba[i]->pdev,
3269 NR_CMDS * sizeof(CommandList_struct),
3270 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3271 if (hba[i]->errinfo_pool)
3272 pci_free_consistent(hba[i]->pdev,
3273 NR_CMDS * sizeof( ErrorInfo_struct),
3274 hba[i]->errinfo_pool,
3275 hba[i]->errinfo_pool_dhandle);
3276 free_irq(hba[i]->intr, hba[i]);
3277 unregister_blkdev(hba[i]->major, hba[i]->devname);
3278 map_major_to_ctlr[hba[i]->major] = 0;
3279 release_io_mem(hba[i]);
3280 free_hba(i);
3281 printk( KERN_ERR "cciss: out of memory");
3282 return -1;
3285 /* Initialize the pdev driver private data.
3286 have it point to hba[i]. */
3287 pci_set_drvdata(pdev, hba[i]);
3288 /* command and error info recs zeroed out before
3289 they are used */
3290 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
3292 #ifdef CCISS_DEBUG
3293 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3294 #endif /* CCISS_DEBUG */
/* Discover the logical drives on this controller (helper defined
   earlier in the file). */
3296 cciss_getgeometry(i);
3298 cciss_find_non_disk_devices(i); /* find our tape drives, if any */
3300 /* Turn the interrupts on so we can service requests */
3301 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3303 cciss_procinit(i);
/* Set up the block request queue for this major.
   NOTE(review): queuedata is assigned before blk_init_queue() —
   confirm blk_init_queue() does not reinitialize/clobber it. */
3305 q = BLK_DEFAULT_QUEUE(hba[i]->major);
3306 q->queuedata = hba[i];
3307 blk_init_queue(q, do_cciss_request);
3308 #if 0
3309 // XXX SMH; no bounce support for us yet
3310 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3311 #endif
3312 blk_queue_headactive(q, 0);
3314 /* fill in the other Kernel structs */
3315 blksize_size[hba[i]->major] = hba[i]->blocksizes;
3316 hardsect_size[hba[i]->major] = hba[i]->hardsizes;
3317 read_ahead[hba[i]->major] = READ_AHEAD;
3319 /* Set the pointers to queue functions */
3320 q->back_merge_fn = cpq_back_merge_fn;
3321 q->front_merge_fn = cpq_front_merge_fn;
3322 q->merge_requests_fn = cpq_merge_requests_fn;
3325 /* Fill in the gendisk data */
3326 hba[i]->gendisk.major = hba[i]->major;
3327 hba[i]->gendisk.major_name = "cciss";
3328 hba[i]->gendisk.minor_shift = NWD_SHIFT;
3329 hba[i]->gendisk.max_p = MAX_PART;
3330 hba[i]->gendisk.part = hba[i]->hd;
3331 hba[i]->gendisk.sizes = hba[i]->sizes;
3332 hba[i]->gendisk.nr_real = hba[i]->highest_lun+1;
3333 hba[i]->gendisk.fops = &cciss_fops;
3335 /* Get on the disk list */
3336 add_gendisk(&(hba[i]->gendisk));
3338 cciss_geninit(i);
/* One register_disk() per logical drive; minors are spaced NWD_SHIFT
   (j << 4) apart to leave room for MAX_PART partitions each. */
3339 for(j=0; j<NWD; j++)
3340 register_disk(&(hba[i]->gendisk),
3341 MKDEV(hba[i]->major, j <<4),
3342 MAX_PART, &cciss_fops,
3343 hba[i]->drv[j].nr_blocks);
3345 cciss_register_scsi(i, 1); /* hook ourself into SCSI subsystem */
3347 return 1;
/*
 * PCI remove callback: tear down one controller.
 * Mirrors cciss_init_one(): stops the monitor thread, flushes the
 * board's write cache (only if the board is still alive), releases the
 * irq, unmaps registers, unregisters the block/SCSI interfaces, frees
 * the DMA pools, and finally releases the hba[] slot.
 */
3350 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3352 ctlr_info_t *tmp_ptr;
3353 int i;
3354 char flush_buf[4];
3355 int return_code;
/* drvdata was set at the end of a successful probe; NULL means this
   pdev was never (fully) initialized by us. */
3357 if (pci_get_drvdata(pdev) == NULL) {
3358 printk( KERN_ERR "cciss: Unable to remove device \n");
3359 return;
3361 tmp_ptr = pci_get_drvdata(pdev);
3362 i = tmp_ptr->ctlr;
3363 if (hba[i] == NULL) {
3364 printk(KERN_ERR "cciss: device appears to "
3365 "already be removed \n");
3366 return;
3368 kill_monitor_thread(hba[i]);
3369 /* no sense in trying to flush a dead board's cache. */
3370 if (CTLR_IS_ALIVE(hba[i])) {
3371 /* Turn board interrupts off and flush the cache */
3372 /* write all data in the battery backed cache to disks */
3373 memset(flush_buf, 0, 4);
3374 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf,
3375 4, 0, 0, 0, NULL);
3376 if (return_code != IO_OK)
3377 printk(KERN_WARNING
3378 "cciss%d: Error flushing cache\n", i);
3380 free_irq(hba[i]->intr, hba[i]);
3381 pci_set_drvdata(pdev, NULL);
3382 iounmap((void*)hba[i]->vaddr);
3383 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3384 unregister_blkdev(hba[i]->major, hba[i]->devname);
3385 map_major_to_ctlr[hba[i]->major] = 0;
3386 //remove_proc_entry(hba[i]->devname, proc_cciss);
3389 /* remove it from the disk list */
3390 del_gendisk(&(hba[i]->gendisk));
/* Free the DMA-coherent command/error pools allocated in probe. */
3392 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3393 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3394 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3395 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3396 kfree(hba[i]->cmd_pool_bits);
3397 release_io_mem(hba[i]);
3398 free_hba(i);
/*
 * PCI driver descriptor: ties our probe/remove callbacks to the
 * controller id table.  Uses the old GCC "field:" initializer syntax.
 */
3401 static struct pci_driver cciss_pci_driver = {
3402 name: "cciss",
3403 probe: cciss_init_one,
3404 remove: __devexit_p(cciss_remove_one),
3405 id_table: cciss_pci_device_id, /* table of supported controller ids */
3406 };
3408 /*
3409 * This is it. Register the PCI driver information for the cards we control;
3410 * the OS will call our registered routines when it finds one of our cards.
3411 */
/*
 * Register the PCI driver; returns whatever pci_module_init() reports.
 * The PCI core then calls cciss_init_one() for each matching board.
 */
3412 int __init cciss_init(void)
3415 /* printk(KERN_INFO DRIVER_NAME "\n");*/
3416 /* Register for our PCI devices */
3417 return pci_module_init(&cciss_pci_driver);
3420 EXPORT_NO_SYMBOLS;
/* Module entry point: thin wrapper around cciss_init(). */
3421 static int __init init_cciss_module(void)
3424 return cciss_init();
/*
 * Module exit: unregister the PCI driver, then sweep hba[] and force-
 * remove any controller the PCI core somehow left behind (warns, since
 * pci_unregister_driver() should already have removed them all).
 */
3427 static void __exit cleanup_cciss_module(void)
3429 int i;
3431 pci_unregister_driver(&cciss_pci_driver);
3432 /* double check that all controller entries have been removed */
3433 for (i=0; i< MAX_CTLR; i++) {
3434 if (hba[i] != NULL) {
3435 printk(KERN_WARNING "cciss: had to remove"
3436 " controller %d\n", i);
3437 cciss_remove_one(hba[i]->pdev);
3440 //remove_proc_entry("cciss", proc_root_driver);
/* Hook the module entry/exit points into the kernel module loader. */
3443 module_init(init_cciss_module);
3444 module_exit(cleanup_cciss_module);