ia64/xen-unstable

view xen/drivers/block/cciss.c @ 1119:33e3adc72d8b

bitkeeper revision 1.745 (403b535dxe-hvv9dM0KddL1LhR9pdQ)

e1000_main.c, cciss.c:
Quieten unused device drivers in Xen.
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 24 13:36:29 2004 +0000 (2004-02-24)
parents ff30aea0a126
children 9f85adafc1e1
line source
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
20 *
21 */
23 #include <xeno/config.h> /* CONFIG_PROC_FS */
24 #include <xeno/module.h>
25 #include <xeno/version.h>
26 #include <xeno/types.h>
27 #include <xeno/pci.h>
28 #include <xeno/kernel.h>
29 #include <xeno/slab.h>
30 #include <xeno/delay.h>
31 #include <xeno/major.h>
32 #include <xeno/blkpg.h>
33 #include <xeno/interrupt.h>
34 #include <xeno/timer.h>
35 #include <xeno/init.h>
36 #include <xeno/hdreg.h>
37 #include <xeno/spinlock.h>
38 #include <asm/uaccess.h>
39 #include <asm/io.h>
41 #include <xeno/blk.h>
42 #include <xeno/blkdev.h>
43 #include <xeno/genhd.h>
45 #include <asm/irq.h>
46 #include <asm/byteorder.h>
48 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
49 #define DRIVER_NAME "HP CISS Driver (v 2.4.50)"
50 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,50)
52 /* Embedded module documentation macros - see modules.h */
53 MODULE_AUTHOR("Hewlett-Packard Company");
54 MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.50");
55 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400 6i");
56 MODULE_LICENSE("GPL");
58 #include "cciss_cmd.h"
59 #include "cciss.h"
61 /* define the PCI info for the cards we can control */
62 const struct pci_device_id cciss_pci_device_id[] = {
63 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
64 0x0E11, 0x4070, 0, 0, 0},
65 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
66 0x0E11, 0x4080, 0, 0, 0},
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
68 0x0E11, 0x4082, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4083, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
72 0x0E11, 0x409A, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
74 0x0E11, 0x409B, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409C, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409D, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x4091, 0, 0, 0},
81 {0,}
82 };
83 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
85 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
87 /* board_id = Subsystem Device ID & Vendor ID
88 * product = Marketing Name for the board
89 * access = Address of the struct of function pointers
90 */
91 static struct board_type products[] = {
92 { 0x40700E11, "Smart Array 5300", &SA5_access},
93 { 0x40800E11, "Smart Array 5i", &SA5B_access},
94 { 0x40820E11, "Smart Array 532", &SA5B_access},
95 { 0x40830E11, "Smart Array 5312", &SA5B_access},
96 { 0x409A0E11, "Smart Array 641", &SA5_access},
97 { 0x409B0E11, "Smart Array 642", &SA5_access},
98 { 0x409C0E11, "Smart Array 6400", &SA5_access},
99 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
100 { 0x40910E11, "Smart Array 6i", &SA5_access},
101 };
103 /* How long to wait (in milliseconds) for board to go into simple mode */
104 #define MAX_CONFIG_WAIT 30000
105 #define MAX_IOCTL_CONFIG_WAIT 1000
107 /* Define how many times we will try a command because of bus resets */
108 #define MAX_CMD_RETRIES 3
110 #define READ_AHEAD 128
111 #define NR_CMDS 128 /* #commands that can be outstanding */
112 #define MAX_CTLR 32
114 /* No sense in giving up our preallocated major numbers */
115 #if MAX_CTLR < 8
116 #error"cciss.c: MAX_CTLR must be 8 or greater"
117 #endif
119 /* Originally cciss driver only supports 8 major number */
120 #define MAX_CTLR_ORIG COMPAQ_CISS_MAJOR7 - COMPAQ_CISS_MAJOR + 1
122 #define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFF /* 64 bit DMA */
124 #ifdef CONFIG_CISS_MONITOR_THREAD
125 static int cciss_monitor(void *ctlr);
126 static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
127 unsigned long count, int (*cciss_monitor)(void *), int *rc);
128 static u32 heartbeat_timer = 0;
129 #else
130 #define cciss_monitor(x)
131 #define kill_monitor_thead(x)
132 #endif
134 static ctlr_info_t *hba[MAX_CTLR];
135 static int map_major_to_ctlr[MAX_BLKDEV] = {0}; /* gets ctlr num from maj num */
137 static void do_cciss_request(request_queue_t *q);
138 static int cciss_open(struct inode *inode, struct file *filep);
139 static int cciss_release(struct inode *inode, struct file *filep);
140 static int cciss_ioctl(struct inode *inode, struct file *filep,
141 unsigned int cmd, unsigned long arg);
143 static int revalidate_logvol(kdev_t dev, int maxusage);
144 static int frevalidate_logvol(kdev_t dev);
145 #if 0
146 static int deregister_disk(int ctlr, int logvol);
147 static int register_new_disk(int cltr, int opened_vol, __u64 requested_lun);
148 static int cciss_rescan_disk(int cltr, int logvol);
149 #endif
151 static void cciss_getgeometry(int cntl_num);
153 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c);
154 static void start_io( ctlr_info_t *h);
#ifdef CONFIG_PROC_FS
/* Forward declarations for the /proc support implemented further down. */
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
		int length, int *eof, void *data);
static void cciss_procinit(int i);
/* XXX -- this marker appeared as a bare token in the original text; a bare
 * token here would not preprocess to valid C when CONFIG_PROC_FS is set,
 * so it is preserved as a comment only. */
#else
/*static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
		int length, int *eof, void *data) { return 0;}
*/
/* No-op stub when /proc support is compiled out. */
static void cciss_procinit(int i) {}
#endif /* CONFIG_PROC_FS */
/* Block-device entry points handed to the generic block layer.
 * (GNU-style designated initializers, as used by 2.4-era drivers.) */
172 static struct block_device_operations cciss_fops = {
173 open: cciss_open,
174 release: cciss_release,
175 ioctl: cciss_ioctl,
176 revalidate: frevalidate_logvol,
177 };
179 #include "cciss_scsi.c" /* For SCSI tape support */
181 #define ENG_GIG 1048576000
182 #define ENG_GIG_FACTOR (ENG_GIG/512)
183 #define RAID_UNKNOWN 6
184 static const char *raid_label[] = {"0","4","1(0+1)","5","5+1","ADG",
185 "UNKNOWN"};
186 /*
187 * Report information about this controller.
188 */
189 #ifdef CONFIG_PROC_FS
/* /proc read handler: prints a controller summary followed by one line per
 * configured logical volume. Returns the number of bytes produced for the
 * requested (offset, length) window, or -EBUSY if a reconfiguration is in
 * progress. */
190 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
191 int length, int *eof, void *data)
192 {
193 off_t pos = 0;
194 off_t len = 0;
195 int size, i, ctlr;
196 ctlr_info_t *h = (ctlr_info_t*)data;
197 drive_info_struct *drv;
198 unsigned long flags;
199 unsigned int vol_sz, vol_sz_frac;
/* busy_configuring serializes /proc readers against disk add/remove paths;
 * take the flag under io_request_lock, then drop the lock while printing. */
201 spin_lock_irqsave(&io_request_lock, flags);
202 if (h->busy_configuring) {
203 spin_unlock_irqrestore(&io_request_lock, flags);
204 return -EBUSY;
205 }
206 h->busy_configuring = 1;
207 spin_unlock_irqrestore(&io_request_lock, flags);
209 ctlr = h->ctlr;
210 size = sprintf(buffer, "%s: HP %s Controller\n"
211 "Board ID: 0x%08lx\n"
212 "Firmware Version: %c%c%c%c\n"
213 "IRQ: %d\n"
214 "Logical drives: %d\n"
215 "Current Q depth: %d\n"
216 "Current # commands on controller: %d\n"
217 "Max Q depth since init: %d\n"
218 "Max # commands on controller since init: %d\n"
219 "Max SG entries since init: %d\n"
220 MONITOR_PERIOD_PATTERN
221 MONITOR_DEADLINE_PATTERN
222 MONITOR_STATUS_PATTERN
223 "\n",
224 h->devname,
225 h->product_name,
226 (unsigned long)h->board_id,
227 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
228 (unsigned int)h->intr,
229 h->num_luns,
230 h->Qdepth, h->commands_outstanding,
231 h->maxQsinceinit, h->max_outstanding, h->maxSG,
232 MONITOR_PERIOD_VALUE(h),
233 MONITOR_DEADLINE_VALUE(h),
234 CTLR_STATUS(h));
236 pos += size; len += size;
237 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
/* One "cciss/cXdY: size RAID-level" line per volume with a nonzero size. */
238 for(i=0; i<=h->highest_lun; i++) {
239 drv = &h->drv[i];
240 if (drv->nr_blocks == 0)
241 continue;
242 vol_sz = drv->nr_blocks/ENG_GIG_FACTOR;
243 vol_sz_frac = (drv->nr_blocks%ENG_GIG_FACTOR)*100/ENG_GIG_FACTOR;
245 if (drv->raid_level > 5)
246 drv->raid_level = RAID_UNKNOWN;
247 size = sprintf(buffer+len, "cciss/c%dd%d:"
248 "\t%4d.%02dGB\tRAID %s\n",
249 ctlr, i, vol_sz,vol_sz_frac,
250 raid_label[drv->raid_level]);
251 pos += size, len += size;
252 }
/* Single-shot read: report EOF and clip the result to the caller's window. */
254 *eof = 1;
255 *start = buffer+offset;
256 len -= offset;
257 if (len>length)
258 len = length;
259 h->busy_configuring = 0;
260 return len;
261 }
263 static int
264 cciss_proc_write(struct file *file, const char *buffer,
265 unsigned long count, void *data)
266 {
267 unsigned char cmd[80];
268 int len;
269 ctlr_info_t *h = (ctlr_info_t *) data;
270 int rc;
272 if (count > sizeof(cmd)-1)
273 return -EINVAL;
274 if (copy_from_user(cmd, buffer, count))
275 return -EFAULT;
276 cmd[count] = '\0';
277 len = strlen(cmd);
278 if (cmd[len-1] == '\n')
279 cmd[--len] = '\0';
281 # ifdef CONFIG_CISS_SCSI_TAPE
282 if (strcmp("engage scsi", cmd)==0) {
283 rc = cciss_engage_scsi(h->ctlr);
284 if (rc != 0)
285 return -rc;
286 return count;
287 }
288 /* might be nice to have "disengage" too, but it's not
289 safely possible. (only 1 module use count, lock issues.) */
290 # endif
292 if (START_MONITOR_THREAD(h, cmd, count, cciss_monitor, &rc) == 0)
293 return rc;
295 return -EINVAL;
296 }
298 /*
299 * Get us a file in /proc/cciss that says something about each controller.
300 * Create /proc/cciss if it doesn't exist yet.
301 */
302 static void __init cciss_procinit(int i)
303 {
304 struct proc_dir_entry *pde;
306 if (proc_cciss == NULL) {
307 proc_cciss = proc_mkdir("cciss", proc_root_driver);
308 if (!proc_cciss) {
309 printk("cciss: proc_mkdir failed\n");
310 return;
311 }
312 }
314 pde = create_proc_read_entry(hba[i]->devname,
315 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
316 proc_cciss, cciss_proc_get_info, hba[i]);
317 pde->write_proc = cciss_proc_write;
318 }
319 #endif /* CONFIG_PROC_FS */
321 /*
322 * For operations that cannot sleep, a command block is allocated at init,
323 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
324 * which ones are free or in use. For operations that can wait for kmalloc
325 * to possibly sleep, this routine can be called with get_from_pool set to 0.
326 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
327 */
328 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
329 {
330 CommandList_struct *c;
331 int i;
332 u64bit temp64;
333 dma_addr_t cmd_dma_handle, err_dma_handle;
/* Sleepable path: carve a fresh command block plus error buffer out of
 * PCI-consistent memory. Caller must later cmd_free() with got_from_pool=0. */
335 if (!get_from_pool) {
336 c = (CommandList_struct *) pci_alloc_consistent(
337 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
338 if (c==NULL)
339 return NULL;
340 memset(c, 0, sizeof(CommandList_struct));
342 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
343 h->pdev, sizeof(ErrorInfo_struct),
344 &err_dma_handle);
/* Second allocation failed: release the command block before bailing. */
346 if (c->err_info == NULL)
347 {
348 pci_free_consistent(h->pdev,
349 sizeof(CommandList_struct), c, cmd_dma_handle);
350 return NULL;
351 }
352 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
353 } else /* get it out of the controllers pool */
354 {
/* Atomic path: claim a slot in the preallocated pool. test_and_set_bit()
 * re-checks the slot atomically, so a racing claimer simply loops and
 * searches for the next zero bit. */
355 do {
356 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
357 if (i == NR_CMDS)
358 return NULL;
359 } while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
360 #ifdef CCISS_DEBUG
361 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
362 #endif
363 c = h->cmd_pool + i;
364 memset(c, 0, sizeof(CommandList_struct));
/* Pool entries are contiguous, so the DMA handles are computed by offset. */
365 cmd_dma_handle = h->cmd_pool_dhandle
366 + i*sizeof(CommandList_struct);
367 c->err_info = h->errinfo_pool + i;
368 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
369 err_dma_handle = h->errinfo_pool_dhandle
370 + i*sizeof(ErrorInfo_struct);
371 h->nr_allocs++;
372 }
/* Record bus addresses so the controller can DMA status into err_info. */
374 c->busaddr = (__u32) cmd_dma_handle;
375 temp64.val = (__u64) err_dma_handle;
376 c->ErrDesc.Addr.lower = temp64.val32.lower;
377 c->ErrDesc.Addr.upper = temp64.val32.upper;
378 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
380 c->ctlr = h->ctlr;
381 return c;
384 }
386 /*
387 * Frees a command block that was previously allocated with cmd_alloc().
388 */
389 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
390 {
391 int i;
392 u64bit temp64;
/* got_from_pool must mirror the get_from_pool flag passed to cmd_alloc();
 * mixing them would free pool memory to the PCI allocator or leak. */
394 if (!got_from_pool) {
395 temp64.val32.lower = c->ErrDesc.Addr.lower;
396 temp64.val32.upper = c->ErrDesc.Addr.upper;
397 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
398 c->err_info, (dma_addr_t) temp64.val);
399 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
400 c, (dma_addr_t) c->busaddr);
401 } else
402 {
/* Pool entry: releasing it is just clearing its bit in the bitmap. */
403 i = c - h->cmd_pool;
404 clear_bit(i%32, h->cmd_pool_bits+(i/32));
405 h->nr_frees++;
406 }
407 }
409 /*
410 * fills in the disk information.
411 */
412 static void cciss_geninit( int ctlr)
413 {
414 drive_info_struct *drv;
415 int i,j;
417 /* Loop through each real device */
418 hba[ctlr]->gendisk.nr_real = 0;
419 for(i=0; i< NWD; i++) {
420 drv = &(hba[ctlr]->drv[i]);
421 if (!(drv->nr_blocks))
422 continue;
/* Minor (i << NWD_SHIFT) is the whole-disk node: publish its size. */
423 hba[ctlr]->hd[i << NWD_SHIFT].nr_sects =
424 hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
426 /* for each partition */
427 for(j=0; j<MAX_PART; j++) {
428 hba[ctlr]->blocksizes[(i<<NWD_SHIFT) + j] = 1024;
430 hba[ctlr]->hardsizes[ (i<<NWD_SHIFT) + j] =
431 drv->block_size;
432 }
433 }
/* Tell the gendisk layer how many logical drives actually exist. */
434 hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
435 }
/* Xen hook: append one xen_disk_t to *xdi for every configured logical
 * drive on every registered controller, so domain 0 can see them. BUG()s
 * if xdi->disks is too small to hold all drives. */
438 void cciss_probe_devices(xen_disk_info_t *xdi)
439 {
440 int i, ctlr;
441 drive_info_struct *drv;
442 xen_disk_t *xd = &xdi->disks[xdi->count];
445 for(ctlr = 0; ctlr < MAX_CTLR; ctlr++) {
447 if(hba[ctlr] != NULL) {
449 /* Loop through each real device */
450 for(i=0; i < NWD; i++) {
452 drv = &(hba[ctlr]->drv[i]);
454 if (!(drv->nr_blocks))
455 continue;
457 if ( xdi->count == xdi->max )
458 BUG();
/* Refresh the whole-disk size entries as a side effect, as cciss_geninit
 * does. */
461 hba[ctlr]->hd[i << NWD_SHIFT].nr_sects =
462 hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
464 /* We export 'raw' linux device numbers to domain 0. */
465 xd->device = MKDEV(hba[ctlr]->major, i << 4);
466 xd->info = XD_TYPE_DISK; /* XXX should check properly */
467 xd->capacity = drv->nr_blocks; /* number of 512 byte sectors */
468 xd->domain = 0;
470 xdi->count++;
471 xd++;
473 }
475 }
476 }
478 return;
479 }
481 /*
482 * Open. Make sure the device is really there.
483 */
484 static int cciss_open(struct inode *inode, struct file *filep)
485 {
486 int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
487 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
489 #ifdef CCISS_DEBUG
490 printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
491 #endif /* CCISS_DEBUG */
493 if (ctlr > MAX_CTLR || hba[ctlr] == NULL || !CTLR_IS_ALIVE(hba[ctlr]))
494 return -ENXIO;
495 /*
496 * Root is allowed to open raw volume zero even if its not configured
497 * so array config can still work. Root is also allowed to open any
498 * volume that has a LUN ID, so it can issue IOCTL to reread the
499 * disk information. I don't think I really like this.
500 * but I'm already using way to many device nodes to claim another one
501 * for "raw controller".
502 */
503 if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) { /* not online? */
504 if (MINOR(inode->i_rdev) != 0) { /* not node 0? */
505 /* if not node 0 make sure it is a partition = 0 */
506 if (MINOR(inode->i_rdev) & 0x0f) {
507 return -ENXIO;
508 /* if it is, make sure we have a LUN ID */
509 } else if (hba[ctlr]->drv[MINOR(inode->i_rdev)
510 >> NWD_SHIFT].LunID == 0) {
511 return -ENXIO;
512 }
513 }
514 if (!capable(CAP_SYS_ADMIN))
515 return -EPERM;
516 }
518 hba[ctlr]->drv[dsk].usage_count++;
519 hba[ctlr]->usage_count++;
520 return 0;
521 }
522 /*
523 * Close. Sync first.
524 */
525 static int cciss_release(struct inode *inode, struct file *filep)
526 {
527 int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
528 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
530 #ifdef CCISS_DEBUG
531 printk(KERN_DEBUG "cciss_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
532 #endif /* CCISS_DEBUG */
534 /* fsync_dev(inode->i_rdev); */
536 hba[ctlr]->drv[dsk].usage_count--;
537 hba[ctlr]->usage_count--;
538 return 0;
539 }
541 /*
542 * ioctl
543 */
544 static int cciss_ioctl(struct inode *inode, struct file *filep,
545 unsigned int cmd, unsigned long arg)
546 {
547 #if 0
548 //int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
550 //int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
551 #endif
553 printk(KERN_ALERT "cciss_ioctl: Called BUT NOT SUPPORTED cmd=%x %lx\n", cmd, arg);
555 return -EBADRQC;
557 #if 0
559 #ifdef CCISS_DEBUG
560 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
561 #endif /* CCISS_DEBUG */
563 switch(cmd) {
564 case HDIO_GETGEO:
565 {
566 struct hd_geometry driver_geo;
567 if (hba[ctlr]->drv[dsk].cylinders) {
568 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
569 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
570 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
571 } else {
572 driver_geo.heads = 0xff;
573 driver_geo.sectors = 0x3f;
574 driver_geo.cylinders =
575 hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
576 }
577 driver_geo.start=
578 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
579 if (copy_to_user((void *) arg, &driver_geo,
580 sizeof( struct hd_geometry)))
581 return -EFAULT;
582 return 0;
583 }
584 case HDIO_GETGEO_BIG:
585 {
586 struct hd_big_geometry driver_geo;
587 if (hba[ctlr]->drv[dsk].cylinders) {
588 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
589 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
590 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
591 } else {
592 driver_geo.heads = 0xff;
593 driver_geo.sectors = 0x3f;
594 driver_geo.cylinders =
595 hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
596 }
597 driver_geo.start=
598 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
599 if (copy_to_user((void *) arg, &driver_geo,
600 sizeof( struct hd_big_geometry)))
601 return -EFAULT;
602 return 0;
603 }
604 case BLKRRPART:
605 if (!capable(CAP_SYS_ADMIN))
606 return -EPERM;
607 return revalidate_logvol(inode->i_rdev, 1);
608 case BLKGETSIZE:
609 case BLKGETSIZE64:
610 case BLKFLSBUF:
611 case BLKBSZSET:
612 case BLKBSZGET:
613 case BLKROSET:
614 case BLKROGET:
615 case BLKRASET:
616 case BLKRAGET:
617 case BLKPG:
618 case BLKELVGET:
619 case BLKELVSET:
620 return blk_ioctl(inode->i_rdev, cmd, arg);
621 case CCISS_GETPCIINFO:
622 {
623 cciss_pci_info_struct pciinfo;
625 if (!arg)
626 return -EINVAL;
627 pciinfo.bus = hba[ctlr]->pdev->bus->number;
628 pciinfo.dev_fn = hba[ctlr]->pdev->devfn;
629 pciinfo.board_id = hba[ctlr]->board_id;
630 if (copy_to_user((void *) arg, &pciinfo, sizeof( cciss_pci_info_struct )))
631 return -EFAULT;
632 return 0;
633 }
634 case CCISS_GETINTINFO:
635 {
636 cciss_coalint_struct intinfo;
637 ctlr_info_t *c = hba[ctlr];
639 if (!arg)
640 return -EINVAL;
641 intinfo.delay = readl(&c->cfgtable->HostWrite.CoalIntDelay);
642 intinfo.count = readl(&c->cfgtable->HostWrite.CoalIntCount);
643 if (copy_to_user((void *) arg, &intinfo, sizeof( cciss_coalint_struct )))
644 return -EFAULT;
645 return 0;
646 }
647 case CCISS_SETINTINFO:
648 {
649 cciss_coalint_struct intinfo;
650 ctlr_info_t *c = hba[ctlr];
651 unsigned long flags;
652 int i;
654 if (!arg)
655 return -EINVAL;
656 if (!capable(CAP_SYS_ADMIN))
657 return -EPERM;
658 if (copy_from_user(&intinfo, (void *) arg, sizeof( cciss_coalint_struct)))
659 return -EFAULT;
660 if ( (intinfo.delay == 0 ) && (intinfo.count == 0)) {
661 return -EINVAL;
662 }
664 spin_lock_irqsave(&io_request_lock, flags);
665 /* Can only safely update if no commands outstanding */
666 if (c->commands_outstanding > 0 ) {
667 spin_unlock_irqrestore(&io_request_lock, flags);
668 return -EINVAL;
669 }
670 /* Update the field, and then ring the doorbell */
671 writel( intinfo.delay,
672 &(c->cfgtable->HostWrite.CoalIntDelay));
673 writel( intinfo.count,
674 &(c->cfgtable->HostWrite.CoalIntCount));
675 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
677 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
678 if (!(readl(c->vaddr + SA5_DOORBELL)
679 & CFGTBL_ChangeReq))
680 break;
681 /* delay and try again */
682 udelay(1000);
683 }
684 spin_unlock_irqrestore(&io_request_lock, flags);
685 if (i >= MAX_IOCTL_CONFIG_WAIT)
686 /* there is an unlikely case where this can happen,
687 * involving hot replacing a failed 144 GB drive in a
688 * RAID 5 set just as we attempt this ioctl. */
689 return -EAGAIN;
690 return 0;
691 }
692 case CCISS_GETNODENAME:
693 {
694 NodeName_type NodeName;
695 ctlr_info_t *c = hba[ctlr];
696 int i;
698 if (!arg)
699 return -EINVAL;
700 for(i=0;i<16;i++)
701 NodeName[i] = readb(&c->cfgtable->ServerName[i]);
702 if (copy_to_user((void *) arg, NodeName, sizeof( NodeName_type)))
703 return -EFAULT;
704 return 0;
705 }
706 case CCISS_SETNODENAME:
707 {
708 NodeName_type NodeName;
709 ctlr_info_t *c = hba[ctlr];
710 unsigned long flags;
711 int i;
713 if (!arg)
714 return -EINVAL;
715 if (!capable(CAP_SYS_ADMIN))
716 return -EPERM;
718 if (copy_from_user(NodeName, (void *) arg, sizeof( NodeName_type)))
719 return -EFAULT;
721 spin_lock_irqsave(&io_request_lock, flags);
723 /* Update the field, and then ring the doorbell */
724 for(i=0;i<16;i++)
725 writeb( NodeName[i], &c->cfgtable->ServerName[i]);
727 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
729 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
730 if (!(readl(c->vaddr + SA5_DOORBELL)
731 & CFGTBL_ChangeReq))
732 break;
733 /* delay and try again */
734 udelay(1000);
735 }
736 spin_unlock_irqrestore(&io_request_lock, flags);
737 if (i >= MAX_IOCTL_CONFIG_WAIT)
738 /* there is an unlikely case where this can happen,
739 * involving hot replacing a failed 144 GB drive in a
740 * RAID 5 set just as we attempt this ioctl. */
741 return -EAGAIN;
742 return 0;
743 }
745 case CCISS_GETHEARTBEAT:
746 {
747 Heartbeat_type heartbeat;
748 ctlr_info_t *c = hba[ctlr];
750 if (!arg)
751 return -EINVAL;
752 heartbeat = readl(&c->cfgtable->HeartBeat);
753 if (copy_to_user((void *) arg, &heartbeat, sizeof( Heartbeat_type)))
754 return -EFAULT;
755 return 0;
756 }
757 case CCISS_GETBUSTYPES:
758 {
759 BusTypes_type BusTypes;
760 ctlr_info_t *c = hba[ctlr];
762 if (!arg)
763 return -EINVAL;
764 BusTypes = readl(&c->cfgtable->BusTypes);
765 if (copy_to_user((void *) arg, &BusTypes, sizeof( BusTypes_type) ))
766 return -EFAULT;
767 return 0;
768 }
769 case CCISS_GETFIRMVER:
770 {
771 FirmwareVer_type firmware;
773 if (!arg)
774 return -EINVAL;
775 memcpy(firmware, hba[ctlr]->firm_ver, 4);
777 if (copy_to_user((void *) arg, firmware, sizeof( FirmwareVer_type)))
778 return -EFAULT;
779 return 0;
780 }
781 case CCISS_GETDRIVVER:
782 {
783 DriverVer_type DriverVer = DRIVER_VERSION;
785 if (!arg)
786 return -EINVAL;
788 if (copy_to_user((void *) arg, &DriverVer, sizeof( DriverVer_type) ))
789 return -EFAULT;
790 return 0;
791 }
792 case CCISS_RESCANDISK:
793 {
794 return cciss_rescan_disk(ctlr, dsk);
795 }
796 case CCISS_DEREGDISK:
797 return deregister_disk(ctlr,dsk);
799 case CCISS_REGNEWD:
800 return register_new_disk(ctlr, dsk, 0);
801 case CCISS_REGNEWDISK:
802 {
803 __u64 new_logvol;
805 if (!arg)
806 return -EINVAL;
807 if (copy_from_user(&new_logvol, (void *) arg,
808 sizeof( __u64)))
809 return -EFAULT;
810 return register_new_disk(ctlr, dsk, new_logvol);
811 }
812 case CCISS_GETLUNINFO:
813 {
814 LogvolInfo_struct luninfo;
815 int num_parts = 0;
816 int i, start;
818 luninfo.LunID = hba[ctlr]->drv[dsk].LunID;
819 luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;
821 /* count partitions 1 to 15 with sizes > 0 */
822 start = (dsk << NWD_SHIFT);
823 for(i=1; i <MAX_PART; i++) {
824 int minor = start+i;
825 if (hba[ctlr]->sizes[minor] != 0)
826 num_parts++;
827 }
828 luninfo.num_parts = num_parts;
829 if (copy_to_user((void *) arg, &luninfo,
830 sizeof( LogvolInfo_struct) ))
831 return -EFAULT;
832 return 0;
833 }
834 #if 0
835 case CCISS_PASSTHRU:
836 {
837 IOCTL_Command_struct iocommand;
838 ctlr_info_t *h = hba[ctlr];
839 CommandList_struct *c;
840 char *buff = NULL;
841 u64bit temp64;
842 unsigned long flags;
843 DECLARE_COMPLETION(wait);
845 if (!arg)
846 return -EINVAL;
848 if (!capable(CAP_SYS_RAWIO))
849 return -EPERM;
851 if (copy_from_user(&iocommand, (void *) arg, sizeof( IOCTL_Command_struct) ))
852 return -EFAULT;
853 if ((iocommand.buf_size < 1) &&
854 (iocommand.Request.Type.Direction
855 != XFER_NONE)) {
856 return -EINVAL;
857 }
858 /* Check kmalloc limits */
859 if (iocommand.buf_size > 128000)
860 return -EINVAL;
861 if (iocommand.buf_size > 0) {
862 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
863 if (buff == NULL)
864 return -ENOMEM;
865 }
866 if (iocommand.Request.Type.Direction == XFER_WRITE) {
867 /* Copy the data into the buffer we created */
868 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
869 {
870 kfree(buff);
871 return -EFAULT;
872 }
873 }
874 if ((c = cmd_alloc(h , 0)) == NULL) {
875 kfree(buff);
876 return -ENOMEM;
877 }
878 /* Fill in the command type */
879 c->cmd_type = CMD_IOCTL_PEND;
880 /* Fill in Command Header */
881 c->Header.ReplyQueue = 0; /* unused in simple mode */
882 if (iocommand.buf_size > 0) { /* buffer to fill */
883 c->Header.SGList = 1;
884 c->Header.SGTotal= 1;
885 } else { /* no buffers to fill */
886 c->Header.SGList = 0;
887 c->Header.SGTotal= 0;
888 }
889 c->Header.LUN = iocommand.LUN_info;
890 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
891 /* the cmd block for tag */
893 /* Fill in Request block */
894 c->Request = iocommand.Request;
896 /* Fill in the scatter gather information */
897 if (iocommand.buf_size > 0 ) {
898 temp64.val = pci_map_single( h->pdev, buff,
899 iocommand.buf_size,
900 PCI_DMA_BIDIRECTIONAL);
901 c->SG[0].Addr.lower = temp64.val32.lower;
902 c->SG[0].Addr.upper = temp64.val32.upper;
903 c->SG[0].Len = iocommand.buf_size;
904 c->SG[0].Ext = 0; /* we are not chaining */
905 }
906 c->waiting = &wait;
908 /* Put the request on the tail of the request queue */
909 spin_lock_irqsave(&io_request_lock, flags);
910 addQ(&h->reqQ, c);
911 h->Qdepth++;
912 start_io(h);
913 spin_unlock_irqrestore(&io_request_lock, flags);
915 wait_for_completion(&wait);
917 /* unlock the buffers from DMA */
918 temp64.val32.lower = c->SG[0].Addr.lower;
919 temp64.val32.upper = c->SG[0].Addr.upper;
920 pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
921 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
923 /* Copy the error information out */
924 iocommand.error_info = *(c->err_info);
925 if (copy_to_user((void *) arg, &iocommand,
926 sizeof( IOCTL_Command_struct) ) ) {
927 kfree(buff);
928 cmd_free(h, c, 0);
929 return( -EFAULT);
930 }
932 if (iocommand.Request.Type.Direction == XFER_READ) {
933 /* Copy the data out of the buffer we created */
934 if (copy_to_user(iocommand.buf, buff,
935 iocommand.buf_size)) {
936 kfree(buff);
937 cmd_free(h, c, 0);
938 return -EFAULT;
939 }
940 }
941 kfree(buff);
942 cmd_free(h, c, 0);
943 return 0;
944 }
945 case CCISS_BIG_PASSTHRU:
946 {
947 BIG_IOCTL_Command_struct iocommand;
948 ctlr_info_t *h = hba[ctlr];
949 CommandList_struct *c;
950 char *buff[MAXSGENTRIES] = {NULL,};
951 int buff_size[MAXSGENTRIES] = {0,};
952 u64bit temp64;
953 unsigned long flags;
954 BYTE sg_used = 0;
955 int status = 0;
956 int i;
957 DECLARE_COMPLETION(wait);
959 if (!arg)
960 return -EINVAL;
962 if (!capable(CAP_SYS_RAWIO))
963 return -EPERM;
965 if (copy_from_user(&iocommand, (void *) arg, sizeof( BIG_IOCTL_Command_struct) ))
966 return -EFAULT;
967 if ((iocommand.buf_size < 1) &&
968 (iocommand.Request.Type.Direction != XFER_NONE)) {
969 return -EINVAL;
970 }
971 /* Check kmalloc limits using all SGs */
972 if (iocommand.malloc_size > MAX_KMALLOC_SIZE)
973 return -EINVAL;
974 if (iocommand.buf_size > iocommand.malloc_size * MAXSGENTRIES)
975 return -EINVAL;
976 if (iocommand.buf_size > 0) {
977 __u32 size_left_alloc = iocommand.buf_size;
978 BYTE *data_ptr = (BYTE *) iocommand.buf;
979 while (size_left_alloc > 0) {
980 buff_size[sg_used] = (size_left_alloc
981 > iocommand.malloc_size)
982 ? iocommand.malloc_size : size_left_alloc;
983 buff[sg_used] = kmalloc( buff_size[sg_used],
984 GFP_KERNEL);
985 if (buff[sg_used] == NULL) {
986 status = -ENOMEM;
987 goto cleanup1;
988 }
989 if (iocommand.Request.Type.Direction ==
990 XFER_WRITE)
991 /* Copy the data into the buffer created */
992 if (copy_from_user(buff[sg_used], data_ptr,
993 buff_size[sg_used])) {
994 status = -ENOMEM;
995 goto cleanup1;
996 }
997 size_left_alloc -= buff_size[sg_used];
998 data_ptr += buff_size[sg_used];
999 sg_used++;
1003 if ((c = cmd_alloc(h , 0)) == NULL) {
1004 status = -ENOMEM;
1005 goto cleanup1;
1007 /* Fill in the command type */
1008 c->cmd_type = CMD_IOCTL_PEND;
1009 /* Fill in Command Header */
1010 c->Header.ReplyQueue = 0; /* unused in simple mode */
1012 if (iocommand.buf_size > 0) { /* buffer to fill */
1013 c->Header.SGList = sg_used;
1014 c->Header.SGTotal= sg_used;
1015 } else { /* no buffers to fill */
1016 c->Header.SGList = 0;
1017 c->Header.SGTotal= 0;
1019 c->Header.LUN = iocommand.LUN_info;
1020 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
1021 /* the cmd block for tag */
1023 /* Fill in Request block */
1024 c->Request = iocommand.Request;
1025 /* Fill in the scatter gather information */
1026 if (iocommand.buf_size > 0 ) {
1027 int i;
1028 for(i=0; i< sg_used; i++) {
1029 temp64.val = pci_map_single( h->pdev, buff[i],
1030 buff_size[i],
1031 PCI_DMA_BIDIRECTIONAL);
1033 c->SG[i].Addr.lower = temp64.val32.lower;
1034 c->SG[i].Addr.upper = temp64.val32.upper;
1035 c->SG[i].Len = buff_size[i];
1036 c->SG[i].Ext = 0; /* we are not chaining */
1039 c->waiting = &wait;
1040 /* Put the request on the tail of the request queue */
1041 spin_lock_irqsave(&io_request_lock, flags);
1042 addQ(&h->reqQ, c);
1043 h->Qdepth++;
1044 start_io(h);
1045 spin_unlock_irqrestore(&io_request_lock, flags);
1046 wait_for_completion(&wait);
1047 /* unlock the buffers from DMA */
1048 for(i=0; i< sg_used; i++) {
1049 temp64.val32.lower = c->SG[i].Addr.lower;
1050 temp64.val32.upper = c->SG[i].Addr.upper;
1051 pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
1052 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1054 /* Copy the error information out */
1055 iocommand.error_info = *(c->err_info);
1056 if (copy_to_user((void *) arg, &iocommand,
1057 sizeof( IOCTL_Command_struct) ) ) {
1058 cmd_free(h, c, 0);
1059 status = -EFAULT;
1060 goto cleanup1;
1062 if (iocommand.Request.Type.Direction == XFER_READ) {
1063 /* Copy the data out of the buffer we created */
1064 BYTE *ptr = (BYTE *) iocommand.buf;
1065 for(i=0; i< sg_used; i++) {
1066 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1067 cmd_free(h, c, 0);
1068 status = -EFAULT;
1069 goto cleanup1;
1072 ptr += buff_size[i];
1075 cmd_free(h, c, 0);
1076 status = 0;
1079 cleanup1:
1080 for(i=0; i< sg_used; i++) {
1081 if (buff[i] != NULL)
1082 kfree(buff[i]);
1084 return status;
1086 #endif //PASSTHROUGH
1088 default:
1089 return -EBADRQC;
1092 #endif
1096 /* Borrowed and adapted from sd.c */
/* Re-read the partition table of one logical volume. Fails with -EBUSY if
 * the volume is opened more than maxusage times.
 * NOTE(review): the viewer dump this text was taken from dropped several
 * brace-only lines in this function (original lines 1098, 1117, 1138);
 * verify against the pristine changeset before editing the code itself. */
1097 static int revalidate_logvol(kdev_t dev, int maxusage)
1099 int ctlr, target;
1100 struct gendisk *gdev;
1101 unsigned long flags;
1102 int max_p;
1103 int start;
1104 int i;
1106 target = MINOR(dev) >> NWD_SHIFT;
1107 ctlr = map_major_to_ctlr[MAJOR(dev)];
1108 gdev = &(hba[ctlr]->gendisk);
/* Refuse to revalidate while other openers hold the volume; otherwise take
 * a usage reference so nobody tears it down under us. */
1110 spin_lock_irqsave(&io_request_lock, flags);
1111 if (hba[ctlr]->drv[target].usage_count > maxusage) {
1112 spin_unlock_irqrestore(&io_request_lock, flags);
1113 printk(KERN_WARNING "cciss: Device busy for "
1114 "revalidation (usage=%d)\n",
1115 hba[ctlr]->drv[target].usage_count);
1116 return -EBUSY;
1118 hba[ctlr]->drv[target].usage_count++;
1119 spin_unlock_irqrestore(&io_request_lock, flags);
1121 max_p = gdev->max_p;
1122 start = target << gdev->minor_shift;
/* Invalidate and zero every partition minor of this volume. */
1124 for(i=max_p-1; i>=0; i--) {
1125 int minor = start+i;
1126 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1127 gdev->part[minor].start_sect = 0;
1128 gdev->part[minor].nr_sects = 0;
1130 /* reset the blocksize so we can read the partition table */
1131 blksize_size[hba[ctlr]->major][minor] = 1024;
1133 /* setup partitions per disk */
1134 grok_partitions(gdev, target, MAX_PART,
1135 hba[ctlr]->drv[target].nr_blocks);
1136 hba[ctlr]->drv[target].usage_count--;
1137 return 0;
/* Block-device revalidate hook: delegates to revalidate_logvol() and
 * permits it only when the volume is completely unused (maxusage == 0). */
1140 static int frevalidate_logvol(kdev_t dev)
1142 #ifdef CCISS_DEBUG
1143 printk(KERN_DEBUG "cciss: frevalidate has been called\n");
1144 #endif /* CCISS_DEBUG */
1145 return revalidate_logvol(dev, 0);
1147 #if 0
/*
 * deregister_disk - remove one logical volume from the driver's tables.
 * (This function sits inside an "#if 0" region and is compiled out.)
 * Fails with -EPERM without CAP_SYS_RAWIO, and with -EBUSY if the volume
 * is open anywhere else or another reconfiguration is in flight.  On
 * success it invalidates all partition minors, recomputes highest_lun,
 * and zeroes the drive's size/geometry so subsequent open() calls fail.
 */
1148 static int deregister_disk(int ctlr, int logvol)
1150 unsigned long flags;
1151 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1152 ctlr_info_t *h = hba[ctlr];
1153 int start, max_p, i;
1155 if (!capable(CAP_SYS_RAWIO))
1156 return -EPERM;
1158 spin_lock_irqsave(&io_request_lock, flags);
1159 /* make sure logical volume is NOT is use */
1160 if (h->drv[logvol].usage_count > 1 || h->busy_configuring) {
1161 spin_unlock_irqrestore(&io_request_lock, flags);
1162 return -EBUSY;
/* claim the configuration mutex flag before dropping the lock */
1164 h->busy_configuring = 1;
1165 spin_unlock_irqrestore(&io_request_lock, flags);
1167 /* invalidate the devices and deregister the disk */
1168 max_p = gdev->max_p;
1169 start = logvol << gdev->minor_shift;
1170 for (i=max_p-1; i>=0; i--) {
1171 int minor = start+i;
1172 /* printk("invalidating( %d %d)\n", ctlr, minor); */
1173 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1174 /* so open will now fail */
1175 h->sizes[minor] = 0;
1176 /* so it will no longer appear in /proc/partitions */
1177 gdev->part[minor].start_sect = 0;
1178 gdev->part[minor].nr_sects = 0;
1180 /* check to see if it was the last disk */
1181 if (logvol == h->highest_lun) {
1182 /* if so, find the new hightest lun */
1183 int i, newhighest =-1;
1184 for(i=0; i<h->highest_lun; i++) {
1185 /* if the disk has size > 0, it is available */
1186 if (h->sizes[i << gdev->minor_shift] != 0)
1187 newhighest = i;
1189 h->highest_lun = newhighest;
1192 --h->num_luns;
1193 gdev->nr_real = h->highest_lun+1;
1194 /* zero out the disk size info */
1195 h->drv[logvol].nr_blocks = 0;
1196 h->drv[logvol].block_size = 0;
1197 h->drv[logvol].cylinders = 0;
1198 h->drv[logvol].LunID = 0;
1199 h->busy_configuring = 0;
1200 return 0;
/*
 * sendcmd_withirq - build a CISS command block, queue it, and wait for the
 * controller to complete it.  In this Xen port the completion wait was
 * replaced by a busy-spin on the integer `wait` (see XXX SMH comments):
 * `wait` starts at 1 and the interrupt path is expected to clear it
 * through c->waiting.  Returns IO_OK or IO_ERROR (or -ENOMEM if no
 * command block could be allocated).
 *   cmd          - CISS opcode (CISS_INQUIRY, CISS_REPORT_LOG, ...)
 *   buff/size    - optional single data buffer, DMA-mapped bidirectionally
 *   use_unit_num - non-zero: address logical volume `log_unit`
 *   page_code    - inquiry vital-product page, 0 for standard inquiry
 *   cmdtype      - TYPE_CMD or TYPE_MSG
 */
1202 static int sendcmd_withirq(__u8 cmd,
1203 int ctlr,
1204 void *buff,
1205 size_t size,
1206 unsigned int use_unit_num,
1207 unsigned int log_unit,
1208 __u8 page_code,
1209 __u8 cmdtype)
1211 ctlr_info_t *h = hba[ctlr];
1212 CommandList_struct *c;
1213 u64bit buff_dma_handle;
1214 unsigned long flags;
1215 int return_status = IO_OK;
1216 #if 0
1217 DECLARE_COMPLETION(wait);
1218 #else
1219 /* XXX SMH: no waiting for us ... spin instead */
1220 int wait = 1;
1221 int usecs = 0;
1222 #endif
1224 if ((c = cmd_alloc(h , 0)) == NULL)
1225 return -ENOMEM;
1226 c->cmd_type = CMD_IOCTL_PEND;
1227 /* Fill in Command Header */
1228 c->Header.ReplyQueue = 0; /* unused in simple mode */
1229 if (buff != NULL) { /* buffer to fill */
1230 c->Header.SGList = 1;
1231 c->Header.SGTotal= 1;
1232 } else {
1233 /* no buffers to fill */
1234 c->Header.SGList = 0;
1235 c->Header.SGTotal= 0;
1237 c->Header.Tag.lower = c->busaddr; /* tag is phys addr of cmd */
1238 /* Fill in Request block */
1239 c->Request.CDB[0] = cmd;
1240 c->Request.Type.Type = cmdtype;
/* per-opcode CDB setup: each case fills CDB length/attributes/direction */
1241 if (cmdtype == TYPE_CMD) {
1242 switch (cmd) {
1243 case CISS_INQUIRY:
1244 /* If the logical unit number is 0 then, this is going
1245 to controller so It's a physical command
1246 mode = 0 target = 0.
1247 So we have nothing to write.
1248 Otherwise
1249 mode = 1 target = LUNID
1250 */
1251 if (use_unit_num != 0) {
1252 c->Header.LUN.LogDev.VolId =
1253 hba[ctlr]->drv[log_unit].LunID;
1254 c->Header.LUN.LogDev.Mode = 1;
1256 if (page_code != 0) {
1257 c->Request.CDB[1] = 0x01;
1258 c->Request.CDB[2] = page_code;
1260 c->Request.CDBLen = 6;
1261 c->Request.Type.Attribute = ATTR_SIMPLE;
1262 c->Request.Type.Direction = XFER_READ; /* Read */
1263 c->Request.Timeout = 0; /* Don't time out */
1264 c->Request.CDB[4] = size & 0xFF;
1265 break;
1266 case CISS_REPORT_LOG:
1267 /* Talking to controller so It's a physical command
1268 mode = 00 target = 0.
1269 So we have nothing to write.
1270 */
1271 c->Request.CDBLen = 12;
1272 c->Request.Type.Attribute = ATTR_SIMPLE;
1273 c->Request.Type.Direction = XFER_READ; /* Read */
1274 c->Request.Timeout = 0; /* Don't time out */
1275 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
1276 c->Request.CDB[7] = (size >> 16) & 0xFF;
1277 c->Request.CDB[8] = (size >> 8) & 0xFF;
1278 c->Request.CDB[9] = size & 0xFF;
1279 break;
1280 case CCISS_READ_CAPACITY:
1281 c->Header.LUN.LogDev.VolId=
1282 hba[ctlr]->drv[log_unit].LunID;
1283 c->Header.LUN.LogDev.Mode = 1;
1284 c->Request.CDBLen = 10;
1285 c->Request.Type.Attribute = ATTR_SIMPLE;
1286 c->Request.Type.Direction = XFER_READ; /* Read */
1287 c->Request.Timeout = 0; /* Don't time out */
1288 break;
1289 default:
1290 printk(KERN_WARNING
1291 "cciss: Unknown Command 0x%x sent attempted\n", cmd);
1292 cmd_free(h, c, 1);
1293 return IO_ERROR;
1295 } else if (cmdtype == TYPE_MSG) {
1296 switch (cmd) {
1297 case 3: /* No-Op message */
1298 c->Request.CDBLen = 1;
1299 c->Request.Type.Attribute = ATTR_SIMPLE;
1300 c->Request.Type.Direction = XFER_WRITE;
1301 c->Request.Timeout = 0;
1302 c->Request.CDB[0] = cmd;
1303 break;
1304 default:
1305 printk(KERN_WARNING
1306 "cciss%d: unknown message type %d\n",
1307 ctlr, cmd);
1308 cmd_free(h, c, 1);
1309 return IO_ERROR;
1311 } else {
1312 printk(KERN_WARNING
1313 "cciss%d: unknown command type %d\n", ctlr, cmdtype);
1314 cmd_free(h, c, 1);
1315 return IO_ERROR;
1318 /* Fill in the scatter gather information */
1319 if (size > 0) {
/* single-element SG list; buffer mapped bidirectional and unmapped
 * again after completion below */
1320 buff_dma_handle.val = (__u64) pci_map_single( h->pdev,
1321 buff, size, PCI_DMA_BIDIRECTIONAL);
1322 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1323 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1324 c->SG[0].Len = size;
1325 c->SG[0].Ext = 0; /* we are not chaining */
1328 resend_cmd2:
1330 #if 0
1331 c->waiting = &wait;
1332 #else
1333 /* XXX SMH: we spin instead of waiting... */
1334 c->waiting = (void *)&wait;
1335 #endif
1337 /* Put the request on the tail of the queue and send it */
1338 spin_lock_irqsave(&io_request_lock, flags);
1339 addQ(&h->reqQ, c);
1340 h->Qdepth++;
1341 start_io(h);
1342 spin_unlock_irqrestore(&io_request_lock, flags);
1344 #if 0
1345 wait_for_completion(&wait);
1346 #else
1347 /* XXX SMH: spin instead of waiting on wait queue */
/* poll until the completion path clears `wait`; run softirqs so the
 * completion interrupt's bottom half can actually make progress */
1348 while(wait) {
1349 do_softirq();
1350 udelay(500);
1351 usecs += 500;
1352 if(usecs > 1000000) {
1353 printk("cciss: still waiting...!\n");
1354 usecs = 0;
1357 #endif
1360 if (c->err_info->CommandStatus != 0) {
1361 /* an error has occurred */
1362 switch (c->err_info->CommandStatus) {
1363 case CMD_TARGET_STATUS:
1364 printk(KERN_WARNING "cciss: cmd %p has "
1365 " completed with errors\n", c);
1366 if (c->err_info->ScsiStatus) {
1367 printk(KERN_WARNING "cciss: cmd %p "
1368 "has SCSI Status = %x\n", c,
1369 c->err_info->ScsiStatus);
1371 break;
1372 case CMD_DATA_UNDERRUN:
1373 case CMD_DATA_OVERRUN:
1374 /* expected for inquire and report lun commands */
1375 break;
1376 case CMD_INVALID:
1377 printk(KERN_WARNING "cciss: cmd %p is "
1378 "reported invalid\n", c);
1379 return_status = IO_ERROR;
1380 break;
1381 case CMD_PROTOCOL_ERR:
1382 printk(KERN_WARNING "cciss: cmd %p has "
1383 "protocol error \n", c);
1384 return_status = IO_ERROR;
1385 break;
1386 case CMD_HARDWARE_ERR:
1387 printk(KERN_WARNING "cciss: cmd %p had "
1388 " hardware error\n", c);
1389 return_status = IO_ERROR;
1390 break;
1391 case CMD_CONNECTION_LOST:
1392 printk(KERN_WARNING "cciss: cmd %p had "
1393 "connection lost\n", c);
1394 return_status = IO_ERROR;
1395 break;
1396 case CMD_ABORTED:
1397 printk(KERN_WARNING "cciss: cmd %p was "
1398 "aborted\n", c);
1399 return_status = IO_ERROR;
1400 break;
1401 case CMD_ABORT_FAILED:
1402 printk(KERN_WARNING "cciss: cmd %p reports "
1403 "abort failed\n", c);
1404 return_status = IO_ERROR;
1405 break;
1406 case CMD_UNSOLICITED_ABORT:
1407 printk(KERN_WARNING "cciss: cmd %p aborted "
1408 "do to an unsolicited abort\n", c);
1409 if (c->retry_count < MAX_CMD_RETRIES)
1411 printk(KERN_WARNING "retrying cmd\n");
1412 c->retry_count++;
1413 /* erase the old error */
1414 /* information */
1415 memset(c->err_info, 0,
1416 sizeof(ErrorInfo_struct));
1417 return_status = IO_OK;
1418 #if 0
1419 INIT_COMPLETION(wait);
1420 #else
1421 /* XXX SMH: spin instead of waiting. */
/* NOTE(review): resetting `wait` to 0 here makes the spin loop
 * after resend_cmd2 exit immediately, so the retried command is
 * never actually waited for.  Presumably this should re-arm the
 * flag to 1 (mirroring INIT_COMPLETION) -- confirm before use. */
1422 wait = 0;
1423 #endif
1424 goto resend_cmd2;
1427 return_status = IO_ERROR;
1428 break;
1429 default:
1430 printk(KERN_WARNING "cciss: cmd %p returned "
1431 "unknown status %x\n", c,
1432 c->err_info->CommandStatus);
1433 return_status = IO_ERROR;
1437 /* unlock the buffers from DMA */
1438 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1439 size, PCI_DMA_BIDIRECTIONAL);
1440 cmd_free(h, c, 0);
1441 return return_status;
/*
 * register_new_disk - detect a newly created logical volume and wire it
 * into the driver's tables.  (Compiled out: lives in an "#if 0" region.)
 * Issues CISS_REPORT_LOG to enumerate LUNs, picks the first LUN not yet
 * known (or the specific `requested_lun` if non-zero), finds a free slot
 * in drv[], then reads capacity and geometry and re-reads partitions.
 * Returns the new logical volume index, or a negative errno / -1 on
 * failure.  `opened_vol` marks a volume whose usage count must not be
 * reset (the c?d0 special case described below).
 */
1443 static int register_new_disk(int ctlr, int opened_vol, __u64 requested_lun)
1445 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1446 ctlr_info_t *h = hba[ctlr];
1447 int start, max_p, i;
1448 int num_luns;
1449 int logvol;
1450 int new_lun_found = 0;
1451 int new_lun_index = 0;
1452 int free_index_found = 0;
1453 int free_index = 0;
1454 ReportLunData_struct *ld_buff;
1455 ReadCapdata_struct *size_buff;
1456 InquiryData_struct *inq_buff;
1457 int return_code;
1458 int listlength = 0;
1459 __u32 lunid = 0;
1460 unsigned int block_size;
1461 unsigned int total_size;
1462 unsigned long flags;
1463 int req_lunid = (int) (requested_lun & (__u64) 0xffffffff);
1465 if (!capable(CAP_SYS_RAWIO))
1466 return -EPERM;
1467 /* if we have no space in our disk array left to add anything */
1468 spin_lock_irqsave(&io_request_lock, flags);
1469 if (h->num_luns >= CISS_MAX_LUN) {
1470 spin_unlock_irqrestore(&io_request_lock, flags);
1471 return -EINVAL;
1473 if (h->busy_configuring) {
1474 spin_unlock_irqrestore(&io_request_lock, flags);
1475 return -EBUSY;
/* exclude concurrent reconfiguration for the rest of the function */
1477 h->busy_configuring = 1;
1478 spin_unlock_irqrestore(&io_request_lock, flags);
1480 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1481 if (ld_buff == NULL) {
1482 printk(KERN_ERR "cciss: out of memory\n");
1483 h->busy_configuring = 0;
1484 return -ENOMEM;
1486 memset(ld_buff, 0, sizeof(ReportLunData_struct));
1487 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1488 if (size_buff == NULL) {
1489 printk(KERN_ERR "cciss: out of memory\n");
1490 kfree(ld_buff);
1491 h->busy_configuring = 0;
1492 return -ENOMEM;
1494 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1495 if (inq_buff == NULL) {
1496 printk(KERN_ERR "cciss: out of memory\n");
1497 kfree(ld_buff);
1498 kfree(size_buff);
1499 h->busy_configuring = 0;
1500 return -ENOMEM;
/* ask the controller for its current logical-volume list */
1503 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1504 sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
1506 if (return_code == IO_OK) {
1507 listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
1508 } else {
1509 /* reading number of logical volumes failed */
1510 printk(KERN_WARNING "cciss: report logical volume"
1511 " command failed\n");
1512 listlength = 0;
1513 h->busy_configuring = 0;
1514 return -1;
1516 num_luns = listlength / 8; /* 8 bytes pre entry */
1517 if (num_luns > CISS_MAX_LUN)
1518 num_luns = CISS_MAX_LUN;
1520 #ifdef CCISS_DEBUG
1521 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
1522 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
1523 ld_buff->LUNListLength[3], num_luns);
1524 #endif
/* scan the reported LUNs for one the driver does not know about yet */
1525 for(i=0; i< num_luns; i++) {
1526 int j;
1527 int lunID_found = 0;
/* assemble the 32-bit LUN id from the little-endian byte array */
1529 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
1530 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
1531 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
1532 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1534 /* check to see if this is a new lun */
1535 for(j=0; j <= h->highest_lun; j++) {
1536 #ifdef CCISS_DEBUG
1537 printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
1538 lunid);
1539 #endif /* CCISS_DEBUG */
1540 if (h->drv[j].LunID == lunid) {
1541 lunID_found = 1;
1542 break;
1546 if (lunID_found == 1)
1547 continue;
1548 else { /* new lun found */
1550 #ifdef CCISS_DEBUG
1551 printk("new lun found at %d\n", i);
1552 #endif /* CCISS_DEBUG */
1553 if (req_lunid) /* we are looking for a specific lun */
1555 if (lunid != req_lunid)
1557 #ifdef CCISS_DEBUG
1558 printk("new lun %x is not %x\n",
1559 lunid, req_lunid);
1560 #endif /* CCISS_DEBUG */
1561 continue;
1564 new_lun_index = i;
1565 new_lun_found = 1;
1566 break;
1569 if (!new_lun_found) {
1570 printk(KERN_DEBUG "cciss: New Logical Volume not found\n");
1571 h->busy_configuring = 0;
1572 return -1;
1574 /* Now find the free index */
1575 for(i=0; i <CISS_MAX_LUN; i++) {
1576 #ifdef CCISS_DEBUG
1577 printk("Checking Index %d\n", i);
1578 #endif /* CCISS_DEBUG */
1579 if (hba[ctlr]->drv[i].LunID == 0) {
1580 #ifdef CCISS_DEBUG
1581 printk("free index found at %d\n", i);
1582 #endif /* CCISS_DEBUG */
1583 free_index_found = 1;
1584 free_index = i;
1585 break;
1588 if (!free_index_found) {
1589 printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
1590 h->busy_configuring = 0;
1591 return -1;
1594 logvol = free_index;
1595 hba[ctlr]->drv[logvol].LunID = lunid;
1596 /* there could be gaps in lun numbers, track hightest */
1597 if (hba[ctlr]->highest_lun < logvol)
1598 hba[ctlr]->highest_lun = logvol;
/* read the new volume's capacity (big-endian byte arrays on the wire) */
1600 memset(size_buff, 0, sizeof(ReadCapdata_struct));
1601 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr,
1602 size_buff, sizeof(ReadCapdata_struct), 1,
1603 logvol, 0, TYPE_CMD);
1604 if (return_code == IO_OK) {
1605 total_size = (0xff &
1606 (unsigned int) size_buff->total_size[0]) << 24;
1607 total_size |= (0xff &
1608 (unsigned int) size_buff->total_size[1]) << 16;
1609 total_size |= (0xff &
1610 (unsigned int) size_buff->total_size[2]) << 8;
1611 total_size |= (0xff &
1612 (unsigned int) size_buff->total_size[3]);
1613 total_size++; /* command returns highest block address */
1615 block_size = (0xff &
1616 (unsigned int) size_buff->block_size[0]) << 24;
1617 block_size |= (0xff &
1618 (unsigned int) size_buff->block_size[1]) << 16;
1619 block_size |= (0xff &
1620 (unsigned int) size_buff->block_size[2]) << 8;
1621 block_size |= (0xff &
1622 (unsigned int) size_buff->block_size[3]);
1623 } else {
1624 /* read capacity command failed */
1625 printk(KERN_WARNING "cciss: read capacity failed\n");
1626 total_size = 0;
1627 block_size = BLOCK_SIZE;
1629 printk(KERN_INFO " blocks= %d block_size= %d\n",
1630 total_size, block_size);
1631 /* Execute the command to read the disk geometry */
1632 memset(inq_buff, 0, sizeof(InquiryData_struct));
1633 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
1634 sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
1635 if (return_code == IO_OK) {
1636 if (inq_buff->data_byte[8] == 0xFF) {
/* 0xFF in the RAID byte means the page carries no geometry;
 * fall back to the standard 255 heads / 32 sectors fiction */
1637 printk(KERN_WARNING
1638 "cciss: reading geometry failed, "
1639 "volume does not support reading geometry\n");
1641 hba[ctlr]->drv[logvol].block_size = block_size;
1642 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1643 hba[ctlr]->drv[logvol].heads = 255;
1644 hba[ctlr]->drv[logvol].sectors = 32; /* secs/trk */
1645 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1646 hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
1647 } else {
1648 hba[ctlr]->drv[logvol].block_size = block_size;
1649 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1650 hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
1651 hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
1652 hba[ctlr]->drv[logvol].cylinders =
1653 (inq_buff->data_byte[4] & 0xff) << 8;
1654 hba[ctlr]->drv[logvol].cylinders +=
1655 inq_buff->data_byte[5];
1656 hba[ctlr]->drv[logvol].raid_level =
1657 inq_buff->data_byte[8];
1659 } else {
1660 /* Get geometry failed */
1661 printk(KERN_WARNING "cciss: reading geometry failed, "
1662 "continuing with default geometry\n");
1664 hba[ctlr]->drv[logvol].block_size = block_size;
1665 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1666 hba[ctlr]->drv[logvol].heads = 255;
1667 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors per track */
1668 hba[ctlr]->drv[logvol].cylinders = total_size / 255 / 32;
1670 if (hba[ctlr]->drv[logvol].raid_level > 5)
1671 hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
1673 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
1674 hba[ctlr]->drv[logvol].heads,
1675 hba[ctlr]->drv[logvol].sectors,
1676 hba[ctlr]->drv[logvol].cylinders,
1677 raid_label[hba[ctlr]->drv[logvol].raid_level]);
1680 /* special case for c?d0, which may be opened even when
1681 it does not "exist". In that case, don't mess with usage count.
1682 Also, /dev/c1d1 could be used to re-add c0d0 so we can't just
1683 check whether logvol == 0, must check logvol != opened_vol */
1684 if (logvol != opened_vol)
1685 hba[ctlr]->drv[logvol].usage_count = 0;
1687 max_p = gdev->max_p;
1688 start = logvol<< gdev->minor_shift;
1689 hba[ctlr]->hd[start].nr_sects = total_size;
1690 hba[ctlr]->sizes[start] = total_size;
/* flush all partition minors of the new volume and reset blocksizes
 * before asking the block layer to re-read the partition table */
1692 for(i=max_p-1; i>=0; i--) {
1693 int minor = start+i;
1695 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1696 gdev->part[minor].start_sect = 0;
1697 gdev->part[minor].nr_sects = 0;
1699 /* reset the blocksize so we can read the partition table */
1700 blksize_size[hba[ctlr]->major][minor] = block_size;
1701 hba[ctlr]->hardsizes[minor] = block_size;
1706 ++hba[ctlr]->num_luns;
1707 gdev->nr_real = hba[ctlr]->highest_lun + 1;
1710 /* setup partitions per disk */
1711 grok_partitions(gdev, logvol, MAX_PART,
1712 hba[ctlr]->drv[logvol].nr_blocks);
1713 kfree(ld_buff);
1714 kfree(size_buff);
1715 kfree(inq_buff);
1716 h->busy_configuring = 0;
1717 return logvol;
/*
 * cciss_rescan_disk - refresh capacity/geometry of an already-known
 * logical volume that currently reports size 0.  (Compiled out: lives
 * in an "#if 0" region.)  Returns 1 if the volume already appears to be
 * on line, 0 on success, -EPERM without CAP_SYS_RAWIO, -1 on OOM.
 */
1720 static int cciss_rescan_disk(int ctlr, int logvol)
1722 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1723 int start, max_p, i;
1724 ReadCapdata_struct *size_buff;
1725 InquiryData_struct *inq_buff;
1726 int return_code;
1727 unsigned int block_size;
1728 unsigned int total_size;
1730 if (!capable(CAP_SYS_RAWIO))
1731 return -EPERM;
1732 if (hba[ctlr]->sizes[logvol << NWD_SHIFT] != 0) {
1733 /* disk is possible on line, return just a warning */
1734 return 1;
1736 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1737 if (size_buff == NULL) {
1738 printk(KERN_ERR "cciss: out of memory\n");
1739 return -1;
1741 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1742 if (inq_buff == NULL) {
1743 printk(KERN_ERR "cciss: out of memory\n");
1744 kfree(size_buff);
1745 return -1;
/* re-read the volume's capacity (big-endian byte arrays on the wire) */
1747 memset(size_buff, 0, sizeof(ReadCapdata_struct));
1748 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff,
1749 sizeof( ReadCapdata_struct), 1, logvol, 0,
1750 TYPE_CMD);
1751 if (return_code == IO_OK) {
1752 total_size = (0xff &
1753 (unsigned int)(size_buff->total_size[0])) << 24;
1754 total_size |= (0xff &
1755 (unsigned int)(size_buff->total_size[1])) << 16;
1756 total_size |= (0xff &
1757 (unsigned int)(size_buff->total_size[2])) << 8;
1758 total_size |= (0xff & (unsigned int)
1759 (size_buff->total_size[3]));
1760 total_size++; /* command returns highest block address */
1762 block_size = (0xff &
1763 (unsigned int)(size_buff->block_size[0])) << 24;
1764 block_size |= (0xff &
1765 (unsigned int)(size_buff->block_size[1])) << 16;
1766 block_size |= (0xff &
1767 (unsigned int)(size_buff->block_size[2])) << 8;
1768 block_size |= (0xff &
1769 (unsigned int)(size_buff->block_size[3]));
1770 } else { /* read capacity command failed */
1771 printk(KERN_WARNING "cciss: read capacity failed\n");
1772 total_size = block_size = 0;
1774 printk(KERN_INFO " blocks= %d block_size= %d\n",
1775 total_size, block_size);
1776 /* Execute the command to read the disk geometry */
1777 memset(inq_buff, 0, sizeof(InquiryData_struct));
1778 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
1779 sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
1780 if (return_code == IO_OK) {
1781 if (inq_buff->data_byte[8] == 0xFF) {
/* volume cannot report geometry; use the 255/32 default fiction */
1782 printk(KERN_WARNING "cciss: reading geometry failed, "
1783 "volume does not support reading geometry\n");
1785 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1786 hba[ctlr]->drv[logvol].heads = 255;
1787 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors/track */
1788 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1789 } else {
1790 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1791 hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
1792 hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
1793 hba[ctlr]->drv[logvol].cylinders =
1794 (inq_buff->data_byte[4] & 0xff) << 8;
1795 hba[ctlr]->drv[logvol].cylinders +=
1796 inq_buff->data_byte[5];
1798 } else { /* Get geometry failed */
1799 printk(KERN_WARNING "cciss: reading geometry failed, "
1800 "continuing with default geometry\n");
1802 hba[ctlr]->drv[logvol].nr_blocks = total_size;
1803 hba[ctlr]->drv[logvol].heads = 255;
1804 hba[ctlr]->drv[logvol].sectors = 32; /* Sectors / track */
1805 hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
1808 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d \n\n",
1809 hba[ctlr]->drv[logvol].heads,
1810 hba[ctlr]->drv[logvol].sectors,
1811 hba[ctlr]->drv[logvol].cylinders);
1812 max_p = gdev->max_p;
1813 start = logvol<< gdev->minor_shift;
1814 hba[ctlr]->hd[start].nr_sects = hba[ctlr]->sizes[start]= total_size;
/* drop stale partition state, then re-read the partition table */
1816 for (i=max_p-1; i>=0; i--) {
1817 int minor = start+i;
1818 invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
1819 gdev->part[minor].start_sect = 0;
1820 gdev->part[minor].nr_sects = 0;
1822 /* reset the blocksize so we can read the partition table */
1823 blksize_size[hba[ctlr]->major][minor] = block_size;
1824 hba[ctlr]->hardsizes[minor] = block_size;
1827 /* setup partitions per disk */
1828 grok_partitions(gdev, logvol, MAX_PART,
1829 hba[ctlr]->drv[logvol].nr_blocks );
1831 kfree(size_buff);
1832 kfree(inq_buff);
1833 return 0;
1835 #endif
1837 /*
1838 * Wait polling for a command to complete.
1839 * The memory mapped FIFO is polled for the completion.
1840 * Used only at init time, interrupts disabled.
1841 */
1842 static unsigned long pollcomplete(int ctlr)
1844 unsigned long done;
1845 int i;
1847 /* Wait (up to 20 seconds) for a command to complete */
1849 for (i = 20 * HZ; i > 0; i--) {
1850 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1851 if (done == FIFO_EMPTY) {
1852 set_current_state(TASK_UNINTERRUPTIBLE);
1853 schedule_timeout(1);
1854 } else
1855 return done;
1857 /* Invalid address to tell caller we ran out of time */
1858 return 1;
1860 /*
1861 * Send a command to the controller, and wait for it to complete.
1862 * Only used at init time.
1863 */
/*
 * sendcmd - build a CISS command, submit it directly to the hardware
 * FIFO, and poll (via pollcomplete) for the result.  Interrupts are
 * masked for the duration; used only at init time.  Returns IO_OK or
 * IO_ERROR.  Data over/underruns on inquiry/report commands are treated
 * as success; unsolicited aborts are retried up to MAX_CMD_RETRIES.
 */
1864 static int sendcmd(
1865 __u8 cmd,
1866 int ctlr,
1867 void *buff,
1868 size_t size,
1869 unsigned int use_unit_num, /* 0: address the controller,
1870 1: address logical volume log_unit,
1871 2: periph device address is scsi3addr */
1872 unsigned int log_unit,
1873 __u8 page_code,
1874 unsigned char *scsi3addr)
1876 CommandList_struct *c;
1877 int i;
1878 unsigned long complete;
1879 ctlr_info_t *info_p= hba[ctlr];
1880 u64bit buff_dma_handle;
1881 int status = IO_OK;
1883 c = cmd_alloc(info_p, 1);
1884 if (c == NULL) {
1885 printk(KERN_WARNING "cciss: unable to get memory");
1886 return IO_ERROR;
1888 /* Fill in Command Header */
1889 c->Header.ReplyQueue = 0; /* unused in simple mode */
1890 if (buff != NULL) { /* buffer to fill */
1891 c->Header.SGList = 1;
1892 c->Header.SGTotal= 1;
1893 } else { /* no buffers to fill */
1894 c->Header.SGList = 0;
1895 c->Header.SGTotal= 0;
1897 c->Header.Tag.lower = c->busaddr; /* use the kernel address */
1898 /* the cmd block for tag */
1899 /* Fill in Request block */
/* per-opcode CDB setup; each case selects CDB length, addressing mode
 * and transfer direction */
1900 switch (cmd) {
1901 case CISS_INQUIRY:
1902 /* If the logical unit number is 0 then, this is going
1903 to controller so It's a physical command
1904 mode = 0 target = 0.
1905 So we have nothing to write.
1906 otherwise, if use_unit_num == 1,
1907 mode = 1(volume set addressing) target = LUNID
1908 otherwise, if use_unit_num == 2,
1909 mode = 0(periph dev addr) target = scsi3addr
1910 */
1911 if (use_unit_num == 1) {
1912 c->Header.LUN.LogDev.VolId=
1913 hba[ctlr]->drv[log_unit].LunID;
1914 c->Header.LUN.LogDev.Mode = 1;
1916 else if (use_unit_num == 2) {
1917 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1918 c->Header.LUN.LogDev.Mode = 0;
1919 /* phys dev addr */
1922 /* are we trying to read a vital product page */
1923 if (page_code != 0) {
1924 c->Request.CDB[1] = 0x01;
1925 c->Request.CDB[2] = page_code;
1927 c->Request.CDBLen = 6;
1928 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1929 c->Request.Type.Attribute = ATTR_SIMPLE;
1930 c->Request.Type.Direction = XFER_READ; /* Read */
1931 c->Request.Timeout = 0; /* Don't time out */
1932 c->Request.CDB[0] = CISS_INQUIRY;
1933 c->Request.CDB[4] = size & 0xFF;
1934 break;
1935 case CISS_REPORT_LOG:
1936 case CISS_REPORT_PHYS:
1937 /* Talking to controller so It's a physical command
1938 mode = 00 target = 0.
1939 So we have nothing to write.
1940 */
1941 c->Request.CDBLen = 12;
1942 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1943 c->Request.Type.Attribute = ATTR_SIMPLE;
1944 c->Request.Type.Direction = XFER_READ; /* Read */
1945 c->Request.Timeout = 0; /* Don't time out */
1946 c->Request.CDB[0] = cmd;
1947 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
1948 c->Request.CDB[7] = (size >> 16) & 0xFF;
1949 c->Request.CDB[8] = (size >> 8) & 0xFF;
1950 c->Request.CDB[9] = size & 0xFF;
1951 break;
1953 case CCISS_READ_CAPACITY:
1954 c->Header.LUN.LogDev.VolId=
1955 hba[ctlr]->drv[log_unit].LunID;
1956 c->Header.LUN.LogDev.Mode = 1;
1957 c->Request.CDBLen = 10;
1958 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1959 c->Request.Type.Attribute = ATTR_SIMPLE;
1960 c->Request.Type.Direction = XFER_READ; /* Read */
1961 c->Request.Timeout = 0; /* Don't time out */
1962 c->Request.CDB[0] = CCISS_READ_CAPACITY;
1963 break;
1964 case CCISS_CACHE_FLUSH:
1965 c->Request.CDBLen = 12;
1966 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
1967 c->Request.Type.Attribute = ATTR_SIMPLE;
1968 c->Request.Type.Direction = XFER_WRITE; /* No data */
1969 c->Request.Timeout = 0; /* Don't time out */
1970 c->Request.CDB[0] = BMIC_WRITE; /* BMIC Passthru */
1971 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1972 break;
1973 default:
1974 printk(KERN_WARNING
1975 "cciss: Unknown Command 0x%x sent attempted\n",
1976 cmd);
1977 cmd_free(info_p, c, 1);
1978 return IO_ERROR;
1979 };
1980 /* Fill in the scatter gather information */
1981 if (size > 0) {
/* single SG entry; unmapped again at cleanup1 below */
1982 buff_dma_handle.val = (__u64) pci_map_single( info_p->pdev,
1983 buff, size, PCI_DMA_BIDIRECTIONAL);
1984 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1985 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1986 c->SG[0].Len = size;
1987 c->SG[0].Ext = 0; /* we are not chaining */
1989 resend_cmd1:
1990 /*
1991 * Disable interrupt
1992 */
1993 #ifdef CCISS_DEBUG
1994 printk(KERN_DEBUG "cciss: turning intr off\n");
1995 #endif /* CCISS_DEBUG */
1996 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1998 /* Make sure there is room in the command FIFO */
1999 /* Actually it should be completely empty at this time. */
2000 for (i = 200000; i > 0; i--) {
2001 /* if fifo isn't full go */
2002 if (!(info_p->access.fifo_full(info_p))) {
2004 break;
2006 udelay(10);
2007 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2008 " waiting!\n", ctlr);
2010 /*
2011 * Send the cmd
2012 */
2013 info_p->access.submit_command(info_p, c);
/* poll for completion; pollcomplete() returns 1 on timeout, which can
 * never be a valid command address */
2014 complete = pollcomplete(ctlr);
2016 #ifdef CCISS_DEBUG
2017 printk(KERN_DEBUG "cciss: command completed\n");
2018 #endif /* CCISS_DEBUG */
2020 if (complete != 1) {
2021 if ( (complete & CISS_ERROR_BIT)
2022 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2023 /* if data overrun or underun on Report command
2024 ignore it
2025 */
2026 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2027 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2028 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2029 ((c->err_info->CommandStatus ==
2030 CMD_DATA_OVERRUN) ||
2031 (c->err_info->CommandStatus ==
2032 CMD_DATA_UNDERRUN)
2033 )) {
2034 complete = c->busaddr;
2035 } else {
2036 if (c->err_info->CommandStatus ==
2037 CMD_UNSOLICITED_ABORT) {
2038 printk(KERN_WARNING "cciss: "
2039 "cmd %p aborted do "
2040 "to an unsolicited abort \n", c);
2041 if (c->retry_count < MAX_CMD_RETRIES) {
2042 printk(KERN_WARNING
2043 "retrying cmd\n");
2044 c->retry_count++;
2045 /* erase the old error */
2046 /* information */
2047 memset(c->err_info, 0,
2048 sizeof(ErrorInfo_struct));
2049 goto resend_cmd1;
2050 } else {
/* NOTE(review): "to many" in the message below is a typo for "too many"
 * in the upstream source; left untouched here */
2051 printk(KERN_WARNING
2052 "retried to many times\n");
2053 status = IO_ERROR;
2054 goto cleanup1;
2057 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2058 " Error %x \n", ctlr,
2059 c->err_info->CommandStatus);
2060 printk(KERN_WARNING "cciss cciss%d: sendcmd"
2061 " offensive info\n"
2062 " size %x\n num %x value %x\n", ctlr,
2063 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2064 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2065 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2066 status = IO_ERROR;
2067 goto cleanup1;
2070 if (complete != c->busaddr) {
2071 printk( KERN_WARNING "cciss cciss%d: SendCmd "
2072 "Invalid command list address returned! (%lx)\n",
2073 ctlr, complete);
2074 status = IO_ERROR;
2075 goto cleanup1;
2077 } else {
2078 printk( KERN_WARNING
2079 "cciss cciss%d: SendCmd Timeout out, "
2080 "No command list address returned!\n",
2081 ctlr);
2082 status = IO_ERROR;
2085 cleanup1:
2086 /* unlock the data buffer from DMA */
2087 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2088 size, PCI_DMA_BIDIRECTIONAL);
2089 cmd_free(info_p, c, 1);
2090 return status;
2092 /*
2093 * Map (physical) PCI mem into (virtual) kernel space
2094 */
2095 static ulong remap_pci_mem(ulong base, ulong size)
2097 ulong page_base = ((ulong) base) & PAGE_MASK;
2098 ulong page_offs = ((ulong) base) - page_base;
2099 ulong page_remapped = (ulong) ioremap(page_base, page_offs+size);
2101 return (ulong) (page_remapped ? (page_remapped + page_offs) : 0UL);
2104 /*
2105 * Enqueuing and dequeuing functions for cmdlists.
2106 */
2107 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
2109 if (*Qptr == NULL) {
2110 *Qptr = c;
2111 c->next = c->prev = c;
2112 } else {
2113 c->prev = (*Qptr)->prev;
2114 c->next = (*Qptr);
2115 (*Qptr)->prev->next = c;
2116 (*Qptr)->prev = c;
2120 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
2121 CommandList_struct *c)
2123 if (c && c->next != c) {
2124 if (*Qptr == c) *Qptr = c->next;
2125 c->prev->next = c->next;
2126 c->next->prev = c->prev;
2127 } else {
2128 *Qptr = NULL;
2130 return c;
2133 /*
2134 * Takes jobs of the Q and sends them to the hardware, then puts it on
2135 * the Q to wait for completion.
2136 */
2137 static void start_io( ctlr_info_t *h)
2139 CommandList_struct *c;
2141 while(( c = h->reqQ) != NULL ) {
2142 /* can't do anything if fifo is full */
2143 if ((h->access.fifo_full(h))) {
2144 printk(KERN_WARNING "cciss: fifo full \n");
2145 return;
2147 /* Get the frist entry from the Request Q */
2148 removeQ(&(h->reqQ), c);
2149 h->Qdepth--;
2151 /* Tell the controller execute command */
2152 h->access.submit_command(h, c);
2154 /* Put job onto the completed Q */
2155 addQ (&(h->cmpQ), c);
2159 static inline void complete_buffers( struct buffer_head *bh, int status)
2161 struct buffer_head *xbh;
2163 while(bh) {
2164 xbh = bh->b_reqnext;
2165 bh->b_reqnext = NULL;
2166 blk_finished_io(bh->b_size >> 9);
2167 bh->b_end_io(bh, status);
2168 bh = xbh;
2171 /* This code assumes io_request_lock is already held */
2172 /* Zeros out the error record and then resends the command back */
2173 /* to the controller */
2174 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2176 /* erase the old error information */
2177 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2179 /* add it to software queue and then send it to the controller */
2180 addQ(&(h->reqQ),c);
2181 h->Qdepth++;
2182 if (h->Qdepth > h->maxQsinceinit)
2183 h->maxQsinceinit = h->Qdepth;
2185 start_io(h);
2187 /* checks the status of the job and calls complete buffers to mark all
2188 * buffers for the completed job.
2189 */
/*
 * Inspect cmd->err_info to decide success (status=1) or failure
 * (status=0), retry unsolicited aborts up to MAX_CMD_RETRIES, then
 * unmap the scatter-gather DMA mappings, complete the buffer chain,
 * finish the request and free the command block.  'timeout' != 0
 * forces failure regardless of err_info.  Called from the interrupt
 * handler with io_request_lock held.
 */
2190 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2191 int timeout)
2193 int status = 1;
2194 int retry_cmd = 0;
2195 int i, ddir;
2196 u64bit temp64;
2198 if (timeout)
2199 status = 0;
2201 if (cmd->err_info->CommandStatus != 0) {
2202 /* an error has occurred */
2203 switch (cmd->err_info->CommandStatus) {
2204 unsigned char sense_key;
2205 case CMD_TARGET_STATUS:
2206 status = 0;
/* SCSI status 0x02 = CHECK CONDITION: look at the sense key;
 * only a "recovered error" (key 0x1) still counts as success. */
2208 if (cmd->err_info->ScsiStatus == 0x02) {
2209 printk(KERN_WARNING "cciss: cmd %p "
2210 "has CHECK CONDITION,"
2211 " sense key = 0x%x\n", cmd,
2212 cmd->err_info->SenseInfo[2]);
2213 /* check the sense key */
2214 sense_key = 0xf &
2215 cmd->err_info->SenseInfo[2];
2216 /* recovered error */
2217 if ( sense_key == 0x1)
2218 status = 1;
2219 } else {
2220 printk(KERN_WARNING "cciss: cmd %p "
2221 "has SCSI Status 0x%x\n",
2222 cmd, cmd->err_info->ScsiStatus);
2224 break;
/* Under/overruns are logged but still treated as success. */
2225 case CMD_DATA_UNDERRUN:
2226 printk(KERN_WARNING "cciss: cmd %p has"
2227 " completed with data underrun "
2228 "reported\n", cmd);
2229 break;
2230 case CMD_DATA_OVERRUN:
2231 printk(KERN_WARNING "cciss: cmd %p has"
2232 " completed with data overrun "
2233 "reported\n", cmd);
2234 break;
2235 case CMD_INVALID:
2236 printk(KERN_WARNING "cciss: cmd %p is "
2237 "reported invalid\n", cmd);
2238 status = 0;
2239 break;
2240 case CMD_PROTOCOL_ERR:
2241 printk(KERN_WARNING "cciss: cmd %p has "
2242 "protocol error \n", cmd);
2243 status = 0;
2244 break;
2245 case CMD_HARDWARE_ERR:
2246 printk(KERN_WARNING "cciss: cmd %p had "
2247 " hardware error\n", cmd);
2248 status = 0;
2249 break;
2250 case CMD_CONNECTION_LOST:
2251 printk(KERN_WARNING "cciss: cmd %p had "
2252 "connection lost\n", cmd);
2253 status=0;
2254 break;
2255 case CMD_ABORTED:
2256 printk(KERN_WARNING "cciss: cmd %p was "
2257 "aborted\n", cmd);
2258 status=0;
2259 break;
2260 case CMD_ABORT_FAILED:
2261 printk(KERN_WARNING "cciss: cmd %p reports "
2262 "abort failed\n", cmd);
2263 status=0;
2264 break;
/* Unsolicited aborts are the only error worth retrying. */
2265 case CMD_UNSOLICITED_ABORT:
2266 printk(KERN_WARNING "cciss: cmd %p aborted do "
2267 "to an unsolicited abort \n",
2268 cmd);
2269 if (cmd->retry_count < MAX_CMD_RETRIES) {
2270 retry_cmd=1;
2271 printk(KERN_WARNING
2272 "retrying cmd\n");
2273 cmd->retry_count++;
2274 } else {
2275 printk(KERN_WARNING
2276 "retried to many times\n");
2278 status=0;
2279 break;
2280 case CMD_TIMEOUT:
2281 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2282 cmd);
2283 status=0;
2284 break;
2285 default:
2286 printk(KERN_WARNING "cciss: cmd %p returned "
2287 "unknown status %x\n", cmd,
2288 cmd->err_info->CommandStatus);
2289 status=0;
/* Retried commands are requeued by resend_cciss_cmd(); they are
 * neither unmapped nor freed here. */
2292 /* We need to return this command */
2293 if (retry_cmd) {
2294 resend_cciss_cmd(h,cmd);
2295 return;
2297 /* command did not need to be retried */
2298 /* unmap the DMA mapping for all the scatter gather elements */
2299 if (cmd->Request.Type.Direction == XFER_READ)
2300 ddir = PCI_DMA_FROMDEVICE;
2301 else
2302 ddir = PCI_DMA_TODEVICE;
2303 for(i=0; i<cmd->Header.SGList; i++) {
/* Reassemble the 64-bit bus address from its two 32-bit halves. */
2304 temp64.val32.lower = cmd->SG[i].Addr.lower;
2305 temp64.val32.upper = cmd->SG[i].Addr.upper;
2306 pci_unmap_page(hba[cmd->ctlr]->pdev,
2307 temp64.val, cmd->SG[i].Len, ddir);
/* Complete every buffer of the request with the final status. */
2309 complete_buffers(cmd->rq->bh, status);
2310 #ifdef CCISS_DEBUG
2311 printk("Done with %p\n", cmd->rq);
2312 #endif /* CCISS_DEBUG */
2313 end_that_request_last(cmd->rq);
2314 cmd_free(h,cmd,1);
2318 static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
2319 int max_segments)
2321 if (rq->nr_segments < MAXSGENTRIES) {
2322 rq->nr_segments++;
2323 return 1;
2325 return 0;
2328 static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
2329 struct buffer_head *bh, int max_segments)
2331 if (blk_seg_merge_ok(rq->bhtail, bh))
2332 return 1;
2333 return cpq_new_segment(q, rq, max_segments);
2336 static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
2337 struct buffer_head *bh, int max_segments)
2339 if (blk_seg_merge_ok(bh, rq->bh))
2340 return 1;
2341 return cpq_new_segment(q, rq, max_segments);
2344 static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
2345 struct request *nxt, int max_segments)
2347 int total_segments = rq->nr_segments + nxt->nr_segments;
2349 if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
2350 total_segments--;
2352 if (total_segments > MAXSGENTRIES)
2353 return 0;
2355 rq->nr_segments = total_segments;
2356 return 1;
2359 /*
2360 * Get a request and submit it to the controller.
2361 * Currently we do one request at a time. Ideally we would like to send
2362 * everything to the controller on the first call, but there is a danger
2363 * of holding the io_request_lock for to long.
2364 */
/*
 * Block-layer request function.  Entered with io_request_lock held;
 * the lock is dropped while building each command's scatter-gather
 * list and retaken before queueing it.  Loops until the block queue
 * is empty or a command block cannot be allocated, then starts I/O.
 */
2365 static void do_cciss_request(request_queue_t *q)
2367 ctlr_info_t *h= q->queuedata;
2368 CommandList_struct *c;
2369 int log_unit, start_blk, seg;
2370 unsigned long long lastdataend;
2371 struct buffer_head *bh;
2372 struct list_head *queue_head = &q->queue_head;
2373 struct request *creq;
2374 u64bit temp64;
2375 struct scatterlist tmp_sg[MAXSGENTRIES];
2376 int i, ddir;
2378 if (q->plugged)
2379 goto startio;
2381 next:
2382 if (list_empty(queue_head))
2383 goto startio;
2385 creq = blkdev_entry_next_request(queue_head);
2386 if (creq->nr_segments > MAXSGENTRIES)
2387 BUG();
/* A request whose major maps to a different controller is bogus:
 * fail all its buffers and move on. */
2389 if( h->ctlr != map_major_to_ctlr[MAJOR(creq->rq_dev)] ) {
2390 printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
2391 h->ctlr, creq->rq_dev, creq);
2392 blkdev_dequeue_request(creq);
2393 complete_buffers(creq->bh, 0);
2394 end_that_request_last(creq);
2395 goto startio;
2398 /* make sure controller is alive. */
2399 if (!CTLR_IS_ALIVE(h)) {
2400 printk(KERN_WARNING "cciss%d: I/O quit ", h->ctlr);
2401 blkdev_dequeue_request(creq);
2402 complete_buffers(creq->bh, 0);
2403 end_that_request_last(creq);
2404 return;
/* No free command blocks: leave the request queued and just kick
 * whatever is already pending. */
2407 if (( c = cmd_alloc(h, 1)) == NULL)
2408 goto startio;
2410 blkdev_dequeue_request(creq);
/* Drop the lock while doing the (potentially slow) SG/DMA setup. */
2412 spin_unlock_irq(&io_request_lock);
2414 c->cmd_type = CMD_RWREQ;
2415 c->rq = creq;
2416 bh = creq->bh;
2418 /* fill in the request */
2419 log_unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
2420 c->Header.ReplyQueue = 0; /* unused in simple mode */
2421 c->Header.Tag.lower = c->busaddr; /* use the physical address */
2422 /* the cmd block for tag */
2423 c->Header.LUN.LogDev.VolId= hba[h->ctlr]->drv[log_unit].LunID;
2424 c->Header.LUN.LogDev.Mode = 1;
2425 c->Request.CDBLen = 10; /* 12 byte commands not in FW yet. */
2426 c->Request.Type.Type = TYPE_CMD; /* It is a command. */
2427 c->Request.Type.Attribute = ATTR_SIMPLE;
2428 c->Request.Type.Direction =
2429 (creq->cmd == READ) ? XFER_READ: XFER_WRITE;
2430 c->Request.Timeout = 0; /* Don't time out */
2431 c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE;
/* Translate the partition-relative sector into an absolute block. */
2432 start_blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect + creq->sector;
2433 #ifdef CCISS_DEBUG
2434 if (bh == NULL)
2435 panic("cciss: bh== NULL?");
2436 printk(KERN_DEBUG "cciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2437 (int) creq->nr_sectors);
2438 #endif /* CCISS_DEBUG */
/* Build the scatter list, coalescing physically contiguous buffers
 * into a single segment. */
2439 seg = 0;
2440 lastdataend = ~0ULL;
2441 while(bh) {
2443 if (bh_phys(bh) == lastdataend)
2444 { /* tack it on to the last segment */
2445 tmp_sg[seg-1].length +=bh->b_size;
2446 lastdataend += bh->b_size;
2447 } else {
2448 if (seg == MAXSGENTRIES)
2449 BUG();
2450 tmp_sg[seg].page = bh->b_page;
2451 tmp_sg[seg].length = bh->b_size;
2452 tmp_sg[seg].offset = bh_offset(bh);
2453 lastdataend = bh_phys(bh) + bh->b_size;
2454 seg++;
2456 bh = bh->b_reqnext;
2459 /* get the DMA records for the setup */
2460 if (c->Request.Type.Direction == XFER_READ)
2461 ddir = PCI_DMA_FROMDEVICE;
2462 else
2463 ddir = PCI_DMA_TODEVICE;
2464 for (i=0; i<seg; i++) {
2465 c->SG[i].Len = tmp_sg[i].length;
2466 temp64.val = pci_map_page(h->pdev, tmp_sg[i].page,
2467 tmp_sg[i].offset, tmp_sg[i].length, ddir);
2468 c->SG[i].Addr.lower = temp64.val32.lower;
2469 c->SG[i].Addr.upper = temp64.val32.upper;
2470 c->SG[i].Ext = 0; /* we are not chaining */
2472 /* track how many SG entries we are using */
2473 if (seg > h->maxSG)
2474 h->maxSG = seg;
2476 #ifdef CCISS_DEBUG
2477 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2478 #endif /* CCISS_DEBUG */
/* Fill in the rest of the 10-byte READ/WRITE CDB: 32-bit start
 * block in CDB[2..5], 16-bit sector count in CDB[7..8]. */
2480 c->Header.SGList = c->Header.SGTotal = seg;
2481 c->Request.CDB[1]= 0;
2482 c->Request.CDB[2]= (start_blk >> 24) & 0xff; /* MSB */
2483 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2484 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2485 c->Request.CDB[5]= start_blk & 0xff;
2486 c->Request.CDB[6]= 0; /* (sect >> 24) & 0xff; MSB */
2487 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2488 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2489 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
/* Retake the lock before touching the shared software queue. */
2491 spin_lock_irq(&io_request_lock);
2493 addQ(&(h->reqQ),c);
2494 h->Qdepth++;
2495 if (h->Qdepth > h->maxQsinceinit)
2496 h->maxQsinceinit = h->Qdepth;
2498 goto next;
2500 startio:
2501 start_io(h);
/*
 * Interrupt handler: drain the controller's completion FIFO, match
 * each completed tag against the commands parked on cmpQ, complete
 * them, then try to queue more I/O.  Takes io_request_lock itself.
 */
2504 static void do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2506 ctlr_info_t *h = dev_id;
2507 CommandList_struct *c;
2508 unsigned long flags;
2509 __u32 a, a1;
2512 /* Is this interrupt for us? */
2513 if ((h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
2514 return;
2516 /*
2517 * If there are completed commands in the completion queue,
2518 * we had better do something about it.
2519 */
2520 spin_lock_irqsave(&io_request_lock, flags);
2521 while( h->access.intr_pending(h)) {
2522 while((a = h->access.command_completed(h)) != FIFO_EMPTY) {
2523 a1 = a;
/* The low two tag bits are controller status flags, not part of
 * the command's bus address. */
2524 a &= ~3;
2525 if ((c = h->cmpQ) == NULL) {
2526 printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
2527 continue;
/* Walk the circular cmpQ looking for the matching bus address;
 * stop after one full lap. */
2529 while(c->busaddr != a) {
2530 c = c->next;
2531 if (c == h->cmpQ)
2532 break;
2534 /*
2535 * If we've found the command, take it off the
2536 * completion Q and free it
2537 */
2538 if (c->busaddr == a) {
2539 removeQ(&h->cmpQ, c);
2540 if (c->cmd_type == CMD_RWREQ) {
2541 complete_command(h, c, 0);
2542 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2543 #if 0
2544 complete(c->waiting);
2545 #else
/* Xen port: the waiter polls an int flag instead of sleeping on
 * a completion; clear it to signal the command is done. */
2546 /* XXX SMH: use a flag to signal */
2547 if(*(int *)(c->waiting) != 0)
2548 *(int *)(c->waiting) = 0;
2549 #endif
2551 # ifdef CONFIG_CISS_SCSI_TAPE
2552 else if (c->cmd_type == CMD_SCSI) {
2553 complete_scsi_command(c, 0, a1);
2555 # endif
2556 continue;
2560 /*
2561 * See if we can queue up some more IO
2562 */
2563 do_cciss_request(BLK_DEFAULT_QUEUE(h->major));
2564 spin_unlock_irqrestore(&io_request_lock, flags);
/*
 * We cannot read the structure directly, for portablity we must use
 * the io functions.
 * This is for debug only.
 */
#ifdef CCISS_DEBUG
/*
 * Dump the controller's configuration table to the log, reading every
 * field through readb()/readl() since the table lives in PCI memory.
 */
static void print_cfg_table( CfgTable_struct *tb)
{
	int i;
	char temp_name[17];

	printk("Controller Configuration information\n");
	printk("------------------------------------\n");
	for(i=0;i<4;i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4]='\0';
	printk(" Signature = %s\n", temp_name);
	printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
	printk(" Transport methods supported = 0x%x\n",
	       readl(&(tb-> TransportSupport)));
	printk(" Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	printk(" Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	printk(" Coalese Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	printk(" Coalese Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	/* BUGFIX: was "0x%d", a nonsensical mixed specifier; use 0x%x to
	 * match the other hex fields. */
	printk(" Max outstanding commands = 0x%x\n",
	       readl(&(tb->CmdsOutMax)));
	printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
	for(i=0;i<16;i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	printk(" Server Name = %s\n", temp_name);
	printk(" Heartbeat Counter = 0x%x\n\n\n",
	       readl(&(tb->HeartBeat)));
}
#endif /* CCISS_DEBUG */
2606 static void release_io_mem(ctlr_info_t *c)
2608 /* if IO mem was not protected do nothing */
2609 if (c->io_mem_addr == 0)
2610 return;
2611 release_region(c->io_mem_addr, c->io_mem_length);
2612 c->io_mem_addr = 0;
2613 c->io_mem_length = 0;
2615 static int find_PCI_BAR_index(struct pci_dev *pdev,
2616 unsigned long pci_bar_addr)
2618 int i, offset, mem_type, bar_type;
2619 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2620 return 0;
2621 offset = 0;
2622 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2623 bar_type = pci_resource_flags(pdev, i) &
2624 PCI_BASE_ADDRESS_SPACE;
2625 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2626 offset += 4;
2627 else {
2628 mem_type = pci_resource_flags(pdev, i) &
2629 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2630 switch (mem_type) {
2631 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2632 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2633 offset += 4; /* 32 bit */
2634 break;
2635 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2636 offset += 8;
2637 break;
2638 default: /* reserved in PCI 2.2 */
2639 printk(KERN_WARNING "Base address is invalid\n");
2640 return -1;
2641 break;
2644 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2645 return i+1;
2647 return -1;
2650 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2652 ushort subsystem_vendor_id, subsystem_device_id, command;
2653 unchar irq = pdev->irq;
2654 __u32 board_id;
2655 __u64 cfg_offset;
2656 __u32 cfg_base_addr;
2657 __u64 cfg_base_addr_index;
2658 int i;
2660 /* check to see if controller has been disabled */
2661 /* BEFORE we try to enable it */
2662 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2663 if (!(command & 0x02)) {
2664 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2665 return -1;
2667 if (pci_enable_device(pdev)) {
2668 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2669 return -1;
2671 if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0) {
2672 printk(KERN_ERR "cciss: Unable to set DMA mask\n");
2673 return -1;
2676 subsystem_vendor_id = pdev->subsystem_vendor;
2677 subsystem_device_id = pdev->subsystem_device;
2678 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2679 subsystem_vendor_id );
2682 /* search for our IO range so we can protect it */
2683 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2684 /* is this an IO range */
2685 if (pci_resource_flags(pdev, i) & 0x01) {
2686 c->io_mem_addr = pci_resource_start(pdev, i);
2687 c->io_mem_length = pci_resource_end(pdev, i) -
2688 pci_resource_start(pdev, i) + 1;
2689 #ifdef CCISS_DEBUG
2690 printk("IO value found base_addr[%d] %lx %lx\n", i,
2691 c->io_mem_addr, c->io_mem_length);
2692 #endif /* CCISS_DEBUG */
2693 /* register the IO range */
2694 if (!request_region( c->io_mem_addr,
2695 c->io_mem_length, "cciss")) {
2696 printk(KERN_WARNING
2697 "cciss I/O memory range already in "
2698 "use addr=%lx length=%ld\n",
2699 c->io_mem_addr, c->io_mem_length);
2700 c->io_mem_addr= 0;
2701 c->io_mem_length = 0;
2703 break;
2707 #ifdef CCISS_DEBUG
2708 printk("command = %x\n", command);
2709 printk("irq = %x\n", irq);
2710 printk("board_id = %x\n", board_id);
2711 #endif /* CCISS_DEBUG */
2713 c->intr = irq;
2715 /*
2716 * Memory base addr is first addr , the second points to the config
2717 * table
2718 */
2720 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2721 #ifdef CCISS_DEBUG
2722 printk("address 0 = %x\n", c->paddr);
2723 #endif /* CCISS_DEBUG */
2724 c->vaddr = remap_pci_mem(c->paddr, 200);
2726 /* get the address index number */
2727 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2728 /* I am not prepared to deal with a 64 bit address value */
2729 cfg_base_addr &= (__u32) 0x0000ffff;
2730 #ifdef CCISS_DEBUG
2731 printk("cfg base address = %x\n", cfg_base_addr);
2732 #endif /* CCISS_DEBUG */
2733 cfg_base_addr_index =
2734 find_PCI_BAR_index(pdev, cfg_base_addr);
2735 #ifdef CCISS_DEBUG
2736 printk("cfg base address index = %x\n", cfg_base_addr_index);
2737 #endif /* CCISS_DEBUG */
2738 if (cfg_base_addr_index == -1) {
2739 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2740 release_io_mem(hba[i]);
2741 return -1;
2744 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2745 #ifdef CCISS_DEBUG
2746 printk("cfg offset = %x\n", cfg_offset);
2747 #endif /* CCISS_DEBUG */
2748 c->cfgtable = (CfgTable_struct *)
2749 remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index)
2750 + cfg_offset, sizeof(CfgTable_struct));
2751 c->board_id = board_id;
2753 #ifdef CCISS_DEBUG
2754 print_cfg_table(c->cfgtable);
2755 #endif /* CCISS_DEBUG */
2757 for(i=0; i<NR_PRODUCTS; i++) {
2758 if (board_id == products[i].board_id) {
2759 c->product_name = products[i].product_name;
2760 c->access = *(products[i].access);
2761 break;
2764 if (i == NR_PRODUCTS) {
2765 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2766 " to access the Smart Array controller %08lx\n",
2767 (unsigned long)board_id);
2768 return -1;
2770 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2771 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2772 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2773 (readb(&c->cfgtable->Signature[3]) != 'S') ) {
2774 printk("Does not appear to be a valid CISS config table\n");
2775 return -1;
2777 #ifdef CCISS_DEBUG
2778 printk("Trying to put board into Simple mode\n");
2779 #endif /* CCISS_DEBUG */
2780 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2781 /* Update the field, and then ring the doorbell */
2782 writel( CFGTBL_Trans_Simple,
2783 &(c->cfgtable->HostWrite.TransportRequest));
2784 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2786 /* Here, we wait, possibly for a long time, (4 secs or more).
2787 * In some unlikely cases, (e.g. A failed 144 GB drive in a
2788 * RAID 5 set was hot replaced just as we're coming in here) it
2789 * can take that long. Normally (almost always) we will wait
2790 * less than 1 sec. */
2791 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2792 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2793 break;
2794 /* delay and try again */
2795 set_current_state(TASK_INTERRUPTIBLE);
2796 schedule_timeout(1);
2799 #ifdef CCISS_DEBUG
2800 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2801 #endif /* CCISS_DEBUG */
2802 #ifdef CCISS_DEBUG
2803 print_cfg_table(c->cfgtable);
2804 #endif /* CCISS_DEBUG */
2806 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
2807 printk(KERN_WARNING "cciss: unable to get board into"
2808 " simple mode\n");
2809 return -1;
2811 return 0;
2815 /*
2816 * Gets information about the local volumes attached to the controller.
2817 */
/*
 * Query controller 'cntl_num': firmware version, logical volume list,
 * then per-volume capacity and geometry.  Each failure falls back to
 * safe defaults rather than aborting.  Fills hba[cntl_num]->drv[].
 */
2818 static void cciss_getgeometry(int cntl_num)
2820 ReportLunData_struct *ld_buff;
2821 ReadCapdata_struct *size_buff;
2822 InquiryData_struct *inq_buff;
2823 int return_code;
2824 int i;
2825 int listlength = 0;
2826 __u32 lunid = 0;
2827 int block_size;
2828 int total_size;
2830 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2831 if (ld_buff == NULL) {
2832 printk(KERN_ERR "cciss: out of memory\n");
2833 return;
2835 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2836 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2837 if (size_buff == NULL) {
2838 printk(KERN_ERR "cciss: out of memory\n");
2839 kfree(ld_buff);
2840 return;
2842 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2843 if (inq_buff == NULL) {
2844 printk(KERN_ERR "cciss: out of memory\n");
2845 kfree(ld_buff);
2846 kfree(size_buff);
2847 return;
2849 /* Get the firmware version */
2850 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2851 sizeof(InquiryData_struct), 0, 0 ,0, NULL);
2852 if (return_code == IO_OK) {
2853 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2854 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2855 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2856 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2857 } else { /* send command failed */
2858 printk(KERN_WARNING "cciss: unable to determine firmware"
2859 " version of controller\n");
2861 /* Get the number of logical volumes */
2862 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2863 sizeof(ReportLunData_struct), 0, 0, 0, NULL);
2865 if (return_code == IO_OK) {
2866 #ifdef CCISS_DEBUG
2867 printk("LUN Data\n--------------------------\n");
2868 #endif /* CCISS_DEBUG */
/* The LUN list length is reported big-endian on the wire. */
2870 listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
2871 } else { /* reading number of logical volumes failed */
2872 printk(KERN_WARNING "cciss: report logical volume"
2873 " command failed\n");
2874 listlength = 0;
2876 hba[cntl_num]->num_luns = listlength / 8; /* 8 bytes pre entry */
2877 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
2878 printk(KERN_ERR "cciss: only %d number of logical volumes supported\n",
2879 CISS_MAX_LUN);
2880 hba[cntl_num]->num_luns = CISS_MAX_LUN;
2882 #ifdef CCISS_DEBUG
2883 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
2884 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
2885 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
2886 #endif /* CCISS_DEBUG */
2888 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
/* Per-volume loop: record LUN id, read capacity, read geometry. */
2889 for(i=0; i< hba[cntl_num]->num_luns; i++) {
/* Assemble the 32-bit LUN id from the 4 little-endian bytes. */
2890 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
2891 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
2892 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
2893 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2894 hba[cntl_num]->drv[i].LunID = lunid;
2896 #ifdef CCISS_DEBUG
2897 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2898 ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2],
2899 ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
2900 #endif /* CCISS_DEBUG */
2902 memset(size_buff, 0, sizeof(ReadCapdata_struct));
2903 return_code = sendcmd(CCISS_READ_CAPACITY, cntl_num, size_buff,
2904 sizeof( ReadCapdata_struct), 1, i, 0, NULL);
2905 if (return_code == IO_OK) {
/* Capacity fields arrive as big-endian byte arrays. */
2906 total_size = (0xff &
2907 (unsigned int)(size_buff->total_size[0])) << 24;
2908 total_size |= (0xff &
2909 (unsigned int)(size_buff->total_size[1])) << 16;
2910 total_size |= (0xff &
2911 (unsigned int)(size_buff->total_size[2])) << 8;
2912 total_size |= (0xff & (unsigned int)
2913 (size_buff->total_size[3]));
2914 total_size++; /* command returns highest */
2915 /* block address */
2917 block_size = (0xff &
2918 (unsigned int)(size_buff->block_size[0])) << 24;
2919 block_size |= (0xff &
2920 (unsigned int)(size_buff->block_size[1])) << 16;
2921 block_size |= (0xff &
2922 (unsigned int)(size_buff->block_size[2])) << 8;
2923 block_size |= (0xff &
2924 (unsigned int)(size_buff->block_size[3]));
2925 } else { /* read capacity command failed */
2926 printk(KERN_WARNING "cciss: read capacity failed\n");
2927 total_size = block_size = 0;
2929 printk(KERN_INFO " blocks= %d block_size= %d\n",
2930 total_size, block_size);
2932 /* Execute the command to read the disk geometry */
2933 memset(inq_buff, 0, sizeof(InquiryData_struct));
2934 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2935 sizeof(InquiryData_struct), 1, i, 0xC1, NULL );
2936 if (return_code == IO_OK) {
/* data_byte[8] == 0xFF means the volume cannot report geometry:
 * fall back to the fake 255-head / 32-sector layout. */
2937 if (inq_buff->data_byte[8] == 0xFF) {
2938 printk(KERN_WARNING "cciss: reading geometry failed, volume does not support reading geometry\n");
2940 hba[cntl_num]->drv[i].block_size = block_size;
2941 hba[cntl_num]->drv[i].nr_blocks = total_size;
2942 hba[cntl_num]->drv[i].heads = 255;
2943 hba[cntl_num]->drv[i].sectors = 32; /* Sectors */
2944 /* per track */
2945 hba[cntl_num]->drv[i].cylinders = total_size
2946 / 255 / 32;
2947 } else {
2949 hba[cntl_num]->drv[i].block_size = block_size;
2950 hba[cntl_num]->drv[i].nr_blocks = total_size;
2951 hba[cntl_num]->drv[i].heads =
2952 inq_buff->data_byte[6];
2953 hba[cntl_num]->drv[i].sectors =
2954 inq_buff->data_byte[7];
2955 hba[cntl_num]->drv[i].cylinders =
2956 (inq_buff->data_byte[4] & 0xff) << 8;
2957 hba[cntl_num]->drv[i].cylinders +=
2958 inq_buff->data_byte[5];
2959 hba[cntl_num]->drv[i].raid_level =
2960 inq_buff->data_byte[8];
2963 else { /* Get geometry failed */
2964 printk(KERN_WARNING "cciss: reading geometry failed, continuing with default geometry\n");
2966 hba[cntl_num]->drv[i].block_size = block_size;
2967 hba[cntl_num]->drv[i].nr_blocks = total_size;
2968 hba[cntl_num]->drv[i].heads = 255;
2969 hba[cntl_num]->drv[i].sectors = 32; /* Sectors */
2970 /* per track */
2971 hba[cntl_num]->drv[i].cylinders = total_size / 255 / 32;
/* NOTE(review): raid_level is only assigned on the successful
 * geometry path above; on the fallback paths it relies on the
 * earlier memset of the hba structure -- confirm. */
2973 if (hba[cntl_num]->drv[i].raid_level > 5)
2974 hba[cntl_num]->drv[i].raid_level = RAID_UNKNOWN;
2975 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
2976 hba[cntl_num]->drv[i].heads,
2977 hba[cntl_num]->drv[i].sectors,
2978 hba[cntl_num]->drv[i].cylinders,
2979 raid_label[hba[cntl_num]->drv[i].raid_level]);
2981 kfree(ld_buff);
2982 kfree(size_buff);
2983 kfree(inq_buff);
2986 /* Function to find the first free pointer into our hba[] array */
2987 /* Returns -1 if no free entries are left. */
2988 static int alloc_cciss_hba(void)
2990 int i;
2991 for(i=0; i< MAX_CTLR; i++) {
2992 if (hba[i] == NULL) {
2993 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2994 if (hba[i]==NULL) {
2995 printk(KERN_ERR "cciss: out of memory.\n");
2996 return -1;
2998 return i;
3001 printk(KERN_WARNING
3002 "cciss: This driver supports a maximum of %d controllers.\n"
3003 "You can change this value in cciss.c and recompile.\n",
3004 MAX_CTLR);
3005 return -1;
3008 static void free_hba(int i)
3010 kfree(hba[i]);
3011 hba[i]=NULL;
3013 #ifdef CONFIG_CISS_MONITOR_THREAD
/*
 * Called when the controller is judged dead (heartbeat stall or
 * watchdog expiry): mark it not alive, disable the PCI device, and
 * fail every queued and in-flight command with a hardware error.
 */
3014 static void fail_all_cmds(unsigned long ctlr)
3016 /* If we get here, the board is apparently dead. */
3017 ctlr_info_t *h = hba[ctlr];
3018 CommandList_struct *c;
3019 unsigned long flags;
3021 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3022 h->alive = 0; /* the controller apparently died... */
3024 spin_lock_irqsave(&io_request_lock, flags);
3026 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3028 /* move everything off the request queue onto the completed queue */
3029 while( (c = h->reqQ) != NULL ) {
3030 removeQ(&(h->reqQ), c);
3031 h->Qdepth--;
3032 addQ (&(h->cmpQ), c);
3035 /* Now, fail everything on the completed queue with a HW error */
3036 while( (c = h->cmpQ) != NULL ) {
3037 removeQ(&h->cmpQ, c);
3038 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3039 if (c->cmd_type == CMD_RWREQ) {
3040 complete_command(h, c, 0);
3041 } else if (c->cmd_type == CMD_IOCTL_PEND)
/* NOTE(review): do_cciss_intr in this Xen port signals ioctl
 * waiters by clearing an int flag (its complete() call is #if 0'd
 * out), yet this path still calls complete() -- confirm that
 * CONFIG_CISS_MONITOR_THREAD is disabled here or that c->waiting
 * really is a completion on this path. */
3042 complete(c->waiting);
3043 # ifdef CONFIG_CISS_SCSI_TAPE
3044 else if (c->cmd_type == CMD_SCSI)
3045 complete_scsi_command(c, 0, 0);
3046 # endif
3048 spin_unlock_irqrestore(&io_request_lock, flags);
3049 return;
/*
 * Kernel-thread body: periodically verify the controller is alive by
 * watching its heartbeat counter and sending a no-op message guarded
 * by a watchdog timer; on stall, fail all outstanding commands.
 * Returns 0 when the thread exits.
 */
3051 static int cciss_monitor(void *ctlr)
3053 /* If the board fails, we ought to detect that. So we periodically
3054 send down a No-Op message and expect it to complete quickly. If it
3055 doesn't, then we assume the board is dead, and fail all commands.
3056 This is useful mostly in a multipath configuration, so that failover
3057 will happen. */
3059 int rc;
3060 ctlr_info_t *h = (ctlr_info_t *) ctlr;
3061 unsigned long flags;
3062 u32 current_timer;
/* Detach from the spawning process: this runs as a kernel thread. */
3064 daemonize();
3065 exit_files(current);
3066 reparent_to_init();
3068 printk("cciss%d: Monitor thread starting.\n", h->ctlr);
3070 /* only listen to signals if the HA was loaded as a module. */
3071 #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
3072 siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
3073 sprintf(current->comm, "ccissmon%d", h->ctlr);
3074 h->monitor_thread = current;
/* The watchdog fires fail_all_cmds() if the no-op below overruns
 * the configured deadline. */
3076 init_timer(&h->watchdog);
3077 h->watchdog.function = fail_all_cmds;
3078 h->watchdog.data = (unsigned long) h->ctlr;
3079 while (1) {
3080 /* check heartbeat timer */
3081 current_timer = readl(&h->cfgtable->HeartBeat);
3082 current_timer &= 0x0fffffff;
/* An unchanged heartbeat since the last pass means the firmware
 * has stalled: declare the board dead. */
3083 if (heartbeat_timer == current_timer) {
3084 fail_all_cmds(h->ctlr);
3085 break;
3087 else
3088 heartbeat_timer = current_timer;
3090 set_current_state(TASK_UNINTERRUPTIBLE);
3091 schedule_timeout(h->monitor_period * HZ);
3092 h->watchdog.expires = jiffies + HZ * h->monitor_deadline;
3093 add_timer(&h->watchdog);
3094 /* send down a trivial command (no op message) to ctlr */
3095 rc = sendcmd_withirq(3, h->ctlr, NULL, 0, 0, 0, 0, TYPE_MSG);
3096 del_timer(&h->watchdog);
3097 if (!CTLR_IS_ALIVE(h))
3098 break;
3099 if (signal_pending(current)) {
3100 printk(KERN_WARNING "%s received signal.\n",
3101 current->comm);
3102 break;
3104 if (h->monitor_period == 0) /* zero period means exit thread */
3105 break;
3107 printk(KERN_INFO "%s exiting.\n", current->comm);
3108 spin_lock_irqsave(&io_request_lock, flags);
3109 h->monitor_started = 0;
3110 h->monitor_thread = NULL;
3111 spin_unlock_irqrestore(&io_request_lock, flags);
3112 return 0;
/*
 * Parse a "monitor <secs>" or "deadline <secs>" command string and
 * apply it: adjust a running monitor thread's parameters in place, or
 * start a new monitor thread if none is running.  On success returns 0
 * with *rc set to 'count'; returns -1 for an unrecognized command.
 */
3114 static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd,
3115 unsigned long count, int (*cciss_monitor)(void *), int *rc)
3117 unsigned long flags;
3118 unsigned int new_period, old_period, new_deadline, old_deadline;
3120 if (strncmp("monitor", cmd, 7) == 0) {
3121 new_period = simple_strtol(cmd + 8, NULL, 10);
3122 spin_lock_irqsave(&io_request_lock, flags);
3123 new_deadline = h->monitor_deadline;
3124 spin_unlock_irqrestore(&io_request_lock, flags);
3125 } else if (strncmp("deadline", cmd, 8) == 0) {
3126 new_deadline = simple_strtol(cmd + 9, NULL, 10);
3127 spin_lock_irqsave(&io_request_lock, flags);
3128 new_period = h->monitor_period;
3129 spin_unlock_irqrestore(&io_request_lock, flags);
3130 } else
3131 return -1;
/* Clamp the period into [CCISS_MIN_PERIOD, CCISS_MAX_PERIOD];
 * zero passes through as the "stop the thread" sentinel. */
3132 if (new_period != 0 && new_period < CCISS_MIN_PERIOD)
3133 new_period = CCISS_MIN_PERIOD;
3134 if (new_period > CCISS_MAX_PERIOD)
3135 new_period = CCISS_MAX_PERIOD;
/* NOTE(review): with new_period == 0 this computes 0 - 5 in
 * unsigned arithmetic (a huge deadline); apparently harmless only
 * because a zero period stops the thread below -- confirm. */
3136 if (new_deadline >= new_period) {
3137 new_deadline = new_period - 5;
3138 printk(KERN_INFO "setting deadline to %d\n", new_deadline);
3140 spin_lock_irqsave(&io_request_lock, flags);
/* A monitor thread already runs: just update its parameters. */
3141 if (h->monitor_started != 0) {
3142 old_period = h->monitor_period;
3143 old_deadline = h->monitor_deadline;
3144 h->monitor_period = new_period;
3145 h->monitor_deadline = new_deadline;
3146 spin_unlock_irqrestore(&io_request_lock, flags);
3147 if (new_period == 0) {
3148 printk(KERN_INFO "cciss%d: stopping monitor thread\n",
3149 h->ctlr);
3150 *rc = count;
3151 return 0;
3153 if (new_period != old_period)
3154 printk(KERN_INFO "cciss%d: adjusting monitor thread "
3155 "period from %d to %d seconds\n",
3156 h->ctlr, old_period, new_period);
3157 if (new_deadline != old_deadline)
3158 printk(KERN_INFO "cciss%d: adjusting monitor thread "
3159 "deadline from %d to %d seconds\n",
3160 h->ctlr, old_deadline, new_deadline);
3161 *rc = count;
3162 return 0;
/* No thread yet: record the parameters and spawn one. */
3164 h->monitor_started = 1;
3165 h->monitor_period = new_period;
3166 h->monitor_deadline = new_deadline;
3167 spin_unlock_irqrestore(&io_request_lock, flags);
3168 kernel_thread(cciss_monitor, h, 0);
3169 *rc = count;
3170 return 0;
/* Ask a running monitor thread to exit by sending it SIGKILL. */
3173 static void kill_monitor_thread(ctlr_info_t *h)
3175 if (h->monitor_thread)
3176 send_sig(SIGKILL, h->monitor_thread, 1);
3178 #else
/* Monitor-thread support compiled out: stub the kill call away. */
3179 #define kill_monitor_thread(h)
3180 #endif
3181 /*
3182 * This is it. Find all the controllers and register them. I really hate
3183 * stealing all these major device numbers.
3184 * returns the number of block devices registered.
3185 */
3186 static int __init cciss_init_one(struct pci_dev *pdev,
3187 const struct pci_device_id *ent)
/*
 * PCI probe callback: bring up one Smart Array controller.  Claims a
 * slot in the global hba[] table, maps the board, grabs the IRQ and
 * DMA command/error pools, scans for logical drives and wires up the
 * request queue and gendisk.  Returns 1 on success, -1 on failure
 * (all partially-acquired resources are released on the error paths).
 * NOTE(review): some brace-only lines (kernel source 3188, 3207,
 * 3248, 3256, 3268, 3288, ...) were dropped by the source viewer
 * this text was extracted from.
 */
3189 request_queue_t *q;
3190 int i; /* index of this controller in the global hba[] table */
3191 int j; /* logical-drive loop counter for register_disk() below */
3192 #if 0
3193 int rc;
3194 #endif
3196 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3197 " bus %d dev %d func %d\n",
3198 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3199 PCI_FUNC(pdev->devfn));
/* Grab a free slot (and its ctlr_info_t) in the hba[] table. */
3200 i = alloc_cciss_hba();
3201 if (i < 0 )
3202 return -1;
3203 memset(hba[i], 0, sizeof(ctlr_info_t));
/* Map/configure the board; presumably sets up hba[i]->vaddr, intr,
 * access vectors etc. (cciss_pci_init is defined earlier in this
 * file).  On failure, give the slot back. */
3204 if (cciss_pci_init(hba[i], pdev) != 0) {
3205 free_hba(i);
3206 return -1;
3208 sprintf(hba[i]->devname, "cciss%d", i);
3209 hba[i]->ctlr = i;
3211 /* register with the major number, or get a dynamic major number */
3212 /* by passing 0 as argument */
/* NOTE(review): only the first MAX_CTLR_ORIG controllers are assigned
 * a major here; the dynamic-major path is in the #if 0 block below,
 * so later controllers end up with major 0 in this Xen port. */
3214 if (i < MAX_CTLR_ORIG)
3215 hba[i]->major = MAJOR_NR + i;
3217 hba[i]->pdev = pdev;
3218 ASSERT_CTLR_ALIVE(hba[i]);
/* Block-device registration is compiled out in this Xen port. */
3220 #if 0
3221 rc = (register_blkdev(hba[i]->major, hba[i]->devname, &cciss_fops));
3222 if (rc < 0) {
3223 printk(KERN_ERR "cciss: Unable to get major number "
3224 "%d for %s\n", hba[i]->major, hba[i]->devname);
3225 release_io_mem(hba[i]);
3226 free_hba(i);
3227 return -1;
3228 } else
3230 if (i < MAX_CTLR_ORIG) {
3231 hba[i]->major = MAJOR_NR + i;
3232 map_major_to_ctlr[MAJOR_NR + i] = i;
3233 } else {
3234 hba[i]->major = rc;
3235 map_major_to_ctlr[rc] = i;
3239 XXXX Need to register this...
3241 #endif
3243 /* make sure the board interrupts are off */
3244 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
/* Shared, interrupt-gate IRQ; hba[i] is the dev_id cookie. */
3245 if (request_irq(hba[i]->intr, do_cciss_intr,
3246 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3247 hba[i]->devname, hba[i])) {
3249 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3250 hba[i]->intr, hba[i]->devname);
/* NOTE(review): unregister_blkdev() is called here although the
 * matching register_blkdev() above is #if 0'd out — verify this
 * failure path against the rest of the Xen port. */
3251 unregister_blkdev( hba[i]->major, hba[i]->devname);
3252 map_major_to_ctlr[hba[i]->major] = 0;
3253 release_io_mem(hba[i]);
3254 free_hba(i);
3255 return -1;
/* Allocate the free-command bitmap plus DMA-coherent pools of
 * NR_CMDS command blocks and NR_CMDS error-info records. */
3257 hba[i]->cmd_pool_bits = (__u32*)kmalloc(
3258 ((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
3259 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3260 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3261 &(hba[i]->cmd_pool_dhandle));
3262 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3263 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3264 &(hba[i]->errinfo_pool_dhandle));
3265 if ((hba[i]->cmd_pool_bits == NULL)
3266 || (hba[i]->cmd_pool == NULL)
3267 || (hba[i]->errinfo_pool == NULL)) {
/* Out of memory: release whatever did succeed, then back out the
 * IRQ/major/IO-mem acquired above. */
3269 if (hba[i]->cmd_pool_bits)
3270 kfree(hba[i]->cmd_pool_bits);
3271 if (hba[i]->cmd_pool)
3272 pci_free_consistent(hba[i]->pdev,
3273 NR_CMDS * sizeof(CommandList_struct),
3274 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3275 if (hba[i]->errinfo_pool)
3276 pci_free_consistent(hba[i]->pdev,
3277 NR_CMDS * sizeof( ErrorInfo_struct),
3278 hba[i]->errinfo_pool,
3279 hba[i]->errinfo_pool_dhandle);
3280 free_irq(hba[i]->intr, hba[i]);
3281 unregister_blkdev(hba[i]->major, hba[i]->devname);
3282 map_major_to_ctlr[hba[i]->major] = 0;
3283 release_io_mem(hba[i]);
3284 free_hba(i);
3285 printk( KERN_ERR "cciss: out of memory");
3286 return -1;
3289 /* Initialize the pdev driver private data.
3290 have it point to hba[i]. */
3291 pci_set_drvdata(pdev, hba[i]);
3292 /* command and error info recs zeroed out before
3293 they are used */
3294 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
3296 #ifdef CCISS_DEBUG
3297 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3298 #endif /* CCISS_DEBUG */
/* Query the board for its logical drives and their geometry. */
3300 cciss_getgeometry(i);
3302 cciss_find_non_disk_devices(i); /* find our tape drives, if any */
3304 /* Turn the interrupts on so we can service requests */
3305 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3307 cciss_procinit(i);
/* Attach our request handler to the default queue for this major. */
3309 q = BLK_DEFAULT_QUEUE(hba[i]->major);
3310 q->queuedata = hba[i];
3311 blk_init_queue(q, do_cciss_request);
3312 #if 0
3313 // XXX SMH; no bounce support for us yet
3314 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3315 #endif
3316 blk_queue_headactive(q, 0);
3318 /* fill in the other Kernel structs */
3319 blksize_size[hba[i]->major] = hba[i]->blocksizes;
3320 hardsect_size[hba[i]->major] = hba[i]->hardsizes;
3321 read_ahead[hba[i]->major] = READ_AHEAD;
3323 /* Set the pointers to queue functions */
3324 q->back_merge_fn = cpq_back_merge_fn;
3325 q->front_merge_fn = cpq_front_merge_fn;
3326 q->merge_requests_fn = cpq_merge_requests_fn;
3329 /* Fill in the gendisk data */
3330 hba[i]->gendisk.major = hba[i]->major;
3331 hba[i]->gendisk.major_name = "cciss";
3332 hba[i]->gendisk.minor_shift = NWD_SHIFT;
3333 hba[i]->gendisk.max_p = MAX_PART;
3334 hba[i]->gendisk.part = hba[i]->hd;
3335 hba[i]->gendisk.sizes = hba[i]->sizes;
3336 hba[i]->gendisk.nr_real = hba[i]->highest_lun+1;
3337 hba[i]->gendisk.fops = &cciss_fops;
3339 /* Get on the disk list */
3340 add_gendisk(&(hba[i]->gendisk));
3342 cciss_geninit(i);
/* One minor range (MAX_PART partitions) per logical drive. */
3343 for(j=0; j<NWD; j++)
3344 register_disk(&(hba[i]->gendisk),
3345 MKDEV(hba[i]->major, j <<4),
3346 MAX_PART, &cciss_fops,
3347 hba[i]->drv[j].nr_blocks);
3349 cciss_register_scsi(i, 1); /* hook ourself into SCSI subsystem */
/* A non-negative return tells the 2.4 PCI core we claimed pdev. */
3351 return 1;
/*
 * PCI remove callback: tear down one controller in roughly the
 * reverse order of cciss_init_one() — stop the monitor thread, flush
 * the write cache (if the board is still alive), release the IRQ,
 * unhook block/SCSI registration, free the DMA pools and give the
 * hba[] slot back.  NOTE(review): some brace-only lines were dropped
 * by the source viewer this text was extracted from.
 */
3354 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3356 ctlr_info_t *tmp_ptr;
3357 int i; /* controller index recovered from pdev's drvdata */
3358 char flush_buf[4]; /* zeroed parameter block for the flush cmd */
3359 int return_code;
/* Nothing to do if probe never attached (or already detached). */
3361 if (pci_get_drvdata(pdev) == NULL) {
3362 printk( KERN_ERR "cciss: Unable to remove device \n");
3363 return;
3365 tmp_ptr = pci_get_drvdata(pdev);
3366 i = tmp_ptr->ctlr;
3367 if (hba[i] == NULL) {
3368 printk(KERN_ERR "cciss: device appears to "
3369 "already be removed \n");
3370 return;
/* Stop the background monitor before dismantling the board. */
3372 kill_monitor_thread(hba[i]);
3373 /* no sense in trying to flush a dead board's cache. */
3374 if (CTLR_IS_ALIVE(hba[i])) {
3375 /* Turn board interrupts off and flush the cache */
3376 /* write all data in the battery backed cache to disks */
3377 memset(flush_buf, 0, 4);
3378 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf,
3379 4, 0, 0, 0, NULL);
3380 if (return_code != IO_OK)
3381 printk(KERN_WARNING
3382 "cciss%d: Error flushing cache\n", i);
/* The IRQ must go after the flush (sendcmd above may need it —
 * TODO confirm whether sendcmd polls or uses interrupts). */
3384 free_irq(hba[i]->intr, hba[i]);
3385 pci_set_drvdata(pdev, NULL);
3386 iounmap((void*)hba[i]->vaddr);
3387 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3388 unregister_blkdev(hba[i]->major, hba[i]->devname);
3389 map_major_to_ctlr[hba[i]->major] = 0;
3390 //remove_proc_entry(hba[i]->devname, proc_cciss);
3393 /* remove it from the disk list */
3394 del_gendisk(&(hba[i]->gendisk));
/* Release the DMA pools and bitmap allocated in cciss_init_one(). */
3396 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3397 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3398 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3399 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3400 kfree(hba[i]->cmd_pool_bits);
3401 release_io_mem(hba[i]);
/* Last: invalidate hba[i] so cleanup_cciss_module() skips it. */
3402 free_hba(i);
3405 static struct pci_driver cciss_pci_driver = {
3406 name: "cciss",
3407 probe: cciss_init_one,
3408 remove: __devexit_p(cciss_remove_one),
3409 id_table: cciss_pci_device_id, /* id_table */
3410 };
3412 /*
3413 * This is it. Register the PCI driver information for the cards we control
3414 * the OS will call our registered routines when it finds one of our cards.
3415 */
3416 int __init cciss_init(void)
3418 int ret = pci_module_init(&cciss_pci_driver);
3419 if (ret >= 0)
3420 printk(KERN_INFO DRIVER_NAME "\n");
3421 return ret;
3424 EXPORT_NO_SYMBOLS;
3425 static int __init init_cciss_module(void)
3428 return cciss_init();
3431 static void __exit cleanup_cciss_module(void)
3433 int i;
3435 pci_unregister_driver(&cciss_pci_driver);
3436 /* double check that all controller entrys have been removed */
3437 for (i=0; i< MAX_CTLR; i++) {
3438 if (hba[i] != NULL) {
3439 printk(KERN_WARNING "cciss: had to remove"
3440 " controller %d\n", i);
3441 cciss_remove_one(hba[i]->pdev);
3444 //remove_proc_entry("cciss", proc_root_driver);
/* Register the module entry/exit points with the module machinery. */
3447 module_init(init_cciss_module);
3448 module_exit(cleanup_cciss_module);