ia64/linux-2.6.18-xen.hg

view drivers/block/cpqarray.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
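
To illustrate the intended behaviour, here is a minimal sketch only; the
helper names (current_target(), current_pages, increase_reservation(),
decrease_reservation(), balloon_timer) and their return conventions are
assumptions made for the sketch, not necessarily the exact symbols this
changeset touches:

  static void balloon_process(void *unused)
  {
          /* Pages still to add (positive) or to give back (negative). */
          long credit = current_target() - current_pages;

          if (credit > 0) {
                  /* increase_reservation() is assumed to return the number
                   * of pages actually obtained; keep any partial gain. */
                  if (increase_reservation(credit) < credit) {
                          /* Possibly transient host memory pressure, so
                           * retry on a timer instead of recording a limit. */
                          mod_timer(&balloon_timer, jiffies + HZ);
                  }
          } else if (credit < 0) {
                  if (decrease_reservation(-credit) < -credit)
                          mod_timer(&balloon_timer, jiffies + HZ);
          }
  }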

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 3e8752eb6d9c
children
line source
1 /*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22 #include <linux/config.h> /* CONFIG_PROC_FS */
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/bio.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/blkdev.h>
40 #include <linux/genhd.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
45 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
47 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
48 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
50 /* Embedded module documentation macros - see modules.h */
51 /* Original author Chris Frantz - Compaq Computer Corporation */
52 MODULE_AUTHOR("Compaq Computer Corporation");
53 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
54 MODULE_LICENSE("GPL");
56 #include "cpqarray.h"
57 #include "ida_cmd.h"
58 #include "smart1,2.h"
59 #include "ida_ioctl.h"
61 #define READ_AHEAD 128
62 #define NR_CMDS 128 /* This could probably go as high as ~400 */
64 #define MAX_CTLR 8
65 #define CTLR_SHIFT 8
67 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
69 static int nr_ctlr;
70 static ctlr_info_t *hba[MAX_CTLR];
72 static int eisa[8];
74 #define NR_PRODUCTS ARRAY_SIZE(products)
76 /* board_id = Subsystem Device ID & Vendor ID
77 * product = Marketing Name for the board
78 * access = Address of the struct of function pointers
79 */
80 static struct board_type products[] = {
81 { 0x0040110E, "IDA", &smart1_access },
82 { 0x0140110E, "IDA-2", &smart1_access },
83 { 0x1040110E, "IAES", &smart1_access },
84 { 0x2040110E, "SMART", &smart1_access },
85 { 0x3040110E, "SMART-2/E", &smart2e_access },
86 { 0x40300E11, "SMART-2/P", &smart2_access },
87 { 0x40310E11, "SMART-2SL", &smart2_access },
88 { 0x40320E11, "Smart Array 3200", &smart2_access },
89 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
90 { 0x40340E11, "Smart Array 221", &smart2_access },
91 { 0x40400E11, "Integrated Array", &smart4_access },
92 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
93 { 0x40500E11, "Smart Array 4200", &smart4_access },
94 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
95 { 0x40580E11, "Smart Array 431", &smart4_access },
96 };
98 /* define the PCI info for the PCI cards this driver can control */
99 static const struct pci_device_id cpqarray_pci_device_id[] =
100 {
101 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
102 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
103 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
104 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
105 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
106 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
107 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
108 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
109 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
110 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
111 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
112 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
113 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
114 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
115 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
116 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
117 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
118 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
119 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
120 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
121 { 0 }
122 };
124 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
126 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
128 /* Debug... */
129 #define DBG(s) do { s } while(0)
130 /* Debug (general info)... */
131 #define DBGINFO(s) do { } while(0)
132 /* Debug Paranoid... */
133 #define DBGP(s) do { } while(0)
134 /* Debug Extra Paranoid... */
135 #define DBGPX(s) do { } while(0)
137 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
138 static void __iomem *remap_pci_mem(ulong base, ulong size);
139 static int cpqarray_eisa_detect(void);
140 static int pollcomplete(int ctlr);
141 static void getgeometry(int ctlr);
142 static void start_fwbk(int ctlr);
144 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
145 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
147 static void free_hba(int i);
148 static int alloc_cpqarray_hba(void);
150 static int sendcmd(
151 __u8 cmd,
152 int ctlr,
153 void *buff,
154 size_t size,
155 unsigned int blk,
156 unsigned int blkcnt,
157 unsigned int log_unit );
159 static int ida_open(struct inode *inode, struct file *filep);
160 static int ida_release(struct inode *inode, struct file *filep);
161 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
162 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
163 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
165 static void do_ida_request(request_queue_t *q);
166 static void start_io(ctlr_info_t *h);
168 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170 static inline void complete_buffers(struct bio *bio, int ok);
171 static inline void complete_command(cmdlist_t *cmd, int timeout);
173 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
174 static void ida_timer(unsigned long tdata);
175 static int ida_revalidate(struct gendisk *disk);
176 static int revalidate_allvol(ctlr_info_t *host);
177 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
179 #ifdef CONFIG_PROC_FS
180 static void ida_procinit(int i);
181 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
182 #else
183 static void ida_procinit(int i) {}
184 #endif
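/*
 * Helpers to fetch the per-drive and per-controller state stashed in the
 * gendisk's private_data and request queue.
 */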
186 static inline drv_info_t *get_drv(struct gendisk *disk)
187 {
188 return disk->private_data;
189 }
191 static inline ctlr_info_t *get_host(struct gendisk *disk)
192 {
193 return disk->queue->queuedata;
194 }
197 static struct block_device_operations ida_fops = {
198 .owner = THIS_MODULE,
199 .open = ida_open,
200 .release = ida_release,
201 .ioctl = ida_ioctl,
202 .getgeo = ida_getgeo,
203 .revalidate_disk= ida_revalidate,
204 };
207 #ifdef CONFIG_PROC_FS
209 static struct proc_dir_entry *proc_array;
211 /*
212 * Get us a file in /proc/array that says something about each controller.
213 * Create /proc/array if it doesn't exist yet.
214 */
215 static void __init ida_procinit(int i)
216 {
217 if (proc_array == NULL) {
218 proc_array = proc_mkdir("cpqarray", proc_root_driver);
219 if (!proc_array) return;
220 }
222 create_proc_read_entry(hba[i]->devname, 0, proc_array,
223 ida_proc_get_info, hba[i]);
224 }
226 /*
227 * Report information about this controller.
228 */
229 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
230 {
231 off_t pos = 0;
232 off_t len = 0;
233 int size, i, ctlr;
234 ctlr_info_t *h = (ctlr_info_t*)data;
235 drv_info_t *drv;
236 #ifdef CPQ_PROC_PRINT_QUEUES
237 cmdlist_t *c;
238 unsigned long flags;
239 #endif
241 ctlr = h->ctlr;
242 size = sprintf(buffer, "%s: Compaq %s Controller\n"
243 " Board ID: 0x%08lx\n"
244 " Firmware Revision: %c%c%c%c\n"
245 " Controller Sig: 0x%08lx\n"
246 " Memory Address: 0x%08lx\n"
247 " I/O Port: 0x%04x\n"
248 " IRQ: %d\n"
249 " Logical drives: %d\n"
250 " Physical drives: %d\n\n"
251 " Current Q depth: %d\n"
252 " Max Q depth since init: %d\n\n",
253 h->devname,
254 h->product_name,
255 (unsigned long)h->board_id,
256 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
257 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
258 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
259 h->log_drives, h->phys_drives,
260 h->Qdepth, h->maxQsinceinit);
262 pos += size; len += size;
264 size = sprintf(buffer+len, "Logical Drive Info:\n");
265 pos += size; len += size;
267 for(i=0; i<h->log_drives; i++) {
268 drv = &h->drv[i];
269 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
270 ctlr, i, drv->blk_size, drv->nr_blks);
271 pos += size; len += size;
272 }
274 #ifdef CPQ_PROC_PRINT_QUEUES
275 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
276 size = sprintf(buffer+len, "\nCurrent Queues:\n");
277 pos += size; len += size;
279 c = h->reqQ;
280 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
281 if (c) c=c->next;
282 while(c && c != h->reqQ) {
283 size = sprintf(buffer+len, "->%p", c);
284 pos += size; len += size;
285 c=c->next;
286 }
288 c = h->cmpQ;
289 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
290 if (c) c=c->next;
291 while(c && c != h->cmpQ) {
292 size = sprintf(buffer+len, "->%p", c);
293 pos += size; len += size;
294 c=c->next;
295 }
297 size = sprintf(buffer+len, "\n"); pos += size; len += size;
298 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
299 #endif
300 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
301 h->nr_allocs, h->nr_frees);
302 pos += size; len += size;
304 *eof = 1;
305 *start = buffer+offset;
306 len -= offset;
307 if (len>length)
308 len = length;
309 return len;
310 }
311 #endif /* CONFIG_PROC_FS */
313 module_param_array(eisa, int, NULL, 0);
315 static void release_io_mem(ctlr_info_t *c)
316 {
317 /* if IO mem was not protected do nothing */
318 if( c->io_mem_addr == 0)
319 return;
320 release_region(c->io_mem_addr, c->io_mem_length);
321 c->io_mem_addr = 0;
322 c->io_mem_length = 0;
323 }
325 static void __devexit cpqarray_remove_one(int i)
326 {
327 int j;
328 char buff[4];
330 /* sendcmd will turn off interrupt, and send the flush...
331 * To write all data in the battery backed cache to disks
332 * no data returned, but don't want to send NULL to sendcmd */
333 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
334 {
335 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
336 i);
337 }
338 free_irq(hba[i]->intr, hba[i]);
339 iounmap(hba[i]->vaddr);
340 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
341 del_timer(&hba[i]->timer);
342 remove_proc_entry(hba[i]->devname, proc_array);
343 pci_free_consistent(hba[i]->pci_dev,
344 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
345 hba[i]->cmd_pool_dhandle);
346 kfree(hba[i]->cmd_pool_bits);
347 for(j = 0; j < NWD; j++) {
348 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
349 del_gendisk(ida_gendisk[i][j]);
350 put_disk(ida_gendisk[i][j]);
351 }
352 blk_cleanup_queue(hba[i]->queue);
353 release_io_mem(hba[i]);
354 free_hba(i);
355 }
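/*
 * PCI hot-remove entry point: look up the controller from the driver data
 * and tear it down via cpqarray_remove_one().
 */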
357 static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
358 {
359 int i;
360 ctlr_info_t *tmp_ptr;
362 if (pci_get_drvdata(pdev) == NULL) {
363 printk( KERN_ERR "cpqarray: Unable to remove device \n");
364 return;
365 }
367 tmp_ptr = pci_get_drvdata(pdev);
368 i = tmp_ptr->ctlr;
369 if (hba[i] == NULL) {
370 printk(KERN_ERR "cpqarray: controller %d appears to have"
371 "already been removed \n", i);
372 return;
373 }
374 pci_set_drvdata(pdev, NULL);
376 cpqarray_remove_one(i);
377 }
379 /* removing an instance that was not removed automatically..
380 * must be an eisa card.
381 */
382 static void __devexit cpqarray_remove_one_eisa (int i)
383 {
384 if (hba[i] == NULL) {
385 printk(KERN_ERR "cpqarray: controller %d appears to have"
386 "already been removed \n", i);
387 return;
388 }
389 cpqarray_remove_one(i);
390 }
392 /* pdev is NULL for eisa */
393 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
394 {
395 request_queue_t *q;
396 int j;
398 /*
399 * register block devices
400 * Find disks and fill in structs
401 * Get an interrupt, set the Q depth and get into /proc
402 */
404 /* If this is successful it should ensure that we are the only */
405 /* instance of the driver */
406 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
407 goto Enomem4;
408 }
409 hba[i]->access.set_intr_mask(hba[i], 0);
410 if (request_irq(hba[i]->intr, do_ida_intr,
411 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
412 {
413 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
414 hba[i]->intr, hba[i]->devname);
415 goto Enomem3;
416 }
418 for (j=0; j<NWD; j++) {
419 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
420 if (!ida_gendisk[i][j])
421 goto Enomem2;
422 }
424 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
425 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
426 &(hba[i]->cmd_pool_dhandle));
427 hba[i]->cmd_pool_bits = kmalloc(
428 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
429 GFP_KERNEL);
431 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
432 goto Enomem1;
434 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
435 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
436 printk(KERN_INFO "cpqarray: Finding drives on %s",
437 hba[i]->devname);
439 spin_lock_init(&hba[i]->lock);
440 q = blk_init_queue(do_ida_request, &hba[i]->lock);
441 if (!q)
442 goto Enomem1;
444 hba[i]->queue = q;
445 q->queuedata = hba[i];
447 getgeometry(i);
448 start_fwbk(i);
450 ida_procinit(i);
452 if (pdev)
453 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
455 /* This is a hardware imposed limit. */
456 blk_queue_max_hw_segments(q, SG_MAX);
458 /* This is a driver limit and could be eliminated. */
459 blk_queue_max_phys_segments(q, SG_MAX);
461 init_timer(&hba[i]->timer);
462 hba[i]->timer.expires = jiffies + IDA_TIMER;
463 hba[i]->timer.data = (unsigned long)hba[i];
464 hba[i]->timer.function = ida_timer;
465 add_timer(&hba[i]->timer);
467 /* Enable IRQ now that spinlock and rate limit timer are set up */
468 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
470 for(j=0; j<NWD; j++) {
471 struct gendisk *disk = ida_gendisk[i][j];
472 drv_info_t *drv = &hba[i]->drv[j];
473 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
474 disk->major = COMPAQ_SMART2_MAJOR + i;
475 disk->first_minor = j<<NWD_SHIFT;
476 disk->fops = &ida_fops;
477 if (j && !drv->nr_blks)
478 continue;
479 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
480 set_capacity(disk, drv->nr_blks);
481 disk->queue = hba[i]->queue;
482 disk->private_data = drv;
483 add_disk(disk);
484 }
486 /* done ! */
487 return(i);
489 Enomem1:
490 nr_ctlr = i;
491 kfree(hba[i]->cmd_pool_bits);
492 if (hba[i]->cmd_pool)
493 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
494 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
495 Enomem2:
496 while (j--) {
497 put_disk(ida_gendisk[i][j]);
498 ida_gendisk[i][j] = NULL;
499 }
500 free_irq(hba[i]->intr, hba[i]);
501 Enomem3:
502 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
503 Enomem4:
504 if (pdev)
505 pci_set_drvdata(pdev, NULL);
506 release_io_mem(hba[i]);
507 free_hba(i);
509 printk( KERN_ERR "cpqarray: out of memory");
511 return -1;
512 }
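/*
 * PCI probe entry point: allocate an hba[] slot, do the PCI-specific setup
 * and hand the new controller to cpqarray_register_ctlr().
 */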
514 static int __init cpqarray_init_one( struct pci_dev *pdev,
515 const struct pci_device_id *ent)
516 {
517 int i;
519 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
520 " bus %d dev %d func %d\n",
521 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
522 PCI_FUNC(pdev->devfn));
523 i = alloc_cpqarray_hba();
524 if( i < 0 )
525 return (-1);
526 memset(hba[i], 0, sizeof(ctlr_info_t));
527 sprintf(hba[i]->devname, "ida%d", i);
528 hba[i]->ctlr = i;
529 /* Initialize the pdev driver private data */
530 pci_set_drvdata(pdev, hba[i]);
532 if (cpqarray_pci_init(hba[i], pdev) != 0) {
533 pci_set_drvdata(pdev, NULL);
534 release_io_mem(hba[i]);
535 free_hba(i);
536 return -1;
537 }
539 return (cpqarray_register_ctlr(i, pdev));
540 }
542 static struct pci_driver cpqarray_pci_driver = {
543 .name = "cpqarray",
544 .probe = cpqarray_init_one,
545 .remove = __devexit_p(cpqarray_remove_one_pci),
546 .id_table = cpqarray_pci_device_id,
547 };
549 /*
550 * This is it. Find all the controllers and register them.
551 * returns the number of block devices registered.
552 */
553 static int __init cpqarray_init(void)
554 {
555 int num_cntlrs_reg = 0;
556 int i;
557 int rc = 0;
559 /* detect controllers */
560 printk(DRIVER_NAME "\n");
562 rc = pci_register_driver(&cpqarray_pci_driver);
563 if (rc)
564 return rc;
565 cpqarray_eisa_detect();
567 for (i=0; i < MAX_CTLR; i++) {
568 if (hba[i] != NULL)
569 num_cntlrs_reg++;
570 }
572 return(num_cntlrs_reg);
573 }
575 /* Function to find the first free pointer into our hba[] array */
576 /* Returns -1 if no free entries are left. */
577 static int alloc_cpqarray_hba(void)
578 {
579 int i;
581 for(i=0; i< MAX_CTLR; i++) {
582 if (hba[i] == NULL) {
583 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
584 if(hba[i]==NULL) {
585 printk(KERN_ERR "cpqarray: out of memory.\n");
586 return (-1);
587 }
588 return (i);
589 }
590 }
591 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
592 " of 8 controllers.\n");
593 return(-1);
594 }
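/* Release the hba[] slot obtained from alloc_cpqarray_hba(). */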
596 static void free_hba(int i)
597 {
598 kfree(hba[i]);
599 hba[i]=NULL;
600 }
602 /*
603 * Find the IO address of the controller, its IRQ and so forth. Fill
604 * in some basic stuff into the ctlr_info_t structure.
605 */
606 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
607 {
608 ushort vendor_id, device_id, command;
609 unchar cache_line_size, latency_timer;
610 unchar irq, revision;
611 unsigned long addr[6];
612 __u32 board_id;
614 int i;
616 c->pci_dev = pdev;
617 if (pci_enable_device(pdev)) {
618 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
619 return -1;
620 }
621 vendor_id = pdev->vendor;
622 device_id = pdev->device;
623 irq = pdev->irq;
625 for(i=0; i<6; i++)
626 addr[i] = pci_resource_start(pdev, i);
628 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
629 {
630 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
631 return -1;
632 }
634 pci_read_config_word(pdev, PCI_COMMAND, &command);
635 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
636 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
637 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
639 pci_read_config_dword(pdev, 0x2c, &board_id);
641 /* check to see if controller has been disabled */
642 if(!(command & 0x02)) {
643 printk(KERN_WARNING
644 "cpqarray: controller appears to be disabled\n");
645 return(-1);
646 }
648 DBGINFO(
649 printk("vendor_id = %x\n", vendor_id);
650 printk("device_id = %x\n", device_id);
651 printk("command = %x\n", command);
652 for(i=0; i<6; i++)
653 printk("addr[%d] = %lx\n", i, addr[i]);
654 printk("revision = %x\n", revision);
655 printk("irq = %x\n", irq);
656 printk("cache_line_size = %x\n", cache_line_size);
657 printk("latency_timer = %x\n", latency_timer);
658 printk("board_id = %x\n", board_id);
659 );
661 c->intr = irq;
663 for(i=0; i<6; i++) {
664 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
665 { /* IO space */
666 c->io_mem_addr = addr[i];
667 c->io_mem_length = pci_resource_end(pdev, i)
668 - pci_resource_start(pdev, i) + 1;
669 if(!request_region( c->io_mem_addr, c->io_mem_length,
670 "cpqarray"))
671 {
672 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
673 c->io_mem_addr = 0;
674 c->io_mem_length = 0;
675 }
676 break;
677 }
678 }
680 c->paddr = 0;
681 for(i=0; i<6; i++)
682 if (!(pci_resource_flags(pdev, i) &
683 PCI_BASE_ADDRESS_SPACE_IO)) {
684 c->paddr = pci_resource_start (pdev, i);
685 break;
686 }
687 if (!c->paddr)
688 return -1;
689 c->vaddr = remap_pci_mem(c->paddr, 128);
690 if (!c->vaddr)
691 return -1;
692 c->board_id = board_id;
694 for(i=0; i<NR_PRODUCTS; i++) {
695 if (board_id == products[i].board_id) {
696 c->product_name = products[i].product_name;
697 c->access = *(products[i].access);
698 break;
699 }
700 }
701 if (i == NR_PRODUCTS) {
702 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
703 " to access the SMART Array controller %08lx\n",
704 (unsigned long)board_id);
705 return -1;
706 }
708 return 0;
709 }
711 /*
712 * Map (physical) PCI mem into (virtual) kernel space
713 */
714 static void __iomem *remap_pci_mem(ulong base, ulong size)
715 {
716 ulong page_base = ((ulong) base) & PAGE_MASK;
717 ulong page_offs = ((ulong) base) - page_base;
718 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
720 return (page_remapped ? (page_remapped + page_offs) : NULL);
721 }
723 #ifndef MODULE
724 /*
725 * Config string is a comma separated set of i/o addresses of EISA cards.
726 */
727 static int cpqarray_setup(char *str)
728 {
729 int i, ints[9];
731 (void)get_options(str, ARRAY_SIZE(ints), ints);
733 for(i=0; i<ints[0] && i<8; i++)
734 eisa[i] = ints[i+1];
735 return 1;
736 }
738 __setup("smart2=", cpqarray_setup);
740 #endif
742 /*
743 * Find an EISA controller's signature. Set up an hba if we find it.
744 */
745 static int __init cpqarray_eisa_detect(void)
746 {
747 int i=0, j;
748 __u32 board_id;
749 int intr;
750 int ctlr;
751 int num_ctlr = 0;
753 while(i<8 && eisa[i]) {
754 ctlr = alloc_cpqarray_hba();
755 if(ctlr == -1)
756 break;
757 board_id = inl(eisa[i]+0xC80);
758 for(j=0; j < NR_PRODUCTS; j++)
759 if (board_id == products[j].board_id)
760 break;
762 if (j == NR_PRODUCTS) {
763 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
764 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
765 continue;
766 }
768 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
769 hba[ctlr]->io_mem_addr = eisa[i];
770 hba[ctlr]->io_mem_length = 0x7FF;
771 if(!request_region(hba[ctlr]->io_mem_addr,
772 hba[ctlr]->io_mem_length,
773 "cpqarray"))
774 {
775 printk(KERN_WARNING "cpqarray: I/O range already in "
776 "use addr = %lx length = %ld\n",
777 hba[ctlr]->io_mem_addr,
778 hba[ctlr]->io_mem_length);
779 free_hba(ctlr);
780 continue;
781 }
783 /*
784 * Read the config register to find our interrupt
785 */
786 intr = inb(eisa[i]+0xCC0) >> 4;
787 if (intr & 1) intr = 11;
788 else if (intr & 2) intr = 10;
789 else if (intr & 4) intr = 14;
790 else if (intr & 8) intr = 15;
792 hba[ctlr]->intr = intr;
793 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
794 hba[ctlr]->product_name = products[j].product_name;
795 hba[ctlr]->access = *(products[j].access);
796 hba[ctlr]->ctlr = ctlr;
797 hba[ctlr]->board_id = board_id;
798 hba[ctlr]->pci_dev = NULL; /* not PCI */
800 DBGINFO(
801 printk("i = %d, j = %d\n", i, j);
802 printk("irq = %x\n", intr);
803 printk("product name = %s\n", products[j].product_name);
804 printk("board_id = %x\n", board_id);
805 );
807 num_ctlr++;
808 i++;
810 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
811 printk(KERN_WARNING
812 "cpqarray: Can't register EISA controller %d\n",
813 ctlr);
815 }
817 return num_ctlr;
818 }
820 /*
821 * Open. Make sure the device is really there.
822 */
823 static int ida_open(struct inode *inode, struct file *filep)
824 {
825 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
826 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
828 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
829 /*
830 * Root is allowed to open raw volume zero even if it's not configured
831 * so array config can still work. I don't think I really like this,
832 * but I'm already using way too many device nodes to claim another one
833 * for "raw controller".
834 */
835 if (!drv->nr_blks) {
836 if (!capable(CAP_SYS_RAWIO))
837 return -ENXIO;
838 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
839 return -ENXIO;
840 }
841 host->usage_count++;
842 return 0;
843 }
845 /*
846 * Close. Sync first.
847 */
848 static int ida_release(struct inode *inode, struct file *filep)
849 {
850 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
851 host->usage_count--;
852 return 0;
853 }
855 /*
856 * Enqueuing and dequeuing functions for cmdlists.
857 */
858 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
859 {
860 if (*Qptr == NULL) {
861 *Qptr = c;
862 c->next = c->prev = c;
863 } else {
864 c->prev = (*Qptr)->prev;
865 c->next = (*Qptr);
866 (*Qptr)->prev->next = c;
867 (*Qptr)->prev = c;
868 }
869 }
871 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
872 {
873 if (c && c->next != c) {
874 if (*Qptr == c) *Qptr = c->next;
875 c->prev->next = c->next;
876 c->next->prev = c->prev;
877 } else {
878 *Qptr = NULL;
879 }
880 return c;
881 }
883 /*
884 * Get a request and submit it to the controller.
885 * This routine needs to grab all the requests it possibly can from the
886 * req Q and submit them. Interrupts are off (and need to be off) when you
887 * are in here (either via the dummy do_ida_request functions or by being
888 * called from the interrupt handler
889 */
890 static void do_ida_request(request_queue_t *q)
891 {
892 ctlr_info_t *h = q->queuedata;
893 cmdlist_t *c;
894 struct request *creq;
895 struct scatterlist tmp_sg[SG_MAX];
896 int i, dir, seg;
898 if (blk_queue_plugged(q))
899 goto startio;
901 queue_next:
902 creq = elv_next_request(q);
903 if (!creq)
904 goto startio;
906 BUG_ON(creq->nr_phys_segments > SG_MAX);
908 if ((c = cmd_alloc(h,1)) == NULL)
909 goto startio;
911 blkdev_dequeue_request(creq);
913 c->ctlr = h->ctlr;
914 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
915 c->hdr.size = sizeof(rblk_t) >> 2;
916 c->size += sizeof(rblk_t);
918 c->req.hdr.blk = creq->sector;
919 c->rq = creq;
920 DBGPX(
921 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
922 );
923 seg = blk_rq_map_sg(q, creq, tmp_sg);
925 /* Now do all the DMA Mappings */
926 if (rq_data_dir(creq) == READ)
927 dir = PCI_DMA_FROMDEVICE;
928 else
929 dir = PCI_DMA_TODEVICE;
930 for( i=0; i < seg; i++)
931 {
932 c->req.sg[i].size = tmp_sg[i].length;
933 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
934 tmp_sg[i].page,
935 tmp_sg[i].offset,
936 tmp_sg[i].length, dir);
937 }
938 DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
939 c->req.hdr.sg_cnt = seg;
940 c->req.hdr.blk_cnt = creq->nr_sectors;
941 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
942 c->type = CMD_RWREQ;
944 /* Put the request on the tail of the request queue */
945 addQ(&h->reqQ, c);
946 h->Qdepth++;
947 if (h->Qdepth > h->maxQsinceinit)
948 h->maxQsinceinit = h->Qdepth;
950 goto queue_next;
952 startio:
953 start_io(h);
954 }
956 /*
957 * start_io submits everything on a controller's request queue
958 * and moves it to the completion queue.
959 *
960 * Interrupts had better be off if you're in here
961 */
962 static void start_io(ctlr_info_t *h)
963 {
964 cmdlist_t *c;
966 while((c = h->reqQ) != NULL) {
967 /* Can't do anything if we're busy */
968 if (h->access.fifo_full(h) == 0)
969 return;
971 /* Get the first entry from the request Q */
972 removeQ(&h->reqQ, c);
973 h->Qdepth--;
975 /* Tell the controller to do our bidding */
976 h->access.submit_command(h, c);
978 /* Get onto the completion Q */
979 addQ(&h->cmpQ, c);
980 }
981 }
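/*
 * Complete each bio in the chain, reporting success or -EIO to the
 * block layer.
 */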
983 static inline void complete_buffers(struct bio *bio, int ok)
984 {
985 struct bio *xbh;
986 while(bio) {
987 int nr_sectors = bio_sectors(bio);
989 xbh = bio->bi_next;
990 bio->bi_next = NULL;
992 blk_finished_io(nr_sectors);
993 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
995 bio = xbh;
996 }
997 }
998 /*
999 * Mark all buffers that cmd was responsible for
1000 */
1001 static inline void complete_command(cmdlist_t *cmd, int timeout)
1002 {
1003 struct request *rq = cmd->rq;
1004 int ok=1;
1005 int i, ddir;
1007 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1008 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1009 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1010 cmd->ctlr, cmd->hdr.unit);
1011 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1012 }
1013 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1014 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1015 cmd->ctlr, cmd->hdr.unit);
1016 ok = 0;
1017 }
1018 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1019 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1020 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1021 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1022 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1023 ok = 0;
1024 }
1025 if (timeout) ok = 0;
1026 /* unmap the DMA mapping for all the scatter gather elements */
1027 if (cmd->req.hdr.cmd == IDA_READ)
1028 ddir = PCI_DMA_FROMDEVICE;
1029 else
1030 ddir = PCI_DMA_TODEVICE;
1031 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1032 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1033 cmd->req.sg[i].size, ddir);
1035 complete_buffers(rq->bio, ok);
1037 if (blk_fs_request(rq)) {
1038 const int rw = rq_data_dir(rq);
1040 disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
1041 }
1043 add_disk_randomness(rq->rq_disk);
1045 DBGPX(printk("Done with %p\n", rq););
1046 end_that_request_last(rq, ok ? 1 : -EIO);
1047 }
1049 /*
1050 * The controller will interrupt us upon completion of commands.
1051 * Find the command on the completion queue, remove it, tell the OS and
1052 * try to queue up more IO
1053 */
1054 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1055 {
1056 ctlr_info_t *h = dev_id;
1057 cmdlist_t *c;
1058 unsigned long istat;
1059 unsigned long flags;
1060 __u32 a,a1;
1062 istat = h->access.intr_pending(h);
1063 /* Is this interrupt for us? */
1064 if (istat == 0)
1065 return IRQ_NONE;
1067 /*
1068 * If there are completed commands in the completion queue,
1069 * we had better do something about it.
1070 */
1071 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1072 if (istat & FIFO_NOT_EMPTY) {
1073 while((a = h->access.command_completed(h))) {
1074 a1 = a; a &= ~3;
1075 if ((c = h->cmpQ) == NULL)
1076 {
1077 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1078 continue;
1079 }
1080 while(c->busaddr != a) {
1081 c = c->next;
1082 if (c == h->cmpQ)
1083 break;
1084 }
1085 /*
1086 * If we've found the command, take it off the
1087 * completion Q and free it
1088 */
1089 if (c->busaddr == a) {
1090 removeQ(&h->cmpQ, c);
1091 /* Check for invalid command.
1092 * Controller returns command error,
1093 * But rcode = 0.
1094 */
1096 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1097 {
1098 c->req.hdr.rcode = RCODE_INVREQ;
1099 }
1100 if (c->type == CMD_RWREQ) {
1101 complete_command(c, 0);
1102 cmd_free(h, c, 1);
1103 } else if (c->type == CMD_IOCTL_PEND) {
1104 c->type = CMD_IOCTL_DONE;
1105 }
1106 continue;
1107 }
1108 }
1109 }
1111 /*
1112 * See if we can queue up some more IO
1113 */
1114 do_ida_request(h->queue);
1115 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1116 return IRQ_HANDLED;
1117 }
1119 /*
1120 * This timer was for timing out requests that haven't happened after
1121 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1122 * reset a flags structure so we don't flood the user with
1123 * "Non-Fatal error" messages.
1124 */
1125 static void ida_timer(unsigned long tdata)
1126 {
1127 ctlr_info_t *h = (ctlr_info_t*)tdata;
1129 h->timer.expires = jiffies + IDA_TIMER;
1130 add_timer(&h->timer);
1131 h->misc_tflags = 0;
1132 }
1134 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1135 {
1136 drv_info_t *drv = get_drv(bdev->bd_disk);
1138 if (drv->cylinders) {
1139 geo->heads = drv->heads;
1140 geo->sectors = drv->sectors;
1141 geo->cylinders = drv->cylinders;
1142 } else {
1143 geo->heads = 0xff;
1144 geo->sectors = 0x3f;
1145 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1146 }
1148 return 0;
1149 }
1151 /*
1152 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1153 * setting readahead and submitting commands from userspace to the controller.
1154 */
1155 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1156 {
1157 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1158 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1159 int error;
1160 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1161 ida_ioctl_t *my_io;
1163 switch(cmd) {
1164 case IDAGETDRVINFO:
1165 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1166 return -EFAULT;
1167 return 0;
1168 case IDAPASSTHRU:
1169 if (!capable(CAP_SYS_RAWIO))
1170 return -EPERM;
1171 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1172 if (!my_io)
1173 return -ENOMEM;
1174 error = -EFAULT;
1175 if (copy_from_user(my_io, io, sizeof(*my_io)))
1176 goto out_passthru;
1177 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1178 if (error)
1179 goto out_passthru;
1180 error = -EFAULT;
1181 if (copy_to_user(io, my_io, sizeof(*my_io)))
1182 goto out_passthru;
1183 error = 0;
1184 out_passthru:
1185 kfree(my_io);
1186 return error;
1187 case IDAGETCTLRSIG:
1188 if (!arg) return -EINVAL;
1189 put_user(host->ctlr_sig, (int __user *)arg);
1190 return 0;
1191 case IDAREVALIDATEVOLS:
1192 if (iminor(inode) != 0)
1193 return -ENXIO;
1194 return revalidate_allvol(host);
1195 case IDADRIVERVERSION:
1196 if (!arg) return -EINVAL;
1197 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1198 return 0;
1199 case IDAGETPCIINFO:
1200 {
1202 ida_pci_info_struct pciinfo;
1204 if (!arg) return -EINVAL;
1205 pciinfo.bus = host->pci_dev->bus->number;
1206 pciinfo.dev_fn = host->pci_dev->devfn;
1207 pciinfo.board_id = host->board_id;
1208 if(copy_to_user((void __user *) arg, &pciinfo,
1209 sizeof( ida_pci_info_struct)))
1210 return -EFAULT;
1211 return(0);
1212 }
1214 default:
1215 return -EINVAL;
1216 }
1217 }
1219 /*
1220 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1221 * The command block (io) has already been copied to kernel space for us,
1222 * however, any elements in the sglist need to be copied to kernel space
1223 * or copied back to userspace.
1225 * Only root may perform a controller passthru command, however I'm not doing
1226 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1227 * putting a 64M buffer in the sglist is probably a *bad* idea.
1228 */
1229 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1230 {
1231 int ctlr = h->ctlr;
1232 cmdlist_t *c;
1233 void *p = NULL;
1234 unsigned long flags;
1235 int error;
1237 if ((c = cmd_alloc(h, 0)) == NULL)
1238 return -ENOMEM;
1239 c->ctlr = ctlr;
1240 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1241 c->hdr.size = sizeof(rblk_t) >> 2;
1242 c->size += sizeof(rblk_t);
1244 c->req.hdr.cmd = io->cmd;
1245 c->req.hdr.blk = io->blk;
1246 c->req.hdr.blk_cnt = io->blk_cnt;
1247 c->type = CMD_IOCTL_PEND;
1249 /* Pre submit processing */
1250 switch(io->cmd) {
1251 case PASSTHRU_A:
1252 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1253 if (!p)
1254 {
1255 error = -ENOMEM;
1256 cmd_free(h, c, 0);
1257 return(error);
1258 }
1259 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1260 kfree(p);
1261 cmd_free(h, c, 0);
1262 return -EFAULT;
1263 }
1264 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1265 sizeof(ida_ioctl_t),
1266 PCI_DMA_BIDIRECTIONAL);
1267 c->req.sg[0].size = io->sg[0].size;
1268 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1269 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1270 c->req.hdr.sg_cnt = 1;
1271 break;
1272 case IDA_READ:
1273 case READ_FLASH_ROM:
1274 case SENSE_CONTROLLER_PERFORMANCE:
1275 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1276 if (!p)
1277 {
1278 error = -ENOMEM;
1279 cmd_free(h, c, 0);
1280 return(error);
1281 }
1283 c->req.sg[0].size = io->sg[0].size;
1284 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1285 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1286 c->req.hdr.sg_cnt = 1;
1287 break;
1288 case IDA_WRITE:
1289 case IDA_WRITE_MEDIA:
1290 case DIAG_PASS_THRU:
1291 case COLLECT_BUFFER:
1292 case WRITE_FLASH_ROM:
1293 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1294 if (!p)
1295 {
1296 error = -ENOMEM;
1297 cmd_free(h, c, 0);
1298 return(error);
1299 }
1300 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1301 kfree(p);
1302 cmd_free(h, c, 0);
1303 return -EFAULT;
1304 }
1305 c->req.sg[0].size = io->sg[0].size;
1306 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1307 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1308 c->req.hdr.sg_cnt = 1;
1309 break;
1310 default:
1311 c->req.sg[0].size = sizeof(io->c);
1312 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1313 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1314 c->req.hdr.sg_cnt = 1;
1315 }
1317 /* Put the request on the tail of the request queue */
1318 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1319 addQ(&h->reqQ, c);
1320 h->Qdepth++;
1321 start_io(h);
1322 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1324 /* Wait for completion */
1325 while(c->type != CMD_IOCTL_DONE)
1326 schedule();
1328 /* Unmap the DMA */
1329 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1330 PCI_DMA_BIDIRECTIONAL);
1331 /* Post submit processing */
1332 switch(io->cmd) {
1333 case PASSTHRU_A:
1334 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1335 sizeof(ida_ioctl_t),
1336 PCI_DMA_BIDIRECTIONAL);
1337 case IDA_READ:
1338 case DIAG_PASS_THRU:
1339 case SENSE_CONTROLLER_PERFORMANCE:
1340 case READ_FLASH_ROM:
1341 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1342 kfree(p);
1343 return -EFAULT;
1344 }
1345 /* fall through and free p */
1346 case IDA_WRITE:
1347 case IDA_WRITE_MEDIA:
1348 case COLLECT_BUFFER:
1349 case WRITE_FLASH_ROM:
1350 kfree(p);
1351 break;
1352 default:;
1353 /* Nothing to do */
1354 }
1356 io->rcode = c->req.hdr.rcode;
1357 cmd_free(h, c, 0);
1358 return(0);
1359 }
1361 /*
1362 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1363 * scheme to suballocate them to the driver. Operations that are not time
1364 * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
1365 * as the first argument to get a new command.
1366 */
1367 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1368 {
1369 cmdlist_t * c;
1370 int i;
1371 dma_addr_t cmd_dhandle;
1373 if (!get_from_pool) {
1374 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1375 sizeof(cmdlist_t), &cmd_dhandle);
1376 if(c==NULL)
1377 return NULL;
1378 } else {
1379 do {
1380 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1381 if (i == NR_CMDS)
1382 return NULL;
1383 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1384 c = h->cmd_pool + i;
1385 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1386 h->nr_allocs++;
1387 }
1389 memset(c, 0, sizeof(cmdlist_t));
1390 c->busaddr = cmd_dhandle;
1391 return c;
1392 }
1394 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1395 {
1396 int i;
1398 if (!got_from_pool) {
1399 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1400 c->busaddr);
1401 } else {
1402 i = c - h->cmd_pool;
1403 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1404 h->nr_frees++;
1405 }
1406 }
1408 /***********************************************************************
1409 name: sendcmd
1410 Send a command to an IDA using the memory mapped FIFO interface
1411 and wait for it to complete.
1412 This routine should only be called at init time.
1413 ***********************************************************************/
1414 static int sendcmd(
1415 __u8 cmd,
1416 int ctlr,
1417 void *buff,
1418 size_t size,
1419 unsigned int blk,
1420 unsigned int blkcnt,
1421 unsigned int log_unit )
1422 {
1423 cmdlist_t *c;
1424 int complete;
1425 unsigned long temp;
1426 unsigned long i;
1427 ctlr_info_t *info_p = hba[ctlr];
1429 c = cmd_alloc(info_p, 1);
1430 if(!c)
1431 return IO_ERROR;
1432 c->ctlr = ctlr;
1433 c->hdr.unit = log_unit;
1434 c->hdr.prio = 0;
1435 c->hdr.size = sizeof(rblk_t) >> 2;
1436 c->size += sizeof(rblk_t);
1438 /* The request information. */
1439 c->req.hdr.next = 0;
1440 c->req.hdr.rcode = 0;
1441 c->req.bp = 0;
1442 c->req.hdr.sg_cnt = 1;
1443 c->req.hdr.reserved = 0;
1445 if (size == 0)
1446 c->req.sg[0].size = 512;
1447 else
1448 c->req.sg[0].size = size;
1450 c->req.hdr.blk = blk;
1451 c->req.hdr.blk_cnt = blkcnt;
1452 c->req.hdr.cmd = (unsigned char) cmd;
1453 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1454 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1455 /*
1456 * Disable interrupt
1457 */
1458 info_p->access.set_intr_mask(info_p, 0);
1459 /* Make sure there is room in the command FIFO */
1460 /* Actually it should be completely empty at this time. */
1461 for (i = 200000; i > 0; i--) {
1462 temp = info_p->access.fifo_full(info_p);
1463 if (temp != 0) {
1464 break;
1465 }
1466 udelay(10);
1467 DBG(
1468 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1469 " waiting!\n", ctlr);
1470 );
1471 }
1472 /*
1473 * Send the cmd
1474 */
1475 info_p->access.submit_command(info_p, c);
1476 complete = pollcomplete(ctlr);
1478 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1479 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1480 if (complete != 1) {
1481 if (complete != c->busaddr) {
1482 printk( KERN_WARNING
1483 "cpqarray ida%d: idaSendPciCmd "
1484 "Invalid command list address returned! (%08lx)\n",
1485 ctlr, (unsigned long)complete);
1486 cmd_free(info_p, c, 1);
1487 return (IO_ERROR);
1488 }
1489 } else {
1490 printk( KERN_WARNING
1491 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1492 "No command list address returned!\n",
1493 ctlr);
1494 cmd_free(info_p, c, 1);
1495 return (IO_ERROR);
1496 }
1498 if (c->req.hdr.rcode & 0x00FE) {
1499 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1500 printk( KERN_WARNING
1501 "cpqarray ida%d: idaSendPciCmd, error: "
1502 "Controller failed at init time "
1503 "cmd: 0x%x, return code = 0x%x\n",
1504 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1506 cmd_free(info_p, c, 1);
1507 return (IO_ERROR);
1508 }
1509 }
1510 cmd_free(info_p, c, 1);
1511 return (IO_OK);
1512 }
1514 /*
1515 * revalidate_allvol is for online array config utilities. After a
1516 * utility reconfigures the drives in the array, it can use this function
1517 * (through an ioctl) to make the driver zap any previous disk structs for
1518 * that controller and get new ones.
1520 * Right now I'm using the getgeometry() function to do this, but this
1521 * function should probably be finer grained and allow you to revalidate one
1522 * particular logical volume (instead of all of them on a particular
1523 * controller).
1524 */
1525 static int revalidate_allvol(ctlr_info_t *host)
1526 {
1527 int ctlr = host->ctlr;
1528 int i;
1529 unsigned long flags;
1531 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1532 if (host->usage_count > 1) {
1533 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1534 printk(KERN_WARNING "cpqarray: Device busy for volume"
1535 " revalidation (usage=%d)\n", host->usage_count);
1536 return -EBUSY;
1537 }
1538 host->usage_count++;
1539 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1541 /*
1542 * Set the partition and block size structures for all volumes
1543 * on this controller to zero. We will reread all of this data
1544 */
1545 set_capacity(ida_gendisk[ctlr][0], 0);
1546 for (i = 1; i < NWD; i++) {
1547 struct gendisk *disk = ida_gendisk[ctlr][i];
1548 if (disk->flags & GENHD_FL_UP)
1549 del_gendisk(disk);
1550 }
1551 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1553 /*
1554 * Tell the array controller not to give us any interrupts while
1555 * we check the new geometry. Then turn interrupts back on when
1556 * we're done.
1557 */
1558 host->access.set_intr_mask(host, 0);
1559 getgeometry(ctlr);
1560 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1562 for(i=0; i<NWD; i++) {
1563 struct gendisk *disk = ida_gendisk[ctlr][i];
1564 drv_info_t *drv = &host->drv[i];
1565 if (i && !drv->nr_blks)
1566 continue;
1567 blk_queue_hardsect_size(host->queue, drv->blk_size);
1568 set_capacity(disk, drv->nr_blks);
1569 disk->queue = host->queue;
1570 disk->private_data = drv;
1571 if (i)
1572 add_disk(disk);
1573 }
1575 host->usage_count--;
1576 return 0;
1577 }
1579 static int ida_revalidate(struct gendisk *disk)
1580 {
1581 drv_info_t *drv = disk->private_data;
1582 set_capacity(disk, drv->nr_blks);
1583 return 0;
1584 }
1586 /********************************************************************
1587 name: pollcomplete
1588 Wait polling for a command to complete.
1589 The memory mapped FIFO is polled for the completion.
1590 Used only at init time, interrupts disabled.
1591 ********************************************************************/
1592 static int pollcomplete(int ctlr)
1593 {
1594 int done;
1595 int i;
1597 /* Wait (up to 2 seconds) for a command to complete */
1599 for (i = 200000; i > 0; i--) {
1600 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1601 if (done == 0) {
1602 udelay(10); /* a short fixed delay */
1603 } else
1604 return (done);
1605 }
1606 /* Invalid address to tell caller we ran out of time */
1607 return 1;
1608 }
1609 /*****************************************************************
1610 start_fwbk
1611 Starts the controller firmware's background processing.
1612 Currently only the Integrated Raid controller needs this done.
1613 If the PCI mem address registers are written to after this,
1614 data corruption may occur
1615 *****************************************************************/
1616 static void start_fwbk(int ctlr)
1617 {
1618 id_ctlr_t *id_ctlr_buf;
1619 int ret_code;
1621 if( (hba[ctlr]->board_id != 0x40400E11)
1622 && (hba[ctlr]->board_id != 0x40480E11) )
1624 /* Not an Integrated Raid, so there is nothing for us to do */
1625 return;
1626 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1627 " processing\n");
1628 /* Command does not return anything, but idasend command needs a
1629 buffer */
1630 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1631 if(id_ctlr_buf==NULL)
1632 {
1633 printk(KERN_WARNING "cpqarray: Out of memory. "
1634 "Unable to start background processing.\n");
1635 return;
1636 }
1637 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1638 id_ctlr_buf, 0, 0, 0, 0);
1639 if(ret_code != IO_OK)
1640 printk(KERN_WARNING "cpqarray: Unable to start"
1641 " background processing\n");
1643 kfree(id_ctlr_buf);
1644 }
1645 /*****************************************************************
1646 getgeometry
1647 Get ida logical volume geometry from the controller
1648 This is a large bit of code which once existed in two flavors,
1649 It is used only at init time.
1650 *****************************************************************/
1651 static void getgeometry(int ctlr)
1652 {
1653 id_log_drv_t *id_ldrive;
1654 id_ctlr_t *id_ctlr_buf;
1655 sense_log_drv_stat_t *id_lstatus_buf;
1656 config_t *sense_config_buf;
1657 unsigned int log_unit, log_index;
1658 int ret_code, size;
1659 drv_info_t *drv;
1660 ctlr_info_t *info_p = hba[ctlr];
1661 int i;
1663 info_p->log_drv_map = 0;
1665 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1666 if(id_ldrive == NULL)
1667 {
1668 printk( KERN_ERR "cpqarray: out of memory.\n");
1669 return;
1670 }
1672 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1673 if(id_ctlr_buf == NULL)
1674 {
1675 kfree(id_ldrive);
1676 printk( KERN_ERR "cpqarray: out of memory.\n");
1677 return;
1678 }
1680 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1681 if(id_lstatus_buf == NULL)
1682 {
1683 kfree(id_ctlr_buf);
1684 kfree(id_ldrive);
1685 printk( KERN_ERR "cpqarray: out of memory.\n");
1686 return;
1687 }
1689 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1690 if(sense_config_buf == NULL)
1691 {
1692 kfree(id_lstatus_buf);
1693 kfree(id_ctlr_buf);
1694 kfree(id_ldrive);
1695 printk( KERN_ERR "cpqarray: out of memory.\n");
1696 return;
1697 }
1699 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1700 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1701 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1702 memset(sense_config_buf, 0, sizeof(config_t));
1704 info_p->phys_drives = 0;
1705 info_p->log_drv_map = 0;
1706 info_p->drv_assign_map = 0;
1707 info_p->drv_spare_map = 0;
1708 info_p->mp_failed_drv_map = 0; /* only initialized here */
1709 /* Get controllers info for this logical drive */
1710 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1711 if (ret_code == IO_ERROR) {
1712 /*
1713 * If can't get controller info, set the logical drive map to 0,
1714 * so the idastubopen will fail on all logical drives
1715 * on the controller.
1716 */
1717 /* Free all the buffers and return */
1718 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1719 kfree(sense_config_buf);
1720 kfree(id_lstatus_buf);
1721 kfree(id_ctlr_buf);
1722 kfree(id_ldrive);
1723 return;
1724 }
1726 info_p->log_drives = id_ctlr_buf->nr_drvs;
1727 for(i=0;i<4;i++)
1728 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1729 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1731 printk(" (%s)\n", info_p->product_name);
1732 /*
1733 * Initialize logical drive map to zero
1734 */
1735 log_index = 0;
1736 /*
1737 * Get drive geometry for all logical drives
1738 */
1739 if (id_ctlr_buf->nr_drvs > 16)
1740 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1741 "16 logical drives per controller.\n. "
1742 " Additional drives will not be "
1743 "detected\n", ctlr);
1745 for (log_unit = 0;
1746 (log_index < id_ctlr_buf->nr_drvs)
1747 && (log_unit < NWD);
1748 log_unit++) {
1749 size = sizeof(sense_log_drv_stat_t);
1751 /*
1752 Send "Identify logical drive status" cmd
1753 */
1754 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1755 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1756 if (ret_code == IO_ERROR) {
1757 /*
1758 If can't get logical drive status, set
1759 the logical drive map to 0, so the
1760 idastubopen will fail for all logical drives
1761 on the controller.
1762 */
1763 info_p->log_drv_map = 0;
1764 printk( KERN_WARNING
1765 "cpqarray ida%d: idaGetGeometry - Controller"
1766 " failed to report status of logical drive %d\n"
1767 "Access to this controller has been disabled\n",
1768 ctlr, log_unit);
1769 /* Free all the buffers and return */
1770 kfree(sense_config_buf);
1771 kfree(id_lstatus_buf);
1772 kfree(id_ctlr_buf);
1773 kfree(id_ldrive);
1774 return;
1775 }
1776 /*
1777 Make sure the logical drive is configured
1778 */
1779 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1780 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1781 sizeof(id_log_drv_t), 0, 0, log_unit);
1782 /*
1783 If error, the bit for this
1784 logical drive won't be set and
1785 idastubopen will return error.
1786 */
1787 if (ret_code != IO_ERROR) {
1788 drv = &info_p->drv[log_unit];
1789 drv->blk_size = id_ldrive->blk_size;
1790 drv->nr_blks = id_ldrive->nr_blks;
1791 drv->cylinders = id_ldrive->drv.cyl;
1792 drv->heads = id_ldrive->drv.heads;
1793 drv->sectors = id_ldrive->drv.sect_per_track;
1794 info_p->log_drv_map |= (1 << log_unit);
1796 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1797 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1798 ret_code = sendcmd(SENSE_CONFIG,
1799 ctlr, sense_config_buf,
1800 sizeof(config_t), 0, 0, log_unit);
1801 if (ret_code == IO_ERROR) {
1802 info_p->log_drv_map = 0;
1803 /* Free all the buffers and return */
1804 printk(KERN_ERR "cpqarray: error sending sense config\n");
1805 kfree(sense_config_buf);
1806 kfree(id_lstatus_buf);
1807 kfree(id_ctlr_buf);
1808 kfree(id_ldrive);
1809 return;
1811 }
1813 info_p->phys_drives =
1814 sense_config_buf->ctlr_phys_drv;
1815 info_p->drv_assign_map
1816 |= sense_config_buf->drv_asgn_map;
1817 info_p->drv_assign_map
1818 |= sense_config_buf->spare_asgn_map;
1819 info_p->drv_spare_map
1820 |= sense_config_buf->spare_asgn_map;
1821 } /* end of if no error on id_ldrive */
1822 log_index = log_index + 1;
1823 } /* end of if logical drive configured */
1824 } /* end of for log_unit */
1825 kfree(sense_config_buf);
1826 kfree(id_ldrive);
1827 kfree(id_lstatus_buf);
1828 kfree(id_ctlr_buf);
1829 return;
1831 }
1833 static void __exit cpqarray_exit(void)
1834 {
1835 int i;
1837 pci_unregister_driver(&cpqarray_pci_driver);
1839 /* Double check that all controller entries have been removed */
1840 for(i=0; i<MAX_CTLR; i++) {
1841 if (hba[i] != NULL) {
1842 printk(KERN_WARNING "cpqarray: Removing EISA "
1843 "controller %d\n", i);
1844 cpqarray_remove_one_eisa(i);
1845 }
1846 }
1848 remove_proc_entry("cpqarray", proc_root_driver);
1849 }
1851 module_init(cpqarray_init)
1852 module_exit(cpqarray_exit)